/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 *
 * $FreeBSD: src/sys/dev/drm2/i915/intel_display.c,v 1.2 2012/05/24 19:13:54 dim Exp $
 */

#include <ddb/ddb.h>
#include <sys/limits.h>

#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>

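/*
 * NOTE: HAS_eDP is not a function-like macro; it expands in place and
 * relies on a variable named 'crtc' being in scope at the point of use.
 */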
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))

bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_update_watermarks(struct drm_device *dev);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);

typedef struct {
	/* given values */
	int n;
	int m1, m2;
	int p1, p2;
	/* derived values */
	int	dot;
	int	vco;
	int	m;
	int	p;
} intel_clock_t;
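
/*
 * The derived values follow the standard PLL relations used throughout
 * this file: vco = refclk * m / n and dot = vco / p, where m and p are
 * composed from the m1/m2 and p1/p2 register fields.  The exact
 * composition (and the +2 register bias) is generation dependent; see
 * pineview_clock() and intel_clock() below.
 */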

typedef struct {
	int	min, max;
} intel_range_t;

typedef struct {
	int	dot_limit;
	int	p2_slow, p2_fast;
} intel_p2_t;

#define INTEL_P2_NUM		      2
typedef struct intel_limit intel_limit_t;
struct intel_limit {
	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t	    p2;
	bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
			int, int, intel_clock_t *, intel_clock_t *);
};

/* FDI */
#define IRONLAKE_FDI_FREQ		2700000 /* in kHz for mode->clock */

static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock);
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock);

static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *match_clock,
		      intel_clock_t *best_clock);
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *match_clock,
			   intel_clock_t *best_clock);

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
	if (IS_GEN5(dev)) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
	} else
		return 27;
}
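
/*
 * The non-GEN5 fallback of 27 corresponds to 2.7 GHz, i.e. the
 * IRONLAKE_FDI_FREQ value of 2700000 kHz defined above.
 */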

static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_display_port = {
	.dot = { .min = 161670, .max = 227000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 2 },
	.m = { .min = 97, .max = 108 },
	.m1 = { .min = 0x10, .max = 0x12 },
	.m2 = { .min = 0x05, .max = 0x06 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2},
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_g4x_dp,
};

static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_find_best_PLL,
};

/* Ironlake / Sandybridge
 *
 * We calculate the clock using (register_value + 2) for N/M1/M2, so the
 * range values given here are (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* LVDS 100MHz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_display_port = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000},
	.n = { .min = 1, .max = 2 },
	.m = { .min = 81, .max = 90 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2},
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_ironlake_dp,
};

static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
						int refclk)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP) {
			/* LVDS dual channel */
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
			HAS_eDP)
		limit = &intel_limits_ironlake_display_port;
	else
		limit = &intel_limits_ironlake_dac;

	return limit;
}

static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			/* LVDS with dual channel */
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			/* LVDS with single channel */
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		limit = &intel_limits_g4x_display_port;
	} else /* Fallback for other output types */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}

static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (HAS_PCH_SPLIT(dev))
		limit = intel_ironlake_limit(crtc, refclk);
	else if (IS_G4X(dev)) {
		limit = intel_g4x_limit(crtc);
	} else if (IS_PINEVIEW(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_pineview_lvds;
		else
			limit = &intel_limits_pineview_sdvo;
	} else if (!IS_GEN2(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i9xx_lvds;
		else
			limit = &intel_limits_i9xx_sdvo;
	} else {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i8xx_lvds;
		else
			limit = &intel_limits_i8xx_dvo;
	}
	return limit;
}
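
/*
 * Typical use by the mode-set code (a sketch; the parameter values are
 * illustrative only):
 *
 *	const intel_limit_t *limit = intel_limit(crtc, refclk);
 *	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
 *			     NULL, &clock);
 *
 * i.e. intel_limit() picks the platform/output specific divisor table and
 * the table supplies the search strategy via its find_pll callback.
 */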

/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / clock->n;
	clock->dot = clock->vco / clock->p;
}

static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
{
	if (IS_PINEVIEW(dev)) {
		pineview_clock(refclk, clock);
		return;
	}
	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
}
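
/*
 * Worked example of the non-Pineview formula above, using the g4x DP
 * divisors from intel_find_pll_g4x_dp() below and the 96 MHz reference
 * clock hardcoded there (values in kHz):
 *
 *	m1 = 23, m2 = 8  ->  m   = 5 * (23 + 2) + (8 + 2) = 135
 *	n  = 2           ->  vco = 96000 * 135 / (2 + 2)  = 3240000
 *	p1 = 2, p2 = 10  ->  p   = 20
 *	                     dot = 3240000 / 20           = 162000
 *
 * i.e. the 162 MHz DisplayPort link frequency.
 */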

/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->base.crtc == crtc && encoder->type == type)
			return true;

	return false;
}

#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors is valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
		INTELPllInvalid("p out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
		INTELPllInvalid("m1 <= m2\n");
	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
		INTELPllInvalid("m out of range\n");
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (I915_READ(LVDS)) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel.  We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 is always 0 in Pineview */
			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		int lvds_reg;

		if (HAS_PCH_SPLIT(dev))
			lvds_reg = PCH_LVDS;
		else
			lvds_reg = LVDS;
		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

static bool
intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *match_clock,
			   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;

	if (target < 200000) {
		clock.n = 1;
		clock.p1 = 2;
		clock.p2 = 10;
		clock.m1 = 12;
		clock.m2 = 9;
	} else {
		clock.n = 2;
		clock.p1 = 1;
		clock.p2 = 10;
		clock.m1 = 14;
		clock.m2 = 8;
	}
	intel_clock(dev, refclk, &clock);
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}
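
/*
 * Sanity check on the fixed divisors above, assuming the usual 120 MHz
 * PCH SSC reference clock (an assumption; the caller supplies refclk):
 * the target < 200000 set gives m = 5 * (12 + 2) + (9 + 2) = 81,
 * vco = 120000 * 81 / (1 + 2) = 3240000 kHz and
 * dot = 3240000 / (2 * 10) = 162000 kHz, the low DisplayPort link
 * frequency.
 */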

/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *match_clock,
		      intel_clock_t *best_clock)
{
	intel_clock_t clock;
	if (target < 200000) {
		clock.p1 = 2;
		clock.p2 = 10;
		clock.n = 2;
		clock.m1 = 23;
		clock.m2 = 8;
	} else {
		clock.p1 = 1;
		clock.p2 = 10;
		clock.n = 1;
		clock.m1 = 14;
		clock.m2 = 2;
	}
	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
	clock.p = (clock.p1 * clock.p2);
	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
	clock.vco = 0;
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}
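
/*
 * With the 96 MHz reference hardcoded above, the two branches evaluate to
 * dot = 96000 * 135 / 4 / 20 = 162000 kHz and
 * dot = 96000 * 84 / 3 / 10  = 268800 kHz respectively, matching the two
 * DisplayPort link frequencies (the second is nominally 270 MHz).
 */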

/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * mode setting code.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set */
	if (_intel_wait_for(dev,
	    I915_READ(pipestat_reg) & PIPE_VBLANK_INTERRUPT_STATUS,
	    50, 1, "915vbl"))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}

/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(pipe);

		/* Wait for the Pipe State to go off */
		if (_intel_wait_for(dev,
		    (I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 100,
		    1, "915pip"))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	} else {
		u32 last_line;
		int reg = PIPEDSL(pipe);
		unsigned long timeout = jiffies + msecs_to_jiffies(100);

		/* Wait for the display line to settle */
		do {
			last_line = I915_READ(reg) & DSL_LINEMASK;
			DELAY(5000);
		} while (((I915_READ(reg) & DSL_LINEMASK) != last_line) &&
			 time_after(timeout, jiffies));
		if (time_after(jiffies, timeout))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	}
}

static const char *state_string(bool enabled)
{
	return enabled ? "on" : "off";
}

/* Only for pre-ILK configs */
static void assert_pll(struct drm_i915_private *dev_priv,
		       enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	if (cur_state != state)
		kprintf("PLL state assertion failure (expected %s, current %s)\n",
		    state_string(state), state_string(cur_state));
}
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)

/* For ILK+ */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
			   enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32 pch_dpll;

		pch_dpll = I915_READ(PCH_DPLL_SEL);

		/* Make sure the selected PLL is enabled to the transcoder */
		KASSERT(((pch_dpll >> (4 * pipe)) & 8) != 0,
		    ("transcoder %d PLL not enabled\n", pipe));

		/* Convert the transcoder pipe number to a pll pipe number */
		pipe = (pch_dpll >> (4 * pipe)) & 1;
	}

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	if (cur_state != state)
		kprintf("PCH PLL state assertion failure (expected %s, current %s)\n",
		    state_string(state), state_string(cur_state));
}
#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_TX_ENABLE);
	if (cur_state != state)
		kprintf("FDI TX state assertion failure (expected %s, current %s)\n",
		    state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_RX_ENABLE);
	if (cur_state != state)
		kprintf("FDI RX state assertion failure (expected %s, current %s)\n",
		    state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum i915_pipe pipe)
{
	int reg;
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (dev_priv->info->gen == 5)
		return;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	if (!(val & FDI_TX_PLL_ENABLE))
		kprintf("FDI TX PLL assertion failure, should be active but is disabled\n");
}

static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum i915_pipe pipe)
{
	int reg;
	u32 val;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	if (!(val & FDI_RX_PLL_ENABLE))
		kprintf("FDI RX PLL assertion failure, should be active but is disabled\n");
}

static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
				  enum i915_pipe pipe)
{
	int pp_reg, lvds_reg;
	u32 val;
	enum i915_pipe panel_pipe = PIPE_A;
	bool locked = true;

	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		pp_reg = PCH_PP_CONTROL;
		lvds_reg = PCH_LVDS;
	} else {
		pp_reg = PP_CONTROL;
		lvds_reg = LVDS;
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
		locked = false;

	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
		panel_pipe = PIPE_B;

	if (panel_pipe == pipe && locked)
		kprintf("panel assertion failure, pipe %c regs locked\n",
		    pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	/* if we need the pipe A quirk it must always be on */
	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
		state = true;

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & PIPECONF_ENABLE);
	if (cur_state != state)
		kprintf("pipe %c assertion failure (expected %s, current %s)\n",
		    pipe_name(pipe), state_string(state), state_string(cur_state));
}

static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	if (cur_state != state)
		kprintf("plane %c assertion failure (expected %s, current %s)\n",
		       plane_name(plane), state_string(state), state_string(cur_state));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)

static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum i915_pipe pipe)
{
	int reg, i;
	u32 val;
	int cur_pipe;

	/* Planes are fixed to pipes on ILK+ */
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		reg = DSPCNTR(pipe);
		val = I915_READ(reg);
		if ((val & DISPLAY_PLANE_ENABLE) != 0)
			kprintf("plane %c assertion failure, should be disabled but is not\n",
			       plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for (i = 0; i < 2; i++) {
		reg = DSPCNTR(i);
		val = I915_READ(reg);
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		if ((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe)
			kprintf("plane %c assertion failure, should be off on pipe %c but is still active\n",
			    plane_name(i), pipe_name(pipe));
	}
}

static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	if (!enabled)
		kprintf("PCH refclk assertion failure, should be active but is disabled\n");
}

static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
				       enum i915_pipe pipe)
{
	int reg;
	u32 val;
	bool enabled;

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	enabled = !!(val & TRANS_ENABLE);
	if (enabled)
		kprintf("transcoder assertion failed, should be off on pipe %c but is still active\n",
		    pipe_name(pipe));
}

static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum i915_pipe pipe, u32 val)
{
	if ((val & PORT_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
			return false;
	}
	return true;
}

static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum i915_pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
			return false;
	}
	return true;
}

static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum i915_pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;
	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
			return false;
	}
	return true;
}

static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum i915_pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32	trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
		u32	trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
			return false;
	} else {
		if ((val & DP_PIPE_MASK) != (pipe << 30))
			return false;
	}
	return true;
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum i915_pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);
	if (dp_pipe_enabled(dev_priv, pipe, port_sel, val))
		kprintf("PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
		    reg, pipe_name(pipe));
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum i915_pipe pipe, int reg)
{
	u32 val = I915_READ(reg);
	if (hdmi_pipe_enabled(dev_priv, pipe, val))
		kprintf("PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
		    reg, pipe_name(pipe));
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum i915_pipe pipe)
{
	int reg;
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	reg = PCH_ADPA;
	val = I915_READ(reg);
	if (adpa_pipe_enabled(dev_priv, pipe, val))
		kprintf("PCH VGA enabled on transcoder %c, should be disabled\n",
		    pipe_name(pipe));

	reg = PCH_LVDS;
	val = I915_READ(reg);
	if (lvds_pipe_enabled(dev_priv, pipe, val))
		kprintf("PCH LVDS enabled on transcoder %c, should be disabled\n",
		    pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
}

/**
 * intel_enable_pll - enable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
 * make sure the PLL reg is writable first though, since the panel write
 * protect mechanism may be enabled.
 *
 * Note!  This is for pre-ILK only.
 */
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
{
	int reg;
	u32 val;

	/* No really, not for ILK+ */
	KASSERT(dev_priv->info->gen < 5, ("Wrong device gen"));

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
		assert_panel_unlocked(dev_priv, pipe);

	reg = DPLL(pipe);
	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;

	/* We do this three times for luck */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	DELAY(150); /* wait for warmup */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	DELAY(150); /* wait for warmup */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	DELAY(150); /* wait for warmup */
}

/**
 * intel_disable_pll - disable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe, making sure the pipe is off first.
 *
 * Note!  This is for pre-ILK only.
 */
static void intel_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
{
	int reg;
	u32 val;

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	reg = DPLL(pipe);
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
}

/**
 * intel_enable_pch_pll - enable PCH PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
 * drives the transcoder clock.
 */
static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
				 enum i915_pipe pipe)
{
	int reg;
	u32 val;

	if (pipe > 1)
		return;

	/* PCH only available on ILK+ */
	KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));

	/* PCH refclock must be enabled first */
	assert_pch_refclk_enabled(dev_priv);

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	DELAY(200);
}

static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
				  enum i915_pipe pipe)
{
	int reg;
	u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
		pll_sel = TRANSC_DPLL_ENABLE;

	if (pipe > 1)
		return;

	/* PCH only available on ILK+ */
	KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));

	/* Make sure transcoder isn't still depending on us */
	assert_transcoder_disabled(dev_priv, pipe);

	if (pipe == 0)
		pll_sel |= TRANSC_DPLLA_SEL;
	else if (pipe == 1)
		pll_sel |= TRANSC_DPLLB_SEL;

	if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
		return;

	reg = PCH_DPLL(pipe);
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	DELAY(200);
}

static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
				    enum i915_pipe pipe)
{
	int reg;
	u32 val, pipeconf_val;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

	/* PCH only available on ILK+ */
	KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));

	/* Make sure PCH DPLL is enabled */
	assert_pch_pll_enabled(dev_priv, pipe);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv->dev)) {
		/*
		 * Make the BPC in the transcoder consistent with
		 * the BPC in the pipeconf reg.
		 */
		val &= ~PIPE_BPC_MASK;
		val |= pipeconf_val & PIPE_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv->dev) &&
		    intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (_intel_wait_for(dev_priv->dev, I915_READ(reg) & TRANS_STATE_ENABLE,
	    100, 1, "915trc"))
		DRM_ERROR("failed to enable transcoder %d\n", pipe);
}

static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
				     enum i915_pipe pipe)
{
	int reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (_intel_wait_for(dev_priv->dev,
	    (I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50,
	    1, "915trd"))
		DRM_ERROR("failed to disable transcoder %d\n", pipe);
}

/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to enable
 * @pch_port: on ILK+, is this pipe driving a PCH port or not
 *
 * Enable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe is actually running (i.e. first vblank) before
 * returning.
 */
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
			      bool pch_port)
{
	int reg;
	u32 val;

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (!HAS_PCH_SPLIT(dev_priv->dev))
		assert_pll_enabled(dev_priv, pipe);
	else {
		if (pch_port) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pipe);
			assert_fdi_tx_pll_enabled(dev_priv, pipe);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE)
		return;

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}

/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to disable
 *
 * Disable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
			       enum i915_pipe pipe)
{
	int reg;
	u32 val;

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
	intel_wait_for_pipe_off(dev_priv->dev, pipe);
}

/*
 * Plane regs are double buffered, going from enabled->disabled needs a
 * trigger in order to latch.  The display address reg provides this.
 */
static void intel_flush_display_plane(struct drm_i915_private *dev_priv,
				      enum plane plane)
{
	I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
	I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
}

/**
 * intel_enable_plane - enable a display plane on a given pipe
 * @dev_priv: i915 private structure
 * @plane: plane to enable
 * @pipe: pipe being fed
 *
 * Enable @plane on @pipe, making sure that @pipe is running first.
 */
static void intel_enable_plane(struct drm_i915_private *dev_priv,
			       enum plane plane, enum i915_pipe pipe)
{
	int reg;
	u32 val;

	/* If the pipe isn't enabled, we can't pump pixels and may hang */
	assert_pipe_enabled(dev_priv, pipe);

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	if (val & DISPLAY_PLANE_ENABLE)
		return;

	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
	intel_flush_display_plane(dev_priv, plane);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}

/**
 * intel_disable_plane - disable a display plane
 * @dev_priv: i915 private structure
 * @plane: plane to disable
 * @pipe: pipe consuming the data
 *
 * Disable @plane; should be an independent operation.
 */
static void intel_disable_plane(struct drm_i915_private *dev_priv,
				enum plane plane, enum i915_pipe pipe)
{
	int reg;
	u32 val;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	if ((val & DISPLAY_PLANE_ENABLE) == 0)
		return;

	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
	intel_flush_display_plane(dev_priv, plane);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}

static void disable_pch_dp(struct drm_i915_private *dev_priv,
			   enum i915_pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);
	if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
		DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
		I915_WRITE(reg, val & ~DP_PORT_EN);
	}
}

static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
			     enum i915_pipe pipe, int reg)
{
	u32 val = I915_READ(reg);
	if (hdmi_pipe_enabled(dev_priv, pipe, val)) {
		DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
			      reg, pipe);
		I915_WRITE(reg, val & ~PORT_ENABLE);
	}
}

/* Disable any ports connected to this transcoder */
static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
				    enum i915_pipe pipe)
{
	u32 reg, val;

	val = I915_READ(PCH_PP_CONTROL);
	I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);

	disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	reg = PCH_ADPA;
	val = I915_READ(reg);
	if (adpa_pipe_enabled(dev_priv, pipe, val))
		I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);

	reg = PCH_LVDS;
	val = I915_READ(reg);
	if (lvds_pipe_enabled(dev_priv, pipe, val)) {
		DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
		I915_WRITE(reg, val & ~LVDS_PORT_EN);
		POSTING_READ(reg);
		DELAY(100);
	}

	disable_pch_hdmi(dev_priv, pipe, HDMIB);
	disable_pch_hdmi(dev_priv, pipe, HDMIC);
	disable_pch_hdmi(dev_priv, pipe, HDMID);
}

static void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (_intel_wait_for(dev,
	    (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10,
	    1, "915fbd")) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}

static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int plane, i;
	u32 fbc_ctl, fbc_ctl2;

	cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 64B units */
	cfb_pitch = (cfb_pitch / 64) - 1;
	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	/* Set it up... */
	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
	fbc_ctl2 |= plane;
	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
	I915_WRITE(FBC_FENCE_OFF, crtc->y);

	/* enable it... */
	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d\n",
		      cfb_pitch, crtc->y, intel_crtc->plane);
}

static bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
	I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}

static void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}

static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

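	/*
	 * GEN6_BLITTER_ECOSKPD appears to follow the masked-write convention
	 * common to GT registers: the high 16 bits act as write enables for
	 * the corresponding low bits.  The sequence below sets the mask bit,
	 * latches FBC_NOTIFY, then clears the mask again.
	 */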
	/* Make sure blitter notifies FBC of writes */
	gen6_gt_force_wake_get(dev_priv);
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
	gen6_gt_force_wake_put(dev_priv);
}

static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	dpfc_ctl &= DPFC_RESERVED;
	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
	/* Set persistent mode for front-buffer rendering, ala X. */
	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
	dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}

static void ironlake_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool ironlake_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
1735 
1736 bool intel_fbc_enabled(struct drm_device *dev)
1737 {
1738 	struct drm_i915_private *dev_priv = dev->dev_private;
1739 
1740 	if (!dev_priv->display.fbc_enabled)
1741 		return false;
1742 
1743 	return dev_priv->display.fbc_enabled(dev);
1744 }
1745 
1746 static void intel_fbc_work_fn(void *arg, int pending)
1747 {
1748 	struct intel_fbc_work *work = arg;
1749 	struct drm_device *dev = work->crtc->dev;
1750 	struct drm_i915_private *dev_priv = dev->dev_private;
1751 
1752 	DRM_LOCK(dev);
1753 	if (work == dev_priv->fbc_work) {
1754 		/* Double check that we haven't switched fb without cancelling
1755 		 * the prior work.
1756 		 */
1757 		if (work->crtc->fb == work->fb) {
1758 			dev_priv->display.enable_fbc(work->crtc,
1759 						     work->interval);
1760 
1761 			dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
1762 			dev_priv->cfb_fb = work->crtc->fb->base.id;
1763 			dev_priv->cfb_y = work->crtc->y;
1764 		}
1765 
1766 		dev_priv->fbc_work = NULL;
1767 	}
1768 	DRM_UNLOCK(dev);
1769 
1770 	drm_free(work, DRM_MEM_KMS);
1771 }
1772 
1773 static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
1774 {
1775 	u_int pending;
1776 
1777 	if (dev_priv->fbc_work == NULL)
1778 		return;
1779 
1780 	DRM_DEBUG_KMS("cancelling pending FBC enable\n");
1781 
1782 	/* Synchronisation is provided by struct_mutex and checking of
1783 	 * dev_priv->fbc_work, so we can perform the cancellation
1784 	 * entirely asynchronously.
1785 	 */
1786 	if (taskqueue_cancel_timeout(dev_priv->tq, &dev_priv->fbc_work->task,
1787 	    &pending) == 0)
1788 		/* tasklet was killed before being run, clean up */
1789 		drm_free(dev_priv->fbc_work, DRM_MEM_KMS);
1790 
1791 	/* Mark the work as no longer wanted so that if it does
1792 	 * wake up (because the work was already running and waiting
1793 	 * for our mutex), it will discover that it is no longer
1794 	 * necessary to run.
1795 	 */
1796 	dev_priv->fbc_work = NULL;
1797 }
1798 
1799 static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1800 {
1801 	struct intel_fbc_work *work;
1802 	struct drm_device *dev = crtc->dev;
1803 	struct drm_i915_private *dev_priv = dev->dev_private;
1804 
1805 	if (!dev_priv->display.enable_fbc)
1806 		return;
1807 
1808 	intel_cancel_fbc_work(dev_priv);
1809 
1810 	work = kmalloc(sizeof(*work), DRM_MEM_KMS, M_WAITOK | M_ZERO);
1811 	work->crtc = crtc;
1812 	work->fb = crtc->fb;
1813 	work->interval = interval;
1814 	TIMEOUT_TASK_INIT(dev_priv->tq, &work->task, 0, intel_fbc_work_fn,
1815 	    work);
1816 
1817 	dev_priv->fbc_work = work;
1818 
1819 	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
1820 
1821 	/* Delay the actual enabling to let pageflipping cease and the
1822 	 * display settle before starting the compression. Note that
1823 	 * this delay also serves a second purpose: it allows for a
1824 	 * vblank to pass after disabling the FBC before we attempt
1825 	 * to modify the control registers.
1826 	 *
1827 	 * A more complicated solution would involve tracking vblanks
1828 	 * following the termination of the page-flipping sequence
1829 	 * and indeed performing the enable as a co-routine and not
1830 	 * waiting synchronously upon the vblank.
1831 	 */
1832 	taskqueue_enqueue_timeout(dev_priv->tq, &work->task,
1833 	    msecs_to_jiffies(50));
1834 }
1835 
1836 void intel_disable_fbc(struct drm_device *dev)
1837 {
1838 	struct drm_i915_private *dev_priv = dev->dev_private;
1839 
1840 	intel_cancel_fbc_work(dev_priv);
1841 
1842 	if (!dev_priv->display.disable_fbc)
1843 		return;
1844 
1845 	dev_priv->display.disable_fbc(dev);
1846 	dev_priv->cfb_plane = -1;
1847 }
1848 
1849 /**
1850  * intel_update_fbc - enable/disable FBC as needed
1851  * @dev: the drm_device
1852  *
1853  * Set up the framebuffer compression hardware at mode set time.  We
1854  * enable it if possible:
1855  *   - plane A only (on pre-965)
1856  *   - no pixel multiply/line duplication
1857  *   - no alpha buffer discard
1858  *   - no dual wide
1859  *   - framebuffer <= 2048 in width, 1536 in height
1860  *
1861  * We can't assume that any compression will take place (worst case),
1862  * so the compressed buffer has to be the same size as the uncompressed
1863  * one.  It also must reside (along with the line length buffer) in
1864  * stolen memory.
1865  *
1866  * We need to enable/disable FBC on a global basis.
1867  */
1868 static void intel_update_fbc(struct drm_device *dev)
1869 {
1870 	struct drm_i915_private *dev_priv = dev->dev_private;
1871 	struct drm_crtc *crtc = NULL, *tmp_crtc;
1872 	struct intel_crtc *intel_crtc;
1873 	struct drm_framebuffer *fb;
1874 	struct intel_framebuffer *intel_fb;
1875 	struct drm_i915_gem_object *obj;
1876 	int enable_fbc;
1877 
1878 	DRM_DEBUG_KMS("\n");
1879 
1880 	if (!i915_powersave)
1881 		return;
1882 
1883 	if (!I915_HAS_FBC(dev))
1884 		return;
1885 
1886 	/*
1887 	 * If FBC is already on, we just have to verify that we can
1888 	 * keep it that way...
1889 	 * Need to disable if:
1890 	 *   - more than one pipe is active
1891 	 *   - changing FBC params (stride, fence, mode)
1892 	 *   - new fb is too large to fit in compressed buffer
1893 	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
1894 	 */
1895 	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
1896 		if (tmp_crtc->enabled && tmp_crtc->fb) {
1897 			if (crtc) {
1898 				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
1899 				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
1900 				goto out_disable;
1901 			}
1902 			crtc = tmp_crtc;
1903 		}
1904 	}
1905 
1906 	if (!crtc || crtc->fb == NULL) {
1907 		DRM_DEBUG_KMS("no output, disabling\n");
1908 		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
1909 		goto out_disable;
1910 	}
1911 
1912 	intel_crtc = to_intel_crtc(crtc);
1913 	fb = crtc->fb;
1914 	intel_fb = to_intel_framebuffer(fb);
1915 	obj = intel_fb->obj;
1916 
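	/*
	 * i915_enable_fbc < 0 requests the per-chip default, which is
	 * currently off for all generations up to and including gen 6.
	 */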
1917 	enable_fbc = i915_enable_fbc;
1918 	if (enable_fbc < 0) {
1919 		DRM_DEBUG_KMS("fbc set to per-chip default\n");
1920 		enable_fbc = 1;
1921 		if (INTEL_INFO(dev)->gen <= 6)
1922 			enable_fbc = 0;
1923 	}
1924 	if (!enable_fbc) {
1925 		DRM_DEBUG_KMS("fbc disabled per module param\n");
1926 		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
1927 		goto out_disable;
1928 	}
1929 	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
1930 		DRM_DEBUG_KMS("framebuffer too large, disabling "
1931 			      "compression\n");
1932 		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1933 		goto out_disable;
1934 	}
1935 	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
1936 	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
1937 		DRM_DEBUG_KMS("mode incompatible with compression, "
1938 			      "disabling\n");
1939 		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
1940 		goto out_disable;
1941 	}
1942 	if ((crtc->mode.hdisplay > 2048) ||
1943 	    (crtc->mode.vdisplay > 1536)) {
1944 		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
1945 		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
1946 		goto out_disable;
1947 	}
1948 	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
1949 		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
1950 		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
1951 		goto out_disable;
1952 	}
1953 	if (obj->tiling_mode != I915_TILING_X ||
1954 	    obj->fence_reg == I915_FENCE_REG_NONE) {
1955 		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
1956 		dev_priv->no_fbc_reason = FBC_NOT_TILED;
1957 		goto out_disable;
1958 	}
1959 
1960 #ifdef DDB
1961 	/* If the kernel debugger is active, always disable compression */
1962 	if (db_active)
1963 		goto out_disable;
1964 #endif
1965 
1966 	/* If the scanout has not changed, don't modify the FBC settings.
1967 	 * Note that we make the fundamental assumption that the fb->obj
1968 	 * cannot be unpinned (and have its GTT offset and fence revoked)
1969 	 * without first being decoupled from the scanout and FBC disabled.
1970 	 */
1971 	if (dev_priv->cfb_plane == intel_crtc->plane &&
1972 	    dev_priv->cfb_fb == fb->base.id &&
1973 	    dev_priv->cfb_y == crtc->y)
1974 		return;
1975 
1976 	if (intel_fbc_enabled(dev)) {
1977 		/* We update FBC along two paths, after changing fb/crtc
1978 		 * configuration (modeswitching) and after page-flipping
1979 		 * finishes. For the latter, we know that not only did
1980 		 * we disable the FBC at the start of the page-flip
1981 		 * sequence, but also more than one vblank has passed.
1982 		 *
1983 		 * For the former case of modeswitching, it is possible
1984 		 * to switch between two FBC valid configurations
1985 		 * instantaneously so we do need to disable the FBC
1986 		 * before we can modify its control registers. We also
1987 		 * have to wait for the next vblank for that to take
1988 		 * effect. However, since we delay enabling FBC we can
1989 		 * assume that a vblank has passed since disabling and
1990 		 * that we can safely alter the registers in the deferred
1991 		 * callback.
1992 		 *
1993 		 * In the scenario that we go from a valid to invalid
1994 		 * and then back to valid FBC configuration we have
1995 		 * no strict enforcement that a vblank occurred since
1996 		 * disabling the FBC. However, along all current pipe
1997 		 * disabling paths we do need to wait for a vblank at
1998 		 * some point. And we wait before enabling FBC anyway.
1999 		 */
2000 		DRM_DEBUG_KMS("disabling active FBC for update\n");
2001 		intel_disable_fbc(dev);
2002 	}
2003 
2004 	intel_enable_fbc(crtc, 500);
2005 	return;
2006 
2007 out_disable:
2008 	/* Multiple disables should be harmless */
2009 	if (intel_fbc_enabled(dev)) {
2010 		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
2011 		intel_disable_fbc(dev);
2012 	}
2013 }
2014 
2015 int
2016 intel_pin_and_fence_fb_obj(struct drm_device *dev,
2017 			   struct drm_i915_gem_object *obj,
2018 			   struct intel_ring_buffer *pipelined)
2019 {
2020 	struct drm_i915_private *dev_priv = dev->dev_private;
2021 	u32 alignment;
2022 	int ret;
2023 
2024 	alignment = 0; /* shut gcc */
2025 	switch (obj->tiling_mode) {
2026 	case I915_TILING_NONE:
2027 		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
2028 			alignment = 128 * 1024;
2029 		else if (INTEL_INFO(dev)->gen >= 4)
2030 			alignment = 4 * 1024;
2031 		else
2032 			alignment = 64 * 1024;
2033 		break;
2034 	case I915_TILING_X:
2035 		/* pin() will align the object as required by fence */
2036 		alignment = 0;
2037 		break;
2038 	case I915_TILING_Y:
2039 		/* FIXME: Is this true? */
2040 		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
2041 		return -EINVAL;
2042 	default:
2043 		KASSERT(0, ("Wrong tiling for fb obj"));
2044 	}
2045 
2046 	dev_priv->mm.interruptible = false;
2047 	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
2048 	if (ret)
2049 		goto err_interruptible;
2050 
2051 	/* Install a fence for tiled scan-out. Pre-i965 always needs a
2052 	 * fence, whereas 965+ only requires a fence if using
2053 	 * framebuffer compression.  For simplicity, we always install
2054 	 * a fence as the cost is not that onerous.
2055 	 */
2056 	if (obj->tiling_mode != I915_TILING_NONE) {
2057 		ret = i915_gem_object_get_fence(obj, pipelined);
2058 		if (ret)
2059 			goto err_unpin;
2060 
2061 		i915_gem_object_pin_fence(obj);
2062 	}
2063 
2064 	dev_priv->mm.interruptible = true;
2065 	return 0;
2066 
2067 err_unpin:
2068 	i915_gem_object_unpin(obj);
2069 err_interruptible:
2070 	dev_priv->mm.interruptible = true;
2071 	return ret;
2072 }
2073 
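/*
 * Undo intel_pin_and_fence_fb_obj(): drop the fence reference pinned for
 * scan-out, then unpin the object itself.
 */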
2074 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
2075 {
2076 	i915_gem_object_unpin_fence(obj);
2077 	i915_gem_object_unpin(obj);
2078 }
2079 
2080 static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2081 			     int x, int y)
2082 {
2083 	struct drm_device *dev = crtc->dev;
2084 	struct drm_i915_private *dev_priv = dev->dev_private;
2085 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2086 	struct intel_framebuffer *intel_fb;
2087 	struct drm_i915_gem_object *obj;
2088 	int plane = intel_crtc->plane;
2089 	unsigned long Start, Offset;
2090 	u32 dspcntr;
2091 	u32 reg;
2092 
2093 	switch (plane) {
2094 	case 0:
2095 	case 1:
2096 		break;
2097 	default:
2098 		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2099 		return -EINVAL;
2100 	}
2101 
2102 	intel_fb = to_intel_framebuffer(fb);
2103 	obj = intel_fb->obj;
2104 
2105 	reg = DSPCNTR(plane);
2106 	dspcntr = I915_READ(reg);
2107 	/* Mask out pixel format bits in case we change it */
2108 	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2109 	switch (fb->bits_per_pixel) {
2110 	case 8:
2111 		dspcntr |= DISPPLANE_8BPP;
2112 		break;
2113 	case 16:
2114 		if (fb->depth == 15)
2115 			dspcntr |= DISPPLANE_15_16BPP;
2116 		else
2117 			dspcntr |= DISPPLANE_16BPP;
2118 		break;
2119 	case 24:
2120 	case 32:
2121 		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
2122 		break;
2123 	default:
2124 		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
2125 		return -EINVAL;
2126 	}
2127 	if (INTEL_INFO(dev)->gen >= 4) {
2128 		if (obj->tiling_mode != I915_TILING_NONE)
2129 			dspcntr |= DISPPLANE_TILED;
2130 		else
2131 			dspcntr &= ~DISPPLANE_TILED;
2132 	}
2133 
2134 	I915_WRITE(reg, dspcntr);
2135 
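	/*
	 * Offset is the byte offset of pixel (x, y) within the framebuffer:
	 * y * stride + x * bytes-per-pixel.  Pre-965 parts program the sum
	 * Start + Offset directly, while gen4+ split the GTT base and the
	 * offset across DSPSURF/DSPTILEOFF/DSPADDR.
	 */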
2136 	Start = obj->gtt_offset;
2137 	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2138 
2139 	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2140 		      Start, Offset, x, y, fb->pitches[0]);
2141 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2142 	if (INTEL_INFO(dev)->gen >= 4) {
2143 		I915_WRITE(DSPSURF(plane), Start);
2144 		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2145 		I915_WRITE(DSPADDR(plane), Offset);
2146 	} else
2147 		I915_WRITE(DSPADDR(plane), Start + Offset);
2148 	POSTING_READ(reg);
2149 
2150 	return 0;
2151 }
2152 
2153 static int ironlake_update_plane(struct drm_crtc *crtc,
2154 				 struct drm_framebuffer *fb, int x, int y)
2155 {
2156 	struct drm_device *dev = crtc->dev;
2157 	struct drm_i915_private *dev_priv = dev->dev_private;
2158 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2159 	struct intel_framebuffer *intel_fb;
2160 	struct drm_i915_gem_object *obj;
2161 	int plane = intel_crtc->plane;
2162 	unsigned long Start, Offset;
2163 	u32 dspcntr;
2164 	u32 reg;
2165 
2166 	switch (plane) {
2167 	case 0:
2168 	case 1:
2169 	case 2:
2170 		break;
2171 	default:
2172 		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2173 		return -EINVAL;
2174 	}
2175 
2176 	intel_fb = to_intel_framebuffer(fb);
2177 	obj = intel_fb->obj;
2178 
2179 	reg = DSPCNTR(plane);
2180 	dspcntr = I915_READ(reg);
2181 	/* Mask out pixel format bits in case we change it */
2182 	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2183 	switch (fb->bits_per_pixel) {
2184 	case 8:
2185 		dspcntr |= DISPPLANE_8BPP;
2186 		break;
2187 	case 16:
2188 		if (fb->depth != 16) {
2189 			DRM_ERROR("bpp 16, depth %d\n", fb->depth);
2190 			return -EINVAL;
2191 		}
2192 
2193 		dspcntr |= DISPPLANE_16BPP;
2194 		break;
2195 	case 24:
2196 	case 32:
2197 		if (fb->depth == 24)
2198 			dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
2199 		else if (fb->depth == 30)
2200 			dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
2201 		else {
2202 			DRM_ERROR("bpp %d depth %d\n", fb->bits_per_pixel,
2203 			    fb->depth);
2204 			return -EINVAL;
2205 		}
2206 		break;
2207 	default:
2208 		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
2209 		return -EINVAL;
2210 	}
2211 
2212 	if (obj->tiling_mode != I915_TILING_NONE)
2213 		dspcntr |= DISPPLANE_TILED;
2214 	else
2215 		dspcntr &= ~DISPPLANE_TILED;
2216 
2217 	/* Trickle feed must be disabled on ILK+ display planes. */
2218 	dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2219 
2220 	I915_WRITE(reg, dspcntr);
2221 
2222 	Start = obj->gtt_offset;
2223 	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2224 
2225 	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2226 		      Start, Offset, x, y, fb->pitches[0]);
2227 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2228 	I915_WRITE(DSPSURF(plane), Start);
2229 	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2230 	I915_WRITE(DSPADDR(plane), Offset);
2231 	POSTING_READ(reg);
2232 
2233 	return 0;
2234 }
2235 
2236 /* Assume fb object is pinned & idle & fenced and just update base pointers */
2237 static int
2238 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2239 			   int x, int y, enum mode_set_atomic state)
2240 {
2241 	struct drm_device *dev = crtc->dev;
2242 	struct drm_i915_private *dev_priv = dev->dev_private;
2243 	int ret;
2244 
2245 	ret = dev_priv->display.update_plane(crtc, fb, x, y);
2246 	if (ret)
2247 		return ret;
2248 
2249 	intel_update_fbc(dev);
2250 	intel_increase_pllclock(crtc);
2251 
2252 	return 0;
2253 }
2254 
2255 static int
2256 intel_finish_fb(struct drm_framebuffer *old_fb)
2257 {
2258 	struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2259 	struct drm_device *dev = obj->base.dev;
2260 	struct drm_i915_private *dev_priv = dev->dev_private;
2261 	bool was_interruptible = dev_priv->mm.interruptible;
2262 	int ret;
2263 
2264 	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
2265 	while (!atomic_load_acq_int(&dev_priv->mm.wedged) &&
2266 	    atomic_load_acq_int(&obj->pending_flip) != 0) {
2267 		lksleep(&obj->pending_flip, &dev->event_lock,
2268 		    0, "915flp", 0);
2269 	}
2270 	lockmgr(&dev->event_lock, LK_RELEASE);
2271 
2272 	/* Big Hammer, we also need to ensure that any pending
2273 	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2274 	 * current scanout is retired before unpinning the old
2275 	 * framebuffer.
2276 	 *
2277 	 * This should only fail upon a hung GPU, in which case we
2278 	 * can safely continue.
2279 	 */
2280 	dev_priv->mm.interruptible = false;
2281 	ret = i915_gem_object_finish_gpu(obj);
2282 	dev_priv->mm.interruptible = was_interruptible;
2283 	return ret;
2284 }
2285 
2286 static int
2287 intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2288 		    struct drm_framebuffer *old_fb)
2289 {
2290 	struct drm_device *dev = crtc->dev;
2291 #if 0
2292 	struct drm_i915_master_private *master_priv;
2293 #else
2294 	drm_i915_private_t *dev_priv = dev->dev_private;
2295 #endif
2296 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2297 	int ret;
2298 
2299 	/* no fb bound */
2300 	if (!crtc->fb) {
2301 		DRM_ERROR("No FB bound\n");
2302 		return 0;
2303 	}
2304 
2305 	switch (intel_crtc->plane) {
2306 	case 0:
2307 	case 1:
2308 		break;
2309 	case 2:
2310 		if (IS_IVYBRIDGE(dev))
2311 			break;
2312 		/* fall through otherwise */
2313 	default:
2314 		DRM_ERROR("no plane for crtc\n");
2315 		return -EINVAL;
2316 	}
2317 
2318 	DRM_LOCK(dev);
2319 	ret = intel_pin_and_fence_fb_obj(dev,
2320 					 to_intel_framebuffer(crtc->fb)->obj,
2321 					 NULL);
2322 	if (ret != 0) {
2323 		DRM_UNLOCK(dev);
2324 		DRM_ERROR("pin & fence failed\n");
2325 		return ret;
2326 	}
2327 
2328 	if (old_fb)
2329 		intel_finish_fb(old_fb);
2330 
2331 	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
2332 					 LEAVE_ATOMIC_MODE_SET);
2333 	if (ret) {
2334 		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
2335 		DRM_UNLOCK(dev);
2336 		DRM_ERROR("failed to update base address\n");
2337 		return ret;
2338 	}
2339 
2340 	if (old_fb) {
2341 		intel_wait_for_vblank(dev, intel_crtc->pipe);
2342 		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
2343 	}
2344 
2345 	DRM_UNLOCK(dev);
2346 
2347 #if 0
2348 	if (!dev->primary->master)
2349 		return 0;
2350 
2351 	master_priv = dev->primary->master->driver_priv;
2352 	if (!master_priv->sarea_priv)
2353 		return 0;
2354 
2355 	if (intel_crtc->pipe) {
2356 		master_priv->sarea_priv->pipeB_x = x;
2357 		master_priv->sarea_priv->pipeB_y = y;
2358 	} else {
2359 		master_priv->sarea_priv->pipeA_x = x;
2360 		master_priv->sarea_priv->pipeA_y = y;
2361 	}
2362 #else
2363 
2364 	if (!dev_priv->sarea_priv)
2365 		return 0;
2366 
2367 	if (intel_crtc->pipe) {
2368 		dev_priv->sarea_priv->planeB_x = x;
2369 		dev_priv->sarea_priv->planeB_y = y;
2370 	} else {
2371 		dev_priv->sarea_priv->planeA_x = x;
2372 		dev_priv->sarea_priv->planeA_y = y;
2373 	}
2374 #endif
2375 
2376 	return 0;
2377 }
2378 
2379 static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
2380 {
2381 	struct drm_device *dev = crtc->dev;
2382 	struct drm_i915_private *dev_priv = dev->dev_private;
2383 	u32 dpa_ctl;
2384 
2385 	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
2386 	dpa_ctl = I915_READ(DP_A);
2387 	dpa_ctl &= ~DP_PLL_FREQ_MASK;
2388 
2389 	if (clock < 200000) {
2390 		u32 temp;
2391 		dpa_ctl |= DP_PLL_FREQ_160MHZ;
2392 		/* workaround for 160MHz:
2393 		   1) program 0x4600c bits 15:0 = 0x8124
2394 		   2) program 0x46010 bit 0 = 1
2395 		   3) program 0x46034 bit 24 = 1
2396 		   4) program 0x64000 bit 14 = 1
2397 		   */
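		/*
		 * Only steps 1-3 are programmed here; step 4 appears to map
		 * to DP_PLL_ENABLE (bit 14) in DP_A (0x64000) and is applied
		 * when the eDP PLL itself is switched on.
		 */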
2398 		temp = I915_READ(0x4600c);
2399 		temp &= 0xffff0000;
2400 		I915_WRITE(0x4600c, temp | 0x8124);
2401 
2402 		temp = I915_READ(0x46010);
2403 		I915_WRITE(0x46010, temp | 1);
2404 
2405 		temp = I915_READ(0x46034);
2406 		I915_WRITE(0x46034, temp | (1 << 24));
2407 	} else {
2408 		dpa_ctl |= DP_PLL_FREQ_270MHZ;
2409 	}
2410 	I915_WRITE(DP_A, dpa_ctl);
2411 
2412 	POSTING_READ(DP_A);
2413 	DELAY(500);
2414 }
2415 
2416 static void intel_fdi_normal_train(struct drm_crtc *crtc)
2417 {
2418 	struct drm_device *dev = crtc->dev;
2419 	struct drm_i915_private *dev_priv = dev->dev_private;
2420 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2421 	int pipe = intel_crtc->pipe;
2422 	u32 reg, temp;
2423 
2424 	/* enable normal train */
2425 	reg = FDI_TX_CTL(pipe);
2426 	temp = I915_READ(reg);
2427 	if (IS_IVYBRIDGE(dev)) {
2428 		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2429 		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
2430 	} else {
2431 		temp &= ~FDI_LINK_TRAIN_NONE;
2432 		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
2433 	}
2434 	I915_WRITE(reg, temp);
2435 
2436 	reg = FDI_RX_CTL(pipe);
2437 	temp = I915_READ(reg);
2438 	if (HAS_PCH_CPT(dev)) {
2439 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2440 		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2441 	} else {
2442 		temp &= ~FDI_LINK_TRAIN_NONE;
2443 		temp |= FDI_LINK_TRAIN_NONE;
2444 	}
2445 	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2446 
2447 	/* wait one idle pattern time */
2448 	POSTING_READ(reg);
2449 	DELAY(1000);
2450 
2451 	/* IVB wants error correction enabled */
2452 	if (IS_IVYBRIDGE(dev))
2453 		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2454 			   FDI_FE_ERRC_ENABLE);
2455 }
2456 
2457 static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
2458 {
2459 	struct drm_i915_private *dev_priv = dev->dev_private;
2460 	u32 flags = I915_READ(SOUTH_CHICKEN1);
2461 
2462 	flags |= FDI_PHASE_SYNC_OVR(pipe);
2463 	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
2464 	flags |= FDI_PHASE_SYNC_EN(pipe);
2465 	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
2466 	POSTING_READ(SOUTH_CHICKEN1);
2467 }
2468 
2469 /* The FDI link training functions for ILK/Ibexpeak. */
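/*
 * Training runs in two phases on both ends of the link: transmit
 * pattern 1 and poll FDI_RX_IIR until the receiver reports
 * FDI_RX_BIT_LOCK, then switch to pattern 2 and poll for
 * FDI_RX_SYMBOL_LOCK.  Each lock bit is written back to acknowledge it
 * before moving on.
 */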
2470 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2471 {
2472 	struct drm_device *dev = crtc->dev;
2473 	struct drm_i915_private *dev_priv = dev->dev_private;
2474 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2475 	int pipe = intel_crtc->pipe;
2476 	int plane = intel_crtc->plane;
2477 	u32 reg, temp, tries;
2478 
2479 	/* FDI needs bits from pipe & plane first */
2480 	assert_pipe_enabled(dev_priv, pipe);
2481 	assert_plane_enabled(dev_priv, plane);
2482 
2483 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2484 	   for train result */
2485 	reg = FDI_RX_IMR(pipe);
2486 	temp = I915_READ(reg);
2487 	temp &= ~FDI_RX_SYMBOL_LOCK;
2488 	temp &= ~FDI_RX_BIT_LOCK;
2489 	I915_WRITE(reg, temp);
2490 	I915_READ(reg);
2491 	DELAY(150);
2492 
2493 	/* enable CPU FDI TX and PCH FDI RX */
2494 	reg = FDI_TX_CTL(pipe);
2495 	temp = I915_READ(reg);
2496 	temp &= ~(7 << 19);
2497 	temp |= (intel_crtc->fdi_lanes - 1) << 19;
2498 	temp &= ~FDI_LINK_TRAIN_NONE;
2499 	temp |= FDI_LINK_TRAIN_PATTERN_1;
2500 	I915_WRITE(reg, temp | FDI_TX_ENABLE);
2501 
2502 	reg = FDI_RX_CTL(pipe);
2503 	temp = I915_READ(reg);
2504 	temp &= ~FDI_LINK_TRAIN_NONE;
2505 	temp |= FDI_LINK_TRAIN_PATTERN_1;
2506 	I915_WRITE(reg, temp | FDI_RX_ENABLE);
2507 
2508 	POSTING_READ(reg);
2509 	DELAY(150);
2510 
2511 	/* Ironlake workaround, enable clock pointer after FDI enable */
2512 	if (HAS_PCH_IBX(dev)) {
2513 		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2514 		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2515 			   FDI_RX_PHASE_SYNC_POINTER_EN);
2516 	}
2517 
2518 	reg = FDI_RX_IIR(pipe);
2519 	for (tries = 0; tries < 5; tries++) {
2520 		temp = I915_READ(reg);
2521 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2522 
2523 		if ((temp & FDI_RX_BIT_LOCK)) {
2524 			DRM_DEBUG_KMS("FDI train 1 done.\n");
2525 			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2526 			break;
2527 		}
2528 	}
2529 	if (tries == 5)
2530 		DRM_ERROR("FDI train 1 fail!\n");
2531 
2532 	/* Train 2 */
2533 	reg = FDI_TX_CTL(pipe);
2534 	temp = I915_READ(reg);
2535 	temp &= ~FDI_LINK_TRAIN_NONE;
2536 	temp |= FDI_LINK_TRAIN_PATTERN_2;
2537 	I915_WRITE(reg, temp);
2538 
2539 	reg = FDI_RX_CTL(pipe);
2540 	temp = I915_READ(reg);
2541 	temp &= ~FDI_LINK_TRAIN_NONE;
2542 	temp |= FDI_LINK_TRAIN_PATTERN_2;
2543 	I915_WRITE(reg, temp);
2544 
2545 	POSTING_READ(reg);
2546 	DELAY(150);
2547 
2548 	reg = FDI_RX_IIR(pipe);
2549 	for (tries = 0; tries < 5; tries++) {
2550 		temp = I915_READ(reg);
2551 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2552 
2553 		if (temp & FDI_RX_SYMBOL_LOCK) {
2554 			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2555 			DRM_DEBUG_KMS("FDI train 2 done.\n");
2556 			break;
2557 		}
2558 	}
2559 	if (tries == 5)
2560 		DRM_ERROR("FDI train 2 fail!\n");
2561 
2562 	DRM_DEBUG_KMS("FDI train done\n");
2563 
2564 }
2565 
2566 static const int snb_b_fdi_train_param[] = {
2567 	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2568 	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2569 	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2570 	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2571 };
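/*
 * Voltage-swing/pre-emphasis combinations (SNB B-stepping values) that
 * the training loops below try in turn until the receiver reports lock.
 */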
2572 
2573 /* The FDI link training functions for SNB/Cougarpoint. */
2574 static void gen6_fdi_link_train(struct drm_crtc *crtc)
2575 {
2576 	struct drm_device *dev = crtc->dev;
2577 	struct drm_i915_private *dev_priv = dev->dev_private;
2578 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2579 	int pipe = intel_crtc->pipe;
2580 	u32 reg, temp, i;
2581 
2582 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2583 	   for train result */
2584 	reg = FDI_RX_IMR(pipe);
2585 	temp = I915_READ(reg);
2586 	temp &= ~FDI_RX_SYMBOL_LOCK;
2587 	temp &= ~FDI_RX_BIT_LOCK;
2588 	I915_WRITE(reg, temp);
2589 
2590 	POSTING_READ(reg);
2591 	DELAY(150);
2592 
2593 	/* enable CPU FDI TX and PCH FDI RX */
2594 	reg = FDI_TX_CTL(pipe);
2595 	temp = I915_READ(reg);
2596 	temp &= ~(7 << 19);
2597 	temp |= (intel_crtc->fdi_lanes - 1) << 19;
2598 	temp &= ~FDI_LINK_TRAIN_NONE;
2599 	temp |= FDI_LINK_TRAIN_PATTERN_1;
2600 	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2601 	/* SNB-B */
2602 	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2603 	I915_WRITE(reg, temp | FDI_TX_ENABLE);
2604 
2605 	reg = FDI_RX_CTL(pipe);
2606 	temp = I915_READ(reg);
2607 	if (HAS_PCH_CPT(dev)) {
2608 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2609 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2610 	} else {
2611 		temp &= ~FDI_LINK_TRAIN_NONE;
2612 		temp |= FDI_LINK_TRAIN_PATTERN_1;
2613 	}
2614 	I915_WRITE(reg, temp | FDI_RX_ENABLE);
2615 
2616 	POSTING_READ(reg);
2617 	DELAY(150);
2618 
2619 	if (HAS_PCH_CPT(dev))
2620 		cpt_phase_pointer_enable(dev, pipe);
2621 
2622 	for (i = 0; i < 4; i++) {
2623 		reg = FDI_TX_CTL(pipe);
2624 		temp = I915_READ(reg);
2625 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2626 		temp |= snb_b_fdi_train_param[i];
2627 		I915_WRITE(reg, temp);
2628 
2629 		POSTING_READ(reg);
2630 		DELAY(500);
2631 
2632 		reg = FDI_RX_IIR(pipe);
2633 		temp = I915_READ(reg);
2634 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2635 
2636 		if (temp & FDI_RX_BIT_LOCK) {
2637 			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2638 			DRM_DEBUG_KMS("FDI train 1 done.\n");
2639 			break;
2640 		}
2641 	}
2642 	if (i == 4)
2643 		DRM_ERROR("FDI train 1 fail!\n");
2644 
2645 	/* Train 2 */
2646 	reg = FDI_TX_CTL(pipe);
2647 	temp = I915_READ(reg);
2648 	temp &= ~FDI_LINK_TRAIN_NONE;
2649 	temp |= FDI_LINK_TRAIN_PATTERN_2;
2650 	if (IS_GEN6(dev)) {
2651 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2652 		/* SNB-B */
2653 		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2654 	}
2655 	I915_WRITE(reg, temp);
2656 
2657 	reg = FDI_RX_CTL(pipe);
2658 	temp = I915_READ(reg);
2659 	if (HAS_PCH_CPT(dev)) {
2660 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2661 		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2662 	} else {
2663 		temp &= ~FDI_LINK_TRAIN_NONE;
2664 		temp |= FDI_LINK_TRAIN_PATTERN_2;
2665 	}
2666 	I915_WRITE(reg, temp);
2667 
2668 	POSTING_READ(reg);
2669 	DELAY(150);
2670 
2671 	for (i = 0; i < 4; i++) {
2672 		reg = FDI_TX_CTL(pipe);
2673 		temp = I915_READ(reg);
2674 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2675 		temp |= snb_b_fdi_train_param[i];
2676 		I915_WRITE(reg, temp);
2677 
2678 		POSTING_READ(reg);
2679 		DELAY(500);
2680 
2681 		reg = FDI_RX_IIR(pipe);
2682 		temp = I915_READ(reg);
2683 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2684 
2685 		if (temp & FDI_RX_SYMBOL_LOCK) {
2686 			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2687 			DRM_DEBUG_KMS("FDI train 2 done.\n");
2688 			break;
2689 		}
2690 	}
2691 	if (i == 4)
2692 		DRM_ERROR("FDI train 2 fail!\n");
2693 
2694 	DRM_DEBUG_KMS("FDI train done.\n");
2695 }
2696 
2697 /* Manual link training for Ivy Bridge A0 parts */
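/*
 * Unlike the ILK/SNB paths, this clears FDI_LINK_TRAIN_AUTO and steps
 * through the training patterns by hand, presumably because
 * auto-training is not reliable on A0 silicon.
 */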
2698 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2699 {
2700 	struct drm_device *dev = crtc->dev;
2701 	struct drm_i915_private *dev_priv = dev->dev_private;
2702 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2703 	int pipe = intel_crtc->pipe;
2704 	u32 reg, temp, i;
2705 
2706 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2707 	   for train result */
2708 	reg = FDI_RX_IMR(pipe);
2709 	temp = I915_READ(reg);
2710 	temp &= ~FDI_RX_SYMBOL_LOCK;
2711 	temp &= ~FDI_RX_BIT_LOCK;
2712 	I915_WRITE(reg, temp);
2713 
2714 	POSTING_READ(reg);
2715 	DELAY(150);
2716 
2717 	/* enable CPU FDI TX and PCH FDI RX */
2718 	reg = FDI_TX_CTL(pipe);
2719 	temp = I915_READ(reg);
2720 	temp &= ~(7 << 19);
2721 	temp |= (intel_crtc->fdi_lanes - 1) << 19;
2722 	temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
2723 	temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
2724 	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2725 	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2726 	temp |= FDI_COMPOSITE_SYNC;
2727 	I915_WRITE(reg, temp | FDI_TX_ENABLE);
2728 
2729 	reg = FDI_RX_CTL(pipe);
2730 	temp = I915_READ(reg);
2731 	temp &= ~FDI_LINK_TRAIN_AUTO;
2732 	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2733 	temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2734 	temp |= FDI_COMPOSITE_SYNC;
2735 	I915_WRITE(reg, temp | FDI_RX_ENABLE);
2736 
2737 	POSTING_READ(reg);
2738 	DELAY(150);
2739 
2740 	for (i = 0; i < 4; i++) {
2741 		reg = FDI_TX_CTL(pipe);
2742 		temp = I915_READ(reg);
2743 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2744 		temp |= snb_b_fdi_train_param[i];
2745 		I915_WRITE(reg, temp);
2746 
2747 		POSTING_READ(reg);
2748 		DELAY(500);
2749 
2750 		reg = FDI_RX_IIR(pipe);
2751 		temp = I915_READ(reg);
2752 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2753 
2754 		if (temp & FDI_RX_BIT_LOCK ||
2755 		    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2756 			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2757 			DRM_DEBUG_KMS("FDI train 1 done.\n");
2758 			break;
2759 		}
2760 	}
2761 	if (i == 4)
2762 		DRM_ERROR("FDI train 1 fail!\n");
2763 
2764 	/* Train 2 */
2765 	reg = FDI_TX_CTL(pipe);
2766 	temp = I915_READ(reg);
2767 	temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2768 	temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2769 	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2770 	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2771 	I915_WRITE(reg, temp);
2772 
2773 	reg = FDI_RX_CTL(pipe);
2774 	temp = I915_READ(reg);
2775 	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2776 	temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2777 	I915_WRITE(reg, temp);
2778 
2779 	POSTING_READ(reg);
2780 	DELAY(150);
2781 
2782 	for (i = 0; i < 4; i++) {
2783 		reg = FDI_TX_CTL(pipe);
2784 		temp = I915_READ(reg);
2785 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2786 		temp |= snb_b_fdi_train_param[i];
2787 		I915_WRITE(reg, temp);
2788 
2789 		POSTING_READ(reg);
2790 		DELAY(500);
2791 
2792 		reg = FDI_RX_IIR(pipe);
2793 		temp = I915_READ(reg);
2794 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2795 
2796 		if (temp & FDI_RX_SYMBOL_LOCK) {
2797 			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2798 			DRM_DEBUG_KMS("FDI train 2 done.\n");
2799 			break;
2800 		}
2801 	}
2802 	if (i == 4)
2803 		DRM_ERROR("FDI train 2 fail!\n");
2804 
2805 	DRM_DEBUG_KMS("FDI train done.\n");
2806 }
2807 
2808 static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
2809 {
2810 	struct drm_device *dev = crtc->dev;
2811 	struct drm_i915_private *dev_priv = dev->dev_private;
2812 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2813 	int pipe = intel_crtc->pipe;
2814 	u32 reg, temp;
2815 
2816 	/* Write the TU size bits so error detection works */
2817 	I915_WRITE(FDI_RX_TUSIZE1(pipe),
2818 		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
2819 
2820 	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
2821 	reg = FDI_RX_CTL(pipe);
2822 	temp = I915_READ(reg);
2823 	temp &= ~((0x7 << 19) | (0x7 << 16));
2824 	temp |= (intel_crtc->fdi_lanes - 1) << 19;
2825 	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2826 	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
2827 
2828 	POSTING_READ(reg);
2829 	DELAY(200);
2830 
2831 	/* Switch from Rawclk to PCDclk */
2832 	temp = I915_READ(reg);
2833 	I915_WRITE(reg, temp | FDI_PCDCLK);
2834 
2835 	POSTING_READ(reg);
2836 	DELAY(200);
2837 
2838 	/* Enable CPU FDI TX PLL, always on for Ironlake */
2839 	reg = FDI_TX_CTL(pipe);
2840 	temp = I915_READ(reg);
2841 	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2842 		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
2843 
2844 		POSTING_READ(reg);
2845 		DELAY(100);
2846 	}
2847 }
2848 
2849 static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
2850 {
2851 	struct drm_i915_private *dev_priv = dev->dev_private;
2852 	u32 flags = I915_READ(SOUTH_CHICKEN1);
2853 
2854 	flags &= ~(FDI_PHASE_SYNC_EN(pipe));
2855 	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
2856 	flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
2857 	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
2858 	POSTING_READ(SOUTH_CHICKEN1);
2859 }
2860 
2861 static void ironlake_fdi_disable(struct drm_crtc *crtc)
2862 {
2863 	struct drm_device *dev = crtc->dev;
2864 	struct drm_i915_private *dev_priv = dev->dev_private;
2865 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2866 	int pipe = intel_crtc->pipe;
2867 	u32 reg, temp;
2868 
2869 	/* disable CPU FDI tx and PCH FDI rx */
2870 	reg = FDI_TX_CTL(pipe);
2871 	temp = I915_READ(reg);
2872 	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2873 	POSTING_READ(reg);
2874 
2875 	reg = FDI_RX_CTL(pipe);
2876 	temp = I915_READ(reg);
2877 	temp &= ~(0x7 << 16);
2878 	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2879 	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2880 
2881 	POSTING_READ(reg);
2882 	DELAY(100);
2883 
2884 	/* Ironlake workaround, disable clock pointer after downing FDI */
2885 	if (HAS_PCH_IBX(dev)) {
2886 		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2887 		I915_WRITE(FDI_RX_CHICKEN(pipe),
2888 			   I915_READ(FDI_RX_CHICKEN(pipe)) &
2889 			   ~FDI_RX_PHASE_SYNC_POINTER_EN);
2890 	} else if (HAS_PCH_CPT(dev)) {
2891 		cpt_phase_pointer_disable(dev, pipe);
2892 	}
2893 
2894 	/* still set train pattern 1 */
2895 	reg = FDI_TX_CTL(pipe);
2896 	temp = I915_READ(reg);
2897 	temp &= ~FDI_LINK_TRAIN_NONE;
2898 	temp |= FDI_LINK_TRAIN_PATTERN_1;
2899 	I915_WRITE(reg, temp);
2900 
2901 	reg = FDI_RX_CTL(pipe);
2902 	temp = I915_READ(reg);
2903 	if (HAS_PCH_CPT(dev)) {
2904 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2905 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2906 	} else {
2907 		temp &= ~FDI_LINK_TRAIN_NONE;
2908 		temp |= FDI_LINK_TRAIN_PATTERN_1;
2909 	}
2910 	/* BPC in FDI rx is consistent with that in PIPECONF */
2911 	temp &= ~(0x07 << 16);
2912 	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2913 	I915_WRITE(reg, temp);
2914 
2915 	POSTING_READ(reg);
2916 	DELAY(100);
2917 }
2918 
2919 /*
2920  * When we disable a pipe, we need to clear any pending scanline wait events
2921  * to avoid hanging the ring, which we assume we are waiting on.
2922  */
2923 static void intel_clear_scanline_wait(struct drm_device *dev)
2924 {
2925 	struct drm_i915_private *dev_priv = dev->dev_private;
2926 	struct intel_ring_buffer *ring;
2927 	u32 tmp;
2928 
2929 	if (IS_GEN2(dev))
2930 		/* Can't break the hang on i8xx */
2931 		return;
2932 
2933 	ring = LP_RING(dev_priv);
2934 	tmp = I915_READ_CTL(ring);
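	/*
	 * Writing the control register back with RING_WAIT still set
	 * clears the wait condition (the bit is write-to-clear) without
	 * disturbing the other control bits.
	 */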
2935 	if (tmp & RING_WAIT)
2936 		I915_WRITE_CTL(ring, tmp);
2937 }
2938 
2939 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2940 {
2941 	struct drm_i915_gem_object *obj;
2942 	struct drm_i915_private *dev_priv;
2943 	struct drm_device *dev;
2944 
2945 	if (crtc->fb == NULL)
2946 		return;
2947 
2948 	obj = to_intel_framebuffer(crtc->fb)->obj;
2949 	dev = crtc->dev;
2950 	dev_priv = dev->dev_private;
2951 	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
2952 	while (atomic_load_acq_int(&obj->pending_flip) != 0)
2953 		lksleep(&obj->pending_flip, &dev->event_lock, 0, "915wfl", 0);
2954 	lockmgr(&dev->event_lock, LK_RELEASE);
2955 }
2956 
2957 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2958 {
2959 	struct drm_device *dev = crtc->dev;
2960 	struct drm_mode_config *mode_config = &dev->mode_config;
2961 	struct intel_encoder *encoder;
2962 
2963 	/*
2964 	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2965 	 * must be driven by its own crtc; no sharing is possible.
2966 	 */
2967 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2968 		if (encoder->base.crtc != crtc)
2969 			continue;
2970 
2971 		switch (encoder->type) {
2972 		case INTEL_OUTPUT_EDP:
2973 			if (!intel_encoder_is_pch_edp(&encoder->base))
2974 				return false;
2975 			continue;
2976 		}
2977 	}
2978 
2979 	return true;
2980 }
2981 
2982 /*
2983  * Enable PCH resources required for PCH ports:
2984  *   - PCH PLLs
2985  *   - FDI training & RX/TX
2986  *   - update transcoder timings
2987  *   - DP transcoding bits
2988  *   - transcoder
2989  */
2990 static void ironlake_pch_enable(struct drm_crtc *crtc)
2991 {
2992 	struct drm_device *dev = crtc->dev;
2993 	struct drm_i915_private *dev_priv = dev->dev_private;
2994 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2995 	int pipe = intel_crtc->pipe;
2996 	u32 reg, temp, transc_sel;
2997 
2998 	/* For PCH output, training FDI link */
2999 	dev_priv->display.fdi_link_train(crtc);
3000 
3001 	intel_enable_pch_pll(dev_priv, pipe);
3002 
3003 	if (HAS_PCH_CPT(dev)) {
3004 		transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL :
3005 			TRANSC_DPLLB_SEL;
3006 
3007 		/* Be sure PCH DPLL SEL is set */
3008 		temp = I915_READ(PCH_DPLL_SEL);
3009 		if (pipe == 0) {
3010 			temp &= ~(TRANSA_DPLLB_SEL);
3011 			temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
3012 		} else if (pipe == 1) {
3013 			temp &= ~(TRANSB_DPLLB_SEL);
3014 			temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
3015 		} else if (pipe == 2) {
3016 			temp &= ~(TRANSC_DPLLB_SEL);
3017 			temp |= (TRANSC_DPLL_ENABLE | transc_sel);
3018 		}
3019 		I915_WRITE(PCH_DPLL_SEL, temp);
3020 	}
3021 
3022 	/* Copy CPU pipe timings to the PCH transcoder; panel registers must be unlocked first */
3023 	assert_panel_unlocked(dev_priv, pipe);
3024 	I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
3025 	I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
3026 	I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));
3027 
3028 	I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
3029 	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
3030 	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
3031 	I915_WRITE(TRANS_VSYNCSHIFT(pipe),  I915_READ(VSYNCSHIFT(pipe)));
3032 
3033 	intel_fdi_normal_train(crtc);
3034 
3035 	/* For PCH DP, enable TRANS_DP_CTL */
3036 	if (HAS_PCH_CPT(dev) &&
3037 	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
3038 	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
3039 		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
3040 		reg = TRANS_DP_CTL(pipe);
3041 		temp = I915_READ(reg);
3042 		temp &= ~(TRANS_DP_PORT_SEL_MASK |
3043 			  TRANS_DP_SYNC_MASK |
3044 			  TRANS_DP_BPC_MASK);
3045 		temp |= (TRANS_DP_OUTPUT_ENABLE |
3046 			 TRANS_DP_ENH_FRAMING);
3047 		temp |= bpc << 9; /* same format but at 11:9 */
3048 
3049 		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
3050 			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
3051 		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
3052 			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
3053 
3054 		switch (intel_trans_dp_port_sel(crtc)) {
3055 		case PCH_DP_B:
3056 			temp |= TRANS_DP_PORT_SEL_B;
3057 			break;
3058 		case PCH_DP_C:
3059 			temp |= TRANS_DP_PORT_SEL_C;
3060 			break;
3061 		case PCH_DP_D:
3062 			temp |= TRANS_DP_PORT_SEL_D;
3063 			break;
3064 		default:
3065 			DRM_DEBUG_KMS("Unknown PCH DP port, defaulting to port B\n");
3066 			temp |= TRANS_DP_PORT_SEL_B;
3067 			break;
3068 		}
3069 
3070 		I915_WRITE(reg, temp);
3071 	}
3072 
3073 	intel_enable_transcoder(dev_priv, pipe);
3074 }
3075 
3076 void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
3077 {
3078 	struct drm_i915_private *dev_priv = dev->dev_private;
3079 	int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
3080 	u32 temp;
3081 
3082 	temp = I915_READ(dslreg);
3083 	DELAY(500);
3084 	if (_intel_wait_for(dev, I915_READ(dslreg) != temp, 5, 1, "915cp1")) {
3085 		/* Without this, mode sets may fail silently on FDI */
3086 		I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
3087 		DELAY(250);
3088 		I915_WRITE(tc2reg, 0);
3089 		if (_intel_wait_for(dev, I915_READ(dslreg) != temp, 5, 1,
3090 		    "915cp2"))
3091 			DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
3092 	}
3093 }
3094 
3095 static void ironlake_crtc_enable(struct drm_crtc *crtc)
3096 {
3097 	struct drm_device *dev = crtc->dev;
3098 	struct drm_i915_private *dev_priv = dev->dev_private;
3099 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3100 	int pipe = intel_crtc->pipe;
3101 	int plane = intel_crtc->plane;
3102 	u32 temp;
3103 	bool is_pch_port;
3104 
3105 	if (intel_crtc->active)
3106 		return;
3107 
3108 	intel_crtc->active = true;
3109 	intel_update_watermarks(dev);
3110 
3111 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3112 		temp = I915_READ(PCH_LVDS);
3113 		if ((temp & LVDS_PORT_EN) == 0)
3114 			I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
3115 	}
3116 
3117 	is_pch_port = intel_crtc_driving_pch(crtc);
3118 
3119 	if (is_pch_port)
3120 		ironlake_fdi_pll_enable(crtc);
3121 	else
3122 		ironlake_fdi_disable(crtc);
3123 
3124 	/* Enable panel fitting for LVDS */
3125 	if (dev_priv->pch_pf_size &&
3126 	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
3127 		/* Force use of hard-coded filter coefficients
3128 		 * as some pre-programmed values are broken,
3129 		 * e.g. x201.
3130 		 */
3131 		I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3132 		I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
3133 		I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
3134 	}
3135 
3136 	intel_enable_pipe(dev_priv, pipe, is_pch_port);
3137 	intel_enable_plane(dev_priv, plane, pipe);
3138 
3139 	if (is_pch_port)
3140 		ironlake_pch_enable(crtc);
3141 
3142 	intel_crtc_load_lut(crtc);
3143 
3144 	DRM_LOCK(dev);
3145 	intel_update_fbc(dev);
3146 	DRM_UNLOCK(dev);
3147 
3148 	intel_crtc_update_cursor(crtc, true);
3149 }
3150 
3151 static void ironlake_crtc_disable(struct drm_crtc *crtc)
3152 {
3153 	struct drm_device *dev = crtc->dev;
3154 	struct drm_i915_private *dev_priv = dev->dev_private;
3155 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3156 	int pipe = intel_crtc->pipe;
3157 	int plane = intel_crtc->plane;
3158 	u32 reg, temp;
3159 
3160 	if (!intel_crtc->active)
3161 		return;
3162 
3163 	intel_crtc_wait_for_pending_flips(crtc);
3164 	drm_vblank_off(dev, pipe);
3165 	intel_crtc_update_cursor(crtc, false);
3166 
3167 	intel_disable_plane(dev_priv, plane, pipe);
3168 
3169 	if (dev_priv->cfb_plane == plane)
3170 		intel_disable_fbc(dev);
3171 
3172 	intel_disable_pipe(dev_priv, pipe);
3173 
3174 	/* Disable PF */
3175 	I915_WRITE(PF_CTL(pipe), 0);
3176 	I915_WRITE(PF_WIN_SZ(pipe), 0);
3177 
3178 	ironlake_fdi_disable(crtc);
3179 
3180 	/* This is a horrible layering violation; we should be doing this in
3181 	 * the connector/encoder ->prepare instead, but we don't always have
3182 	 * enough information there about the config to know whether it will
3183 	 * actually be necessary or just cause undesired flicker.
3184 	 */
3185 	intel_disable_pch_ports(dev_priv, pipe);
3186 
3187 	intel_disable_transcoder(dev_priv, pipe);
3188 
3189 	if (HAS_PCH_CPT(dev)) {
3190 		/* disable TRANS_DP_CTL */
3191 		reg = TRANS_DP_CTL(pipe);
3192 		temp = I915_READ(reg);
3193 		temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
3194 		temp |= TRANS_DP_PORT_SEL_NONE;
3195 		I915_WRITE(reg, temp);
3196 
3197 		/* disable DPLL_SEL */
3198 		temp = I915_READ(PCH_DPLL_SEL);
3199 		switch (pipe) {
3200 		case 0:
3201 			temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
3202 			break;
3203 		case 1:
3204 			temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
3205 			break;
3206 		case 2:
3207 			/* C shares PLL A or B */
3208 			temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
3209 			break;
3210 		default:
3211 			KASSERT(0, ("Wrong pipe %d", pipe)); /* unreachable */
3212 		}
3213 		I915_WRITE(PCH_DPLL_SEL, temp);
3214 	}
3215 
3216 	/* disable PCH DPLL */
3217 	if (!intel_crtc->no_pll)
3218 		intel_disable_pch_pll(dev_priv, pipe);
3219 
3220 	/* Switch from PCDclk to Rawclk */
3221 	reg = FDI_RX_CTL(pipe);
3222 	temp = I915_READ(reg);
3223 	I915_WRITE(reg, temp & ~FDI_PCDCLK);
3224 
3225 	/* Disable CPU FDI TX PLL */
3226 	reg = FDI_TX_CTL(pipe);
3227 	temp = I915_READ(reg);
3228 	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3229 
3230 	POSTING_READ(reg);
3231 	DELAY(100);
3232 
3233 	reg = FDI_RX_CTL(pipe);
3234 	temp = I915_READ(reg);
3235 	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3236 
3237 	/* Wait for the clocks to turn off. */
3238 	POSTING_READ(reg);
3239 	DELAY(100);
3240 
3241 	intel_crtc->active = false;
3242 	intel_update_watermarks(dev);
3243 
3244 	DRM_LOCK(dev);
3245 	intel_update_fbc(dev);
3246 	intel_clear_scanline_wait(dev);
3247 	DRM_UNLOCK(dev);
3248 }
3249 
3250 static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
3251 {
3252 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3253 	int pipe = intel_crtc->pipe;
3254 	int plane = intel_crtc->plane;
3255 
3256 	/* XXX: When our outputs are all unaware of DPMS modes other than off
3257 	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3258 	 */
3259 	switch (mode) {
3260 	case DRM_MODE_DPMS_ON:
3261 	case DRM_MODE_DPMS_STANDBY:
3262 	case DRM_MODE_DPMS_SUSPEND:
3263 		DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
3264 		ironlake_crtc_enable(crtc);
3265 		break;
3266 
3267 	case DRM_MODE_DPMS_OFF:
3268 		DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
3269 		ironlake_crtc_disable(crtc);
3270 		break;
3271 	}
3272 }
3273 
3274 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3275 {
3276 	if (!enable && intel_crtc->overlay) {
3277 		struct drm_device *dev = intel_crtc->base.dev;
3278 		struct drm_i915_private *dev_priv = dev->dev_private;
3279 
3280 		DRM_LOCK(dev);
3281 		dev_priv->mm.interruptible = false;
3282 		(void) intel_overlay_switch_off(intel_crtc->overlay);
3283 		dev_priv->mm.interruptible = true;
3284 		DRM_UNLOCK(dev);
3285 	}
3286 
3287 	/* Let userspace switch the overlay on again. In most cases userspace
3288 	 * has to recompute where to put it anyway.
3289 	 */
3290 }
3291 
3292 static void i9xx_crtc_enable(struct drm_crtc *crtc)
3293 {
3294 	struct drm_device *dev = crtc->dev;
3295 	struct drm_i915_private *dev_priv = dev->dev_private;
3296 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3297 	int pipe = intel_crtc->pipe;
3298 	int plane = intel_crtc->plane;
3299 
3300 	if (intel_crtc->active)
3301 		return;
3302 
3303 	intel_crtc->active = true;
3304 	intel_update_watermarks(dev);
3305 
3306 	intel_enable_pll(dev_priv, pipe);
3307 	intel_enable_pipe(dev_priv, pipe, false);
3308 	intel_enable_plane(dev_priv, plane, pipe);
3309 
3310 	intel_crtc_load_lut(crtc);
3311 	intel_update_fbc(dev);
3312 
3313 	/* Give the overlay scaler a chance to enable if it's on this pipe */
3314 	intel_crtc_dpms_overlay(intel_crtc, true);
3315 	intel_crtc_update_cursor(crtc, true);
3316 }
3317 
3318 static void i9xx_crtc_disable(struct drm_crtc *crtc)
3319 {
3320 	struct drm_device *dev = crtc->dev;
3321 	struct drm_i915_private *dev_priv = dev->dev_private;
3322 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3323 	int pipe = intel_crtc->pipe;
3324 	int plane = intel_crtc->plane;
3325 
3326 	if (!intel_crtc->active)
3327 		return;
3328 
3329 	/* Give the overlay scaler a chance to disable if it's on this pipe */
3330 	intel_crtc_wait_for_pending_flips(crtc);
3331 	drm_vblank_off(dev, pipe);
3332 	intel_crtc_dpms_overlay(intel_crtc, false);
3333 	intel_crtc_update_cursor(crtc, false);
3334 
3335 	if (dev_priv->cfb_plane == plane)
3336 		intel_disable_fbc(dev);
3337 
3338 	intel_disable_plane(dev_priv, plane, pipe);
3339 	intel_disable_pipe(dev_priv, pipe);
3340 	intel_disable_pll(dev_priv, pipe);
3341 
3342 	intel_crtc->active = false;
3343 	intel_update_fbc(dev);
3344 	intel_update_watermarks(dev);
3345 	intel_clear_scanline_wait(dev);
3346 }
3347 
3348 static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3349 {
3350 	/* XXX: When our outputs are all unaware of DPMS modes other than off
3351 	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3352 	 */
3353 	switch (mode) {
3354 	case DRM_MODE_DPMS_ON:
3355 	case DRM_MODE_DPMS_STANDBY:
3356 	case DRM_MODE_DPMS_SUSPEND:
3357 		i9xx_crtc_enable(crtc);
3358 		break;
3359 	case DRM_MODE_DPMS_OFF:
3360 		i9xx_crtc_disable(crtc);
3361 		break;
3362 	}
3363 }
3364 
3365 /**
3366  * Sets the power management mode of the pipe and plane.
3367  */
3368 static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
3369 {
3370 	struct drm_device *dev = crtc->dev;
3371 	struct drm_i915_private *dev_priv = dev->dev_private;
3372 #if 0
3373 	struct drm_i915_master_private *master_priv;
3374 #endif
3375 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3376 	int pipe = intel_crtc->pipe;
3377 	bool enabled;
3378 
3379 	if (intel_crtc->dpms_mode == mode)
3380 		return;
3381 
3382 	intel_crtc->dpms_mode = mode;
3383 
3384 	dev_priv->display.dpms(crtc, mode);
3385 
3386 #if 0
3387 	if (!dev->primary->master)
3388 		return;
3389 
3390 	master_priv = dev->primary->master->driver_priv;
3391 	if (!master_priv->sarea_priv)
3392 		return;
3393 #else
3394 	if (!dev_priv->sarea_priv)
3395 		return;
3396 #endif
3397 
3398 	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
3399 
3400 	switch (pipe) {
3401 	case 0:
3402 #if 0
3403 		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
3404 		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
3405 #else
3406 		dev_priv->sarea_priv->planeA_w = enabled ? crtc->mode.hdisplay : 0;
3407 		dev_priv->sarea_priv->planeA_h = enabled ? crtc->mode.vdisplay : 0;
3408 #endif
3409 		break;
3410 	case 1:
3411 #if 0
3412 		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
3413 		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
3414 #else
3415 		dev_priv->sarea_priv->planeB_w = enabled ? crtc->mode.hdisplay : 0;
3416 		dev_priv->sarea_priv->planeB_h = enabled ? crtc->mode.vdisplay : 0;
3417 #endif
3418 		break;
3419 	default:
3420 		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
3421 		break;
3422 	}
3423 }
3424 
3425 static void intel_crtc_disable(struct drm_crtc *crtc)
3426 {
3427 	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
3428 	struct drm_device *dev = crtc->dev;
3429 
3430 	/* Flush any pending WAITs before we disable the pipe. Note that
3431 	 * we need to drop the struct_mutex in order to acquire it again
3432 	 * during the lowlevel dpms routines around a couple of the
3433 	 * operations. It does not look trivial nor desirable to move
3434 	 * that locking higher. So instead we leave a window for the
3435 	 * submission of further commands on the fb before we can actually
3436 	 * disable it. This race with userspace exists anyway, and we can
3437 	 * only rely on the pipe being disabled by userspace after it
3438 	 * receives the hotplug notification and has flushed any pending
3439 	 * batches.
3440 	 */
3441 	if (crtc->fb) {
3442 		DRM_LOCK(dev);
3443 		intel_finish_fb(crtc->fb);
3444 		DRM_UNLOCK(dev);
3445 	}
3446 
3447 	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
3448 	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
3449 	assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
3450 
3451 	if (crtc->fb) {
3452 		DRM_LOCK(dev);
3453 		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
3454 		DRM_UNLOCK(dev);
3455 	}
3456 }
3457 
3458 /* Prepare for a mode set.
3459  *
3460  * Note we could be a lot smarter here.  We need to figure out which outputs
3461  * will be enabled, which disabled (in short, how the config will change)
3462  * and perform the minimum necessary steps to accomplish that, e.g. updating
3463  * watermarks, FBC configuration, making sure PLLs are programmed correctly,
3464  * panel fitting is in the proper state, etc.
3465  */
3466 static void i9xx_crtc_prepare(struct drm_crtc *crtc)
3467 {
3468 	i9xx_crtc_disable(crtc);
3469 }
3470 
3471 static void i9xx_crtc_commit(struct drm_crtc *crtc)
3472 {
3473 	i9xx_crtc_enable(crtc);
3474 }
3475 
3476 static void ironlake_crtc_prepare(struct drm_crtc *crtc)
3477 {
3478 	ironlake_crtc_disable(crtc);
3479 }
3480 
3481 static void ironlake_crtc_commit(struct drm_crtc *crtc)
3482 {
3483 	ironlake_crtc_enable(crtc);
3484 }
3485 
3486 void intel_encoder_prepare(struct drm_encoder *encoder)
3487 {
3488 	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3489 	/* lvds has its own version of prepare; see intel_lvds_prepare */
3490 	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
3491 }
3492 
3493 void intel_encoder_commit(struct drm_encoder *encoder)
3494 {
3495 	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3496 	struct drm_device *dev = encoder->dev;
3497 	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3498 	struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
3499 
3500 	/* lvds has its own version of commit; see intel_lvds_commit */
3501 	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
3502 
3503 	if (HAS_PCH_CPT(dev))
3504 		intel_cpt_verify_modeset(dev, intel_crtc->pipe);
3505 }
3506 
3507 void intel_encoder_destroy(struct drm_encoder *encoder)
3508 {
3509 	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3510 
3511 	drm_encoder_cleanup(encoder);
3512 	drm_free(intel_encoder, DRM_MEM_KMS);
3513 }
3514 
3515 static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3516 				  const struct drm_display_mode *mode,
3517 				  struct drm_display_mode *adjusted_mode)
3518 {
3519 	struct drm_device *dev = crtc->dev;
3520 
3521 	if (HAS_PCH_SPLIT(dev)) {
3522 		/* FDI link clock is fixed at 2.7G */
3523 		if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
3524 			return false;
3525 	}
3526 
3527 	/* All interlaced capable intel hw wants timings in frames. Note though
3528 	 * that intel_lvds_mode_fixup does some funny tricks with the crtc
3529 	 * timings, so we need to be careful not to clobber these. */
3530 	if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
3531 		drm_mode_set_crtcinfo(adjusted_mode, 0);
3532 
3533 	return true;
3534 }
3535 
3536 static int i945_get_display_clock_speed(struct drm_device *dev)
3537 {
3538 	return 400000;
3539 }
3540 
3541 static int i915_get_display_clock_speed(struct drm_device *dev)
3542 {
3543 	return 333000;
3544 }
3545 
3546 static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
3547 {
3548 	return 200000;
3549 }
3550 
3551 static int i915gm_get_display_clock_speed(struct drm_device *dev)
3552 {
3553 	u16 gcfgc = 0;
3554 
3555 	gcfgc = pci_read_config(dev->device, GCFGC, 2);
3556 
3557 	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
3558 		return 133000;
3559 	else {
3560 		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
3561 		case GC_DISPLAY_CLOCK_333_MHZ:
3562 			return 333000;
3563 		default:
3564 		case GC_DISPLAY_CLOCK_190_200_MHZ:
3565 			return 190000;
3566 		}
3567 	}
3568 }
3569 
3570 static int i865_get_display_clock_speed(struct drm_device *dev)
3571 {
3572 	return 266000;
3573 }
3574 
3575 static int i855_get_display_clock_speed(struct drm_device *dev)
3576 {
3577 	u16 hpllcc = 0;
3578 	/* Assume that the hardware is in the high speed state.  This
3579 	 * should be the default.
3580 	 */
3581 	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
3582 	case GC_CLOCK_133_200:
3583 	case GC_CLOCK_100_200:
3584 		return 200000;
3585 	case GC_CLOCK_166_250:
3586 		return 250000;
3587 	case GC_CLOCK_100_133:
3588 		return 133000;
3589 	}
3590 
3591 	/* Shouldn't happen */
3592 	return 0;
3593 }
3594 
3595 static int i830_get_display_clock_speed(struct drm_device *dev)
3596 {
3597 	return 133000;
3598 }
3599 
3600 struct fdi_m_n {
3601 	u32        tu;
3602 	u32        gmch_m;
3603 	u32        gmch_n;
3604 	u32        link_m;
3605 	u32        link_n;
3606 };
3607 
3608 static void
3609 fdi_reduce_ratio(u32 *num, u32 *den)
3610 {
3611 	while (*num > 0xffffff || *den > 0xffffff) {
3612 		*num >>= 1;
3613 		*den >>= 1;
3614 	}
3615 }
3616 
3617 static void
3618 ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
3619 		     int link_clock, struct fdi_m_n *m_n)
3620 {
3621 	m_n->tu = 64; /* default size */
3622 
3623 	/* BUG_ON(pixel_clock > INT_MAX / 36); */
3624 	m_n->gmch_m = bits_per_pixel * pixel_clock;
3625 	m_n->gmch_n = link_clock * nlanes * 8;
3626 	fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
3627 
3628 	m_n->link_m = pixel_clock;
3629 	m_n->link_n = link_clock;
3630 	fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
3631 }
3632 
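/*
 * Worked example for ironlake_compute_m_n() with hypothetical numbers
 * (illustration only): bits_per_pixel = 24, nlanes = 4, pixel_clock =
 * 108,000 kHz and link_clock = 270,000 kHz give
 *   gmch_m = 24 * 108,000    = 2,592,000
 *   gmch_n = 270,000 * 4 * 8 = 8,640,000
 * Both already fit in 24 bits (<= 0xffffff), so fdi_reduce_ratio()
 * leaves them untouched; likewise link_m/link_n = 108,000/270,000.
 */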
3633 
3634 struct intel_watermark_params {
3635 	unsigned long fifo_size;
3636 	unsigned long max_wm;
3637 	unsigned long default_wm;
3638 	unsigned long guard_size;
3639 	unsigned long cacheline_size;
3640 };
3641 
3642 /* Pineview has different values for various configs */
3643 static const struct intel_watermark_params pineview_display_wm = {
3644 	PINEVIEW_DISPLAY_FIFO,
3645 	PINEVIEW_MAX_WM,
3646 	PINEVIEW_DFT_WM,
3647 	PINEVIEW_GUARD_WM,
3648 	PINEVIEW_FIFO_LINE_SIZE
3649 };
3650 static const struct intel_watermark_params pineview_display_hplloff_wm = {
3651 	PINEVIEW_DISPLAY_FIFO,
3652 	PINEVIEW_MAX_WM,
3653 	PINEVIEW_DFT_HPLLOFF_WM,
3654 	PINEVIEW_GUARD_WM,
3655 	PINEVIEW_FIFO_LINE_SIZE
3656 };
3657 static const struct intel_watermark_params pineview_cursor_wm = {
3658 	PINEVIEW_CURSOR_FIFO,
3659 	PINEVIEW_CURSOR_MAX_WM,
3660 	PINEVIEW_CURSOR_DFT_WM,
3661 	PINEVIEW_CURSOR_GUARD_WM,
3662 	PINEVIEW_FIFO_LINE_SIZE,
3663 };
3664 static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
3665 	PINEVIEW_CURSOR_FIFO,
3666 	PINEVIEW_CURSOR_MAX_WM,
3667 	PINEVIEW_CURSOR_DFT_WM,
3668 	PINEVIEW_CURSOR_GUARD_WM,
3669 	PINEVIEW_FIFO_LINE_SIZE
3670 };
3671 static const struct intel_watermark_params g4x_wm_info = {
3672 	G4X_FIFO_SIZE,
3673 	G4X_MAX_WM,
3674 	G4X_MAX_WM,
3675 	2,
3676 	G4X_FIFO_LINE_SIZE,
3677 };
3678 static const struct intel_watermark_params g4x_cursor_wm_info = {
3679 	I965_CURSOR_FIFO,
3680 	I965_CURSOR_MAX_WM,
3681 	I965_CURSOR_DFT_WM,
3682 	2,
3683 	G4X_FIFO_LINE_SIZE,
3684 };
3685 static const struct intel_watermark_params i965_cursor_wm_info = {
3686 	I965_CURSOR_FIFO,
3687 	I965_CURSOR_MAX_WM,
3688 	I965_CURSOR_DFT_WM,
3689 	2,
3690 	I915_FIFO_LINE_SIZE,
3691 };
3692 static const struct intel_watermark_params i945_wm_info = {
3693 	I945_FIFO_SIZE,
3694 	I915_MAX_WM,
3695 	1,
3696 	2,
3697 	I915_FIFO_LINE_SIZE
3698 };
3699 static const struct intel_watermark_params i915_wm_info = {
3700 	I915_FIFO_SIZE,
3701 	I915_MAX_WM,
3702 	1,
3703 	2,
3704 	I915_FIFO_LINE_SIZE
3705 };
3706 static const struct intel_watermark_params i855_wm_info = {
3707 	I855GM_FIFO_SIZE,
3708 	I915_MAX_WM,
3709 	1,
3710 	2,
3711 	I830_FIFO_LINE_SIZE
3712 };
3713 static const struct intel_watermark_params i830_wm_info = {
3714 	I830_FIFO_SIZE,
3715 	I915_MAX_WM,
3716 	1,
3717 	2,
3718 	I830_FIFO_LINE_SIZE
3719 };
3720 
3721 static const struct intel_watermark_params ironlake_display_wm_info = {
3722 	ILK_DISPLAY_FIFO,
3723 	ILK_DISPLAY_MAXWM,
3724 	ILK_DISPLAY_DFTWM,
3725 	2,
3726 	ILK_FIFO_LINE_SIZE
3727 };
3728 static const struct intel_watermark_params ironlake_cursor_wm_info = {
3729 	ILK_CURSOR_FIFO,
3730 	ILK_CURSOR_MAXWM,
3731 	ILK_CURSOR_DFTWM,
3732 	2,
3733 	ILK_FIFO_LINE_SIZE
3734 };
3735 static const struct intel_watermark_params ironlake_display_srwm_info = {
3736 	ILK_DISPLAY_SR_FIFO,
3737 	ILK_DISPLAY_MAX_SRWM,
3738 	ILK_DISPLAY_DFT_SRWM,
3739 	2,
3740 	ILK_FIFO_LINE_SIZE
3741 };
3742 static const struct intel_watermark_params ironlake_cursor_srwm_info = {
3743 	ILK_CURSOR_SR_FIFO,
3744 	ILK_CURSOR_MAX_SRWM,
3745 	ILK_CURSOR_DFT_SRWM,
3746 	2,
3747 	ILK_FIFO_LINE_SIZE
3748 };
3749 
3750 static const struct intel_watermark_params sandybridge_display_wm_info = {
3751 	SNB_DISPLAY_FIFO,
3752 	SNB_DISPLAY_MAXWM,
3753 	SNB_DISPLAY_DFTWM,
3754 	2,
3755 	SNB_FIFO_LINE_SIZE
3756 };
3757 static const struct intel_watermark_params sandybridge_cursor_wm_info = {
3758 	SNB_CURSOR_FIFO,
3759 	SNB_CURSOR_MAXWM,
3760 	SNB_CURSOR_DFTWM,
3761 	2,
3762 	SNB_FIFO_LINE_SIZE
3763 };
3764 static const struct intel_watermark_params sandybridge_display_srwm_info = {
3765 	SNB_DISPLAY_SR_FIFO,
3766 	SNB_DISPLAY_MAX_SRWM,
3767 	SNB_DISPLAY_DFT_SRWM,
3768 	2,
3769 	SNB_FIFO_LINE_SIZE
3770 };
3771 static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
3772 	SNB_CURSOR_SR_FIFO,
3773 	SNB_CURSOR_MAX_SRWM,
3774 	SNB_CURSOR_DFT_SRWM,
3775 	2,
3776 	SNB_FIFO_LINE_SIZE
3777 };
3778 
3779 
3780 /**
3781  * intel_calculate_wm - calculate watermark level
3782  * @clock_in_khz: pixel clock
3783  * @wm: chip FIFO params
3784  * @pixel_size: display pixel size
3785  * @latency_ns: memory latency for the platform
3786  *
3787  * Calculate the watermark level (the level at which the display plane will
3788  * start fetching from memory again).  Each chip has a different display
3789  * FIFO size and allocation, so the caller needs to figure that out and pass
3790  * in the correct intel_watermark_params structure.
3791  *
3792  * As the pixel clock runs, the FIFO will be drained at a rate that depends
3793  * on the pixel size.  When it reaches the watermark level, it'll start
3794  * fetching FIFO-line-sized chunks from memory until the FIFO fills
3795  * past the watermark point.  If the FIFO drains completely, a FIFO underrun
3796  * will occur, and a display engine hang could result.
3797  */
3798 static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
3799 					const struct intel_watermark_params *wm,
3800 					int fifo_size,
3801 					int pixel_size,
3802 					unsigned long latency_ns)
3803 {
3804 	long entries_required, wm_size;
3805 
3806 	/*
3807 	 * Note: we need to make sure we don't overflow for various clock &
3808 	 * latency values.
3809 	 * Clocks go from a few thousand to several hundred thousand kHz;
3810 	 * latency is usually a few thousand ns.
3811 	 */
3812 	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
3813 		1000;
3814 	entries_required = howmany(entries_required, wm->cacheline_size);
3815 
3816 	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
3817 
3818 	wm_size = fifo_size - (entries_required + wm->guard_size);
3819 
3820 	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
3821 
3822 	/* Don't promote wm_size to unsigned... */
3823 	if (wm_size > (long)wm->max_wm)
3824 		wm_size = wm->max_wm;
3825 	if (wm_size <= 0)
3826 		wm_size = wm->default_wm;
3827 	return wm_size;
3828 }
3829 
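/*
 * Worked example for intel_calculate_wm(), with a hypothetical FIFO
 * (illustration only): clock_in_khz = 108,000, pixel_size = 4,
 * latency_ns = 5,000, cacheline_size = 64, guard_size = 2 and
 * fifo_size = 96 give
 *   entries_required = (108,000 / 1,000) * 4 * 5,000 / 1,000 = 2,160 bytes,
 *   rounded up to howmany(2,160, 64) = 34 cachelines, so
 *   wm_size = 96 - (34 + 2) = 60
 * which is then clamped against max_wm/default_wm as above.
 */
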
3830 struct cxsr_latency {
3831 	int is_desktop;
3832 	int is_ddr3;
3833 	unsigned long fsb_freq;
3834 	unsigned long mem_freq;
3835 	unsigned long display_sr;
3836 	unsigned long display_hpll_disable;
3837 	unsigned long cursor_sr;
3838 	unsigned long cursor_hpll_disable;
3839 };
3840 
3841 static const struct cxsr_latency cxsr_latency_table[] = {
3842 	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
3843 	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
3844 	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
3845 	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
3846 	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */
3847 
3848 	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
3849 	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
3850 	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
3851 	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
3852 	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */
3853 
3854 	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
3855 	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
3856 	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
3857 	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
3858 	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */
3859 
3860 	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
3861 	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
3862 	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
3863 	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
3864 	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */
3865 
3866 	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
3867 	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
3868 	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
3869 	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
3870 	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */
3871 
3872 	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
3873 	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
3874 	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
3875 	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
3876 	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
3877 };
3878 
3879 static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
3880 							 int is_ddr3,
3881 							 int fsb,
3882 							 int mem)
3883 {
3884 	const struct cxsr_latency *latency;
3885 	int i;
3886 
3887 	if (fsb == 0 || mem == 0)
3888 		return NULL;
3889 
3890 	for (i = 0; i < DRM_ARRAY_SIZE(cxsr_latency_table); i++) {
3891 		latency = &cxsr_latency_table[i];
3892 		if (is_desktop == latency->is_desktop &&
3893 		    is_ddr3 == latency->is_ddr3 &&
3894 		    fsb == latency->fsb_freq && mem == latency->mem_freq)
3895 			return latency;
3896 	}
3897 
3898 	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3899 
3900 	return NULL;
3901 }
3902 
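/*
 * Example lookup, reading straight off cxsr_latency_table above: a
 * desktop part (is_desktop = 1) with DDR3 (is_ddr3 = 1), an 800 MHz FSB
 * and 667 MHz memory matches {1, 1, 800, 667, ...}, i.e. display_sr =
 * 6420 ns, cursor_sr = 6873 ns, and 36420/36873 ns with the HPLL off.
 */
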
3903 static void pineview_disable_cxsr(struct drm_device *dev)
3904 {
3905 	struct drm_i915_private *dev_priv = dev->dev_private;
3906 
3907 	/* deactivate cxsr */
3908 	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
3909 }
3910 
3911 /*
3912  * Latency for FIFO fetches is dependent on several factors:
3913  *   - memory configuration (speed, channels)
3914  *   - chipset
3915  *   - current MCH state
3916  * It can be fairly high in some situations, so here we assume a fairly
3917  * pessimal value.  It's a tradeoff between extra memory fetches (if we
3918  * set this value too high, the FIFO will fetch frequently to stay full)
3919  * and power consumption (set it too low to save power and we might see
3920  * FIFO underruns and display "flicker").
3921  *
3922  * A value of 5us seems to be a good balance; safe for very low end
3923  * platforms but not overly aggressive on lower latency configs.
3924  */
3925 static const int latency_ns = 5000;
3926 
3927 static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
3928 {
3929 	struct drm_i915_private *dev_priv = dev->dev_private;
3930 	uint32_t dsparb = I915_READ(DSPARB);
3931 	int size;
3932 
3933 	size = dsparb & 0x7f;
3934 	if (plane)
3935 		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
3936 
3937 	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3938 		      plane ? "B" : "A", size);
3939 
3940 	return size;
3941 }
3942 
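/*
 * Illustrative DSPARB decode for i9xx_get_fifo_size() (hypothetical
 * register value, not from real hardware): if the low 7 bits read
 * 0x30 (48), plane A owns 48 FIFO entries; if the cursor-start field
 * reads 0x60 (96), plane B gets 96 - 48 = 48 entries.
 */
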
3943 static int i85x_get_fifo_size(struct drm_device *dev, int plane)
3944 {
3945 	struct drm_i915_private *dev_priv = dev->dev_private;
3946 	uint32_t dsparb = I915_READ(DSPARB);
3947 	int size;
3948 
3949 	size = dsparb & 0x1ff;
3950 	if (plane)
3951 		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
3952 	size >>= 1; /* Convert to cachelines */
3953 
3954 	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3955 		      plane ? "B" : "A", size);
3956 
3957 	return size;
3958 }
3959 
3960 static int i845_get_fifo_size(struct drm_device *dev, int plane)
3961 {
3962 	struct drm_i915_private *dev_priv = dev->dev_private;
3963 	uint32_t dsparb = I915_READ(DSPARB);
3964 	int size;
3965 
3966 	size = dsparb & 0x7f;
3967 	size >>= 2; /* Convert to cachelines */
3968 
3969 	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3970 		      plane ? "B" : "A",
3971 		      size);
3972 
3973 	return size;
3974 }
3975 
3976 static int i830_get_fifo_size(struct drm_device *dev, int plane)
3977 {
3978 	struct drm_i915_private *dev_priv = dev->dev_private;
3979 	uint32_t dsparb = I915_READ(DSPARB);
3980 	int size;
3981 
3982 	size = dsparb & 0x7f;
3983 	size >>= 1; /* Convert to cachelines */
3984 
3985 	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3986 		      plane ? "B" : "A", size);
3987 
3988 	return size;
3989 }
3990 
3991 static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
3992 {
3993 	struct drm_crtc *crtc, *enabled = NULL;
3994 
3995 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3996 		if (crtc->enabled && crtc->fb) {
3997 			if (enabled)
3998 				return NULL;
3999 			enabled = crtc;
4000 		}
4001 	}
4002 
4003 	return enabled;
4004 }
4005 
4006 static void pineview_update_wm(struct drm_device *dev)
4007 {
4008 	struct drm_i915_private *dev_priv = dev->dev_private;
4009 	struct drm_crtc *crtc;
4010 	const struct cxsr_latency *latency;
4011 	u32 reg;
4012 	unsigned long wm;
4013 
4014 	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
4015 					 dev_priv->fsb_freq, dev_priv->mem_freq);
4016 	if (!latency) {
4017 		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
4018 		pineview_disable_cxsr(dev);
4019 		return;
4020 	}
4021 
4022 	crtc = single_enabled_crtc(dev);
4023 	if (crtc) {
4024 		int clock = crtc->mode.clock;
4025 		int pixel_size = crtc->fb->bits_per_pixel / 8;
4026 
4027 		/* Display SR */
4028 		wm = intel_calculate_wm(clock, &pineview_display_wm,
4029 					pineview_display_wm.fifo_size,
4030 					pixel_size, latency->display_sr);
4031 		reg = I915_READ(DSPFW1);
4032 		reg &= ~DSPFW_SR_MASK;
4033 		reg |= wm << DSPFW_SR_SHIFT;
4034 		I915_WRITE(DSPFW1, reg);
4035 		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
4036 
4037 		/* cursor SR */
4038 		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
4039 					pineview_display_wm.fifo_size,
4040 					pixel_size, latency->cursor_sr);
4041 		reg = I915_READ(DSPFW3);
4042 		reg &= ~DSPFW_CURSOR_SR_MASK;
4043 		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
4044 		I915_WRITE(DSPFW3, reg);
4045 
4046 		/* Display HPLL off SR */
4047 		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
4048 					pineview_display_hplloff_wm.fifo_size,
4049 					pixel_size, latency->display_hpll_disable);
4050 		reg = I915_READ(DSPFW3);
4051 		reg &= ~DSPFW_HPLL_SR_MASK;
4052 		reg |= wm & DSPFW_HPLL_SR_MASK;
4053 		I915_WRITE(DSPFW3, reg);
4054 
4055 		/* cursor HPLL off SR */
4056 		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
4057 					pineview_display_hplloff_wm.fifo_size,
4058 					pixel_size, latency->cursor_hpll_disable);
4059 		reg = I915_READ(DSPFW3);
4060 		reg &= ~DSPFW_HPLL_CURSOR_MASK;
4061 		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
4062 		I915_WRITE(DSPFW3, reg);
4063 		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
4064 
4065 		/* activate cxsr */
4066 		I915_WRITE(DSPFW3,
4067 			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
4068 		DRM_DEBUG_KMS("Self-refresh is enabled\n");
4069 	} else {
4070 		pineview_disable_cxsr(dev);
4071 		DRM_DEBUG_KMS("Self-refresh is disabled\n");
4072 	}
4073 }
4074 
4075 static bool g4x_compute_wm0(struct drm_device *dev,
4076 			    int plane,
4077 			    const struct intel_watermark_params *display,
4078 			    int display_latency_ns,
4079 			    const struct intel_watermark_params *cursor,
4080 			    int cursor_latency_ns,
4081 			    int *plane_wm,
4082 			    int *cursor_wm)
4083 {
4084 	struct drm_crtc *crtc;
4085 	int htotal, hdisplay, clock, pixel_size;
4086 	int line_time_us, line_count;
4087 	int entries, tlb_miss;
4088 
4089 	crtc = intel_get_crtc_for_plane(dev, plane);
4090 	if (crtc->fb == NULL || !crtc->enabled) {
4091 		*cursor_wm = cursor->guard_size;
4092 		*plane_wm = display->guard_size;
4093 		return false;
4094 	}
4095 
4096 	htotal = crtc->mode.htotal;
4097 	hdisplay = crtc->mode.hdisplay;
4098 	clock = crtc->mode.clock;
4099 	pixel_size = crtc->fb->bits_per_pixel / 8;
4100 
4101 	/* Use the small buffer method to calculate plane watermark */
4102 	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
4103 	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
4104 	if (tlb_miss > 0)
4105 		entries += tlb_miss;
4106 	entries = howmany(entries, display->cacheline_size);
4107 	*plane_wm = entries + display->guard_size;
4108 	if (*plane_wm > (int)display->max_wm)
4109 		*plane_wm = display->max_wm;
4110 
4111 	/* Use the large buffer method to calculate cursor watermark */
4112 	line_time_us = ((htotal * 1000) / clock);
4113 	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
4114 	entries = line_count * 64 * pixel_size;
4115 	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
4116 	if (tlb_miss > 0)
4117 		entries += tlb_miss;
4118 	entries = howmany(entries, cursor->cacheline_size);
4119 	*cursor_wm = entries + cursor->guard_size;
4120 	if (*cursor_wm > (int)cursor->max_wm)
4121 		*cursor_wm = (int)cursor->max_wm;
4122 
4123 	return true;
4124 }
4125 
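/*
 * Worked example of the small buffer method above, assuming (for
 * illustration) the g4x parameters resolve to a 64-byte cacheline and
 * a FIFO smaller than hdisplay * 8: clock = 108,000 kHz, pixel_size = 4
 * and display_latency_ns = 5,000 give
 *   entries  = ((108,000 * 4 / 1,000) * 5,000) / 1,000 = 2,160 bytes
 *   plane_wm = howmany(2,160, 64) + guard_size = 34 + guard_size
 * with tlb_miss negative, and therefore ignored, for wide modes.
 */
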
4126 /*
4127  * Check the wm result.
4128  *
4129  * If any calculated watermark values is larger than the maximum value that
4130  * If any calculated watermark value is larger than the maximum value that
4131  * must be disabled.
4132  */
4133 static bool g4x_check_srwm(struct drm_device *dev,
4134 			   int display_wm, int cursor_wm,
4135 			   const struct intel_watermark_params *display,
4136 			   const struct intel_watermark_params *cursor)
4137 {
4138 	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
4139 		      display_wm, cursor_wm);
4140 
4141 	if (display_wm > display->max_wm) {
4142 		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
4143 			      display_wm, display->max_wm);
4144 		return false;
4145 	}
4146 
4147 	if (cursor_wm > cursor->max_wm) {
4148 		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
4149 			      cursor_wm, cursor->max_wm);
4150 		return false;
4151 	}
4152 
4153 	if (!(display_wm || cursor_wm)) {
4154 		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
4155 		return false;
4156 	}
4157 
4158 	return true;
4159 }
4160 
4161 static bool g4x_compute_srwm(struct drm_device *dev,
4162 			     int plane,
4163 			     int latency_ns,
4164 			     const struct intel_watermark_params *display,
4165 			     const struct intel_watermark_params *cursor,
4166 			     int *display_wm, int *cursor_wm)
4167 {
4168 	struct drm_crtc *crtc;
4169 	int hdisplay, htotal, pixel_size, clock;
4170 	unsigned long line_time_us;
4171 	int line_count, line_size;
4172 	int small, large;
4173 	int entries;
4174 
4175 	if (!latency_ns) {
4176 		*display_wm = *cursor_wm = 0;
4177 		return false;
4178 	}
4179 
4180 	crtc = intel_get_crtc_for_plane(dev, plane);
4181 	hdisplay = crtc->mode.hdisplay;
4182 	htotal = crtc->mode.htotal;
4183 	clock = crtc->mode.clock;
4184 	pixel_size = crtc->fb->bits_per_pixel / 8;
4185 
4186 	line_time_us = (htotal * 1000) / clock;
4187 	line_count = (latency_ns / line_time_us + 1000) / 1000;
4188 	line_size = hdisplay * pixel_size;
4189 
4190 	/* Use the minimum of the small and large buffer method for primary */
4191 	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4192 	large = line_count * line_size;
4193 
4194 	entries = howmany(min(small, large), display->cacheline_size);
4195 	*display_wm = entries + display->guard_size;
4196 
4197 	/* calculate the self-refresh watermark for display cursor */
4198 	entries = line_count * pixel_size * 64;
4199 	entries = howmany(entries, cursor->cacheline_size);
4200 	*cursor_wm = entries + cursor->guard_size;
4201 
4202 	return g4x_check_srwm(dev,
4203 			      *display_wm, *cursor_wm,
4204 			      display, cursor);
4205 }
4206 
4207 #define single_plane_enabled(mask) ((mask) != 0 && powerof2(mask))
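
/*
 * single_plane_enabled() is true only for a one-bit mask: 0x1 (pipe A
 * alone) and 0x2 (pipe B alone) qualify, while 0x0 (nothing enabled)
 * and 0x3 (both pipes) do not, since 3 is not a power of two.
 */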
4208 
4209 static void g4x_update_wm(struct drm_device *dev)
4210 {
4211 	static const int sr_latency_ns = 12000;
4212 	struct drm_i915_private *dev_priv = dev->dev_private;
4213 	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
4214 	int plane_sr, cursor_sr;
4215 	unsigned int enabled = 0;
4216 
4217 	if (g4x_compute_wm0(dev, 0,
4218 			    &g4x_wm_info, latency_ns,
4219 			    &g4x_cursor_wm_info, latency_ns,
4220 			    &planea_wm, &cursora_wm))
4221 		enabled |= 1;
4222 
4223 	if (g4x_compute_wm0(dev, 1,
4224 			    &g4x_wm_info, latency_ns,
4225 			    &g4x_cursor_wm_info, latency_ns,
4226 			    &planeb_wm, &cursorb_wm))
4227 		enabled |= 2;
4228 
4229 	plane_sr = cursor_sr = 0;
4230 	if (single_plane_enabled(enabled) &&
4231 	    g4x_compute_srwm(dev, ffs(enabled) - 1,
4232 			     sr_latency_ns,
4233 			     &g4x_wm_info,
4234 			     &g4x_cursor_wm_info,
4235 			     &plane_sr, &cursor_sr))
4236 		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
4237 	else
4238 		I915_WRITE(FW_BLC_SELF,
4239 			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
4240 
4241 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
4242 		      planea_wm, cursora_wm,
4243 		      planeb_wm, cursorb_wm,
4244 		      plane_sr, cursor_sr);
4245 
4246 	I915_WRITE(DSPFW1,
4247 		   (plane_sr << DSPFW_SR_SHIFT) |
4248 		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
4249 		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
4250 		   planea_wm);
4251 	I915_WRITE(DSPFW2,
4252 		   (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
4253 		   (cursora_wm << DSPFW_CURSORA_SHIFT));
4254 	/* HPLL off in SR has some issues on G4x... disable it */
4255 	I915_WRITE(DSPFW3,
4256 		   (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
4257 		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
4258 }
4259 
4260 static void i965_update_wm(struct drm_device *dev)
4261 {
4262 	struct drm_i915_private *dev_priv = dev->dev_private;
4263 	struct drm_crtc *crtc;
4264 	int srwm = 1;
4265 	int cursor_sr = 16;
4266 
4267 	/* Calc sr entries for one plane configs */
4268 	crtc = single_enabled_crtc(dev);
4269 	if (crtc) {
4270 		/* self-refresh has much higher latency */
4271 		static const int sr_latency_ns = 12000;
4272 		int clock = crtc->mode.clock;
4273 		int htotal = crtc->mode.htotal;
4274 		int hdisplay = crtc->mode.hdisplay;
4275 		int pixel_size = crtc->fb->bits_per_pixel / 8;
4276 		unsigned long line_time_us;
4277 		int entries;
4278 
4279 		line_time_us = ((htotal * 1000) / clock);
4280 
4281 		/* Use ns/us then divide to preserve precision */
4282 		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4283 			pixel_size * hdisplay;
4284 		entries = howmany(entries, I915_FIFO_LINE_SIZE);
4285 		srwm = I965_FIFO_SIZE - entries;
4286 		if (srwm < 0)
4287 			srwm = 1;
4288 		srwm &= 0x1ff;
4289 		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
4290 			      entries, srwm);
4291 
4292 		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4293 			pixel_size * 64;
4294 		entries = howmany(entries, i965_cursor_wm_info.cacheline_size);
4295 		cursor_sr = i965_cursor_wm_info.fifo_size -
4296 			(entries + i965_cursor_wm_info.guard_size);
4297 
4298 		if (cursor_sr > i965_cursor_wm_info.max_wm)
4299 			cursor_sr = i965_cursor_wm_info.max_wm;
4300 
4301 		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
4302 			      "cursor %d\n", srwm, cursor_sr);
4303 
4304 		if (IS_CRESTLINE(dev))
4305 			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
4306 	} else {
4307 		/* Turn off self refresh if both pipes are enabled */
4308 		if (IS_CRESTLINE(dev))
4309 			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
4310 				   & ~FW_BLC_SELF_EN);
4311 	}
4312 
4313 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
4314 		      srwm);
4315 
4316 	/* 965 has limitations... */
4317 	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
4318 		   (8 << 16) | (8 << 8) | (8 << 0));
4319 	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
4320 	/* update cursor SR watermark */
4321 	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
4322 }
4323 
4324 static void i9xx_update_wm(struct drm_device *dev)
4325 {
4326 	struct drm_i915_private *dev_priv = dev->dev_private;
4327 	const struct intel_watermark_params *wm_info;
4328 	uint32_t fwater_lo;
4329 	uint32_t fwater_hi;
4330 	int cwm, srwm = 1;
4331 	int fifo_size;
4332 	int planea_wm, planeb_wm;
4333 	struct drm_crtc *crtc, *enabled = NULL;
4334 
4335 	if (IS_I945GM(dev))
4336 		wm_info = &i945_wm_info;
4337 	else if (!IS_GEN2(dev))
4338 		wm_info = &i915_wm_info;
4339 	else
4340 		wm_info = &i855_wm_info;
4341 
4342 	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
4343 	crtc = intel_get_crtc_for_plane(dev, 0);
4344 	if (crtc->enabled && crtc->fb) {
4345 		planea_wm = intel_calculate_wm(crtc->mode.clock,
4346 					       wm_info, fifo_size,
4347 					       crtc->fb->bits_per_pixel / 8,
4348 					       latency_ns);
4349 		enabled = crtc;
4350 	} else
4351 		planea_wm = fifo_size - wm_info->guard_size;
4352 
4353 	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
4354 	crtc = intel_get_crtc_for_plane(dev, 1);
4355 	if (crtc->enabled && crtc->fb) {
4356 		planeb_wm = intel_calculate_wm(crtc->mode.clock,
4357 					       wm_info, fifo_size,
4358 					       crtc->fb->bits_per_pixel / 8,
4359 					       latency_ns);
4360 		if (enabled == NULL)
4361 			enabled = crtc;
4362 		else
4363 			enabled = NULL;
4364 	} else
4365 		planeb_wm = fifo_size - wm_info->guard_size;
4366 
4367 	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
4368 
4369 	/*
4370 	 * Overlay gets an aggressive default since video jitter is bad.
4371 	 */
4372 	cwm = 2;
4373 
4374 	/* Play safe and disable self-refresh before adjusting watermarks. */
4375 	if (IS_I945G(dev) || IS_I945GM(dev))
4376 		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
4377 	else if (IS_I915GM(dev))
4378 		I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
4379 
4380 	/* Calc sr entries for one plane configs */
4381 	if (HAS_FW_BLC(dev) && enabled) {
4382 		/* self-refresh has much higher latency */
4383 		static const int sr_latency_ns = 6000;
4384 		int clock = enabled->mode.clock;
4385 		int htotal = enabled->mode.htotal;
4386 		int hdisplay = enabled->mode.hdisplay;
4387 		int pixel_size = enabled->fb->bits_per_pixel / 8;
4388 		unsigned long line_time_us;
4389 		int entries;
4390 
4391 		line_time_us = (htotal * 1000) / clock;
4392 
4393 		/* Use ns/us then divide to preserve precision */
4394 		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4395 			pixel_size * hdisplay;
4396 		entries = howmany(entries, wm_info->cacheline_size);
4397 		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
4398 		srwm = wm_info->fifo_size - entries;
4399 		if (srwm < 0)
4400 			srwm = 1;
4401 
4402 		if (IS_I945G(dev) || IS_I945GM(dev))
4403 			I915_WRITE(FW_BLC_SELF,
4404 				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
4405 		else if (IS_I915GM(dev))
4406 			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
4407 	}
4408 
4409 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
4410 		      planea_wm, planeb_wm, cwm, srwm);
4411 
4412 	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
4413 	fwater_hi = (cwm & 0x1f);
4414 
4415 	/* Set request length to 8 cachelines per fetch */
4416 	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
4417 	fwater_hi = fwater_hi | (1 << 8);
4418 
4419 	I915_WRITE(FW_BLC, fwater_lo);
4420 	I915_WRITE(FW_BLC2, fwater_hi);
4421 
4422 	if (HAS_FW_BLC(dev)) {
4423 		if (enabled) {
4424 			if (IS_I945G(dev) || IS_I945GM(dev))
4425 				I915_WRITE(FW_BLC_SELF,
4426 					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
4427 			else if (IS_I915GM(dev))
4428 				I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
4429 			DRM_DEBUG_KMS("memory self refresh enabled\n");
4430 		} else
4431 			DRM_DEBUG_KMS("memory self refresh disabled\n");
4432 	}
4433 }
4434 
4435 static void i830_update_wm(struct drm_device *dev)
4436 {
4437 	struct drm_i915_private *dev_priv = dev->dev_private;
4438 	struct drm_crtc *crtc;
4439 	uint32_t fwater_lo;
4440 	int planea_wm;
4441 
4442 	crtc = single_enabled_crtc(dev);
4443 	if (crtc == NULL)
4444 		return;
4445 
4446 	planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
4447 				       dev_priv->display.get_fifo_size(dev, 0),
4448 				       crtc->fb->bits_per_pixel / 8,
4449 				       latency_ns);
4450 	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
4451 	fwater_lo |= (3<<8) | planea_wm;
4452 
4453 	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
4454 
4455 	I915_WRITE(FW_BLC, fwater_lo);
4456 }
4457 
4458 #define ILK_LP0_PLANE_LATENCY		700
4459 #define ILK_LP0_CURSOR_LATENCY		1300
4460 
4461 /*
4462  * Check the wm result.
4463  *
4464  * If any calculated watermark values is larger than the maximum value that
4465  * If any calculated watermark value is larger than the maximum value that
4466  * must be disabled.
4467  */
4468 static bool ironlake_check_srwm(struct drm_device *dev, int level,
4469 				int fbc_wm, int display_wm, int cursor_wm,
4470 				const struct intel_watermark_params *display,
4471 				const struct intel_watermark_params *cursor)
4472 {
4473 	struct drm_i915_private *dev_priv = dev->dev_private;
4474 
4475 	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
4476 		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
4477 
4478 	if (fbc_wm > SNB_FBC_MAX_SRWM) {
4479 		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
4480 			      fbc_wm, SNB_FBC_MAX_SRWM, level);
4481 
4482 		/* fbc has its own way to disable FBC WM */
4483 		I915_WRITE(DISP_ARB_CTL,
4484 			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
4485 		return false;
4486 	}
4487 
4488 	if (display_wm > display->max_wm) {
4489 		DRM_DEBUG_KMS("display watermark(%d) is too large(%ld), disabling wm%d+\n",
4490 			      display_wm, display->max_wm, level);
4491 		return false;
4492 	}
4493 
4494 	if (cursor_wm > cursor->max_wm) {
4495 		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%ld), disabling wm%d+\n",
4496 			      cursor_wm, cursor->max_wm, level);
4497 		return false;
4498 	}
4499 
4500 	if (!(fbc_wm || display_wm || cursor_wm)) {
4501 		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
4502 		return false;
4503 	}
4504 
4505 	return true;
4506 }
4507 
4508 /*
4509  * Compute watermark values of WM[1-3],
4510  */
4511 static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
4512 				  int latency_ns,
4513 				  const struct intel_watermark_params *display,
4514 				  const struct intel_watermark_params *cursor,
4515 				  int *fbc_wm, int *display_wm, int *cursor_wm)
4516 {
4517 	struct drm_crtc *crtc;
4518 	unsigned long line_time_us;
4519 	int hdisplay, htotal, pixel_size, clock;
4520 	int line_count, line_size;
4521 	int small, large;
4522 	int entries;
4523 
4524 	if (!latency_ns) {
4525 		*fbc_wm = *display_wm = *cursor_wm = 0;
4526 		return false;
4527 	}
4528 
4529 	crtc = intel_get_crtc_for_plane(dev, plane);
4530 	hdisplay = crtc->mode.hdisplay;
4531 	htotal = crtc->mode.htotal;
4532 	clock = crtc->mode.clock;
4533 	pixel_size = crtc->fb->bits_per_pixel / 8;
4534 
4535 	line_time_us = (htotal * 1000) / clock;
4536 	line_count = (latency_ns / line_time_us + 1000) / 1000;
4537 	line_size = hdisplay * pixel_size;
4538 
4539 	/* Use the minimum of the small and large buffer method for primary */
4540 	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4541 	large = line_count * line_size;
4542 
4543 	entries = howmany(min(small, large), display->cacheline_size);
4544 	*display_wm = entries + display->guard_size;
4545 
4546 	/*
4547 	 * Spec says:
4548 	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
4549 	 */
4550 	*fbc_wm = howmany(*display_wm * 64, line_size) + 2;
4551 
4552 	/* calculate the self-refresh watermark for display cursor */
4553 	entries = line_count * pixel_size * 64;
4554 	entries = howmany(entries, cursor->cacheline_size);
4555 	*cursor_wm = entries + cursor->guard_size;
4556 
4557 	return ironlake_check_srwm(dev, level,
4558 				   *fbc_wm, *display_wm, *cursor_wm,
4559 				   display, cursor);
4560 }
4561 
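/*
 * Worked example of the FBC watermark formula above (hypothetical
 * numbers, illustration only): with *display_wm = 36 and a 1920-wide
 * mode at 4 bytes per pixel, line_size = 7,680 and
 *   *fbc_wm = howmany(36 * 64, 7,680) + 2 = howmany(2,304, 7,680) + 2
 *           = 1 + 2 = 3 lines.
 */
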
4562 static void ironlake_update_wm(struct drm_device *dev)
4563 {
4564 	struct drm_i915_private *dev_priv = dev->dev_private;
4565 	int fbc_wm, plane_wm, cursor_wm;
4566 	unsigned int enabled;
4567 
4568 	enabled = 0;
4569 	if (g4x_compute_wm0(dev, 0,
4570 			    &ironlake_display_wm_info,
4571 			    ILK_LP0_PLANE_LATENCY,
4572 			    &ironlake_cursor_wm_info,
4573 			    ILK_LP0_CURSOR_LATENCY,
4574 			    &plane_wm, &cursor_wm)) {
4575 		I915_WRITE(WM0_PIPEA_ILK,
4576 			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4577 		DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
4578 			      " plane %d, cursor: %d\n",
4579 			      plane_wm, cursor_wm);
4580 		enabled |= 1;
4581 	}
4582 
4583 	if (g4x_compute_wm0(dev, 1,
4584 			    &ironlake_display_wm_info,
4585 			    ILK_LP0_PLANE_LATENCY,
4586 			    &ironlake_cursor_wm_info,
4587 			    ILK_LP0_CURSOR_LATENCY,
4588 			    &plane_wm, &cursor_wm)) {
4589 		I915_WRITE(WM0_PIPEB_ILK,
4590 			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4591 		DRM_DEBUG_KMS("FIFO watermarks for pipe B -"
4592 			      " plane %d, cursor: %d\n",
4593 			      plane_wm, cursor_wm);
4594 		enabled |= 2;
4595 	}
4596 
4597 	/*
4598 	 * Calculate and update the self-refresh watermark only when one
4599 	 * display plane is used.
4600 	 */
4601 	I915_WRITE(WM3_LP_ILK, 0);
4602 	I915_WRITE(WM2_LP_ILK, 0);
4603 	I915_WRITE(WM1_LP_ILK, 0);
4604 
4605 	if (!single_plane_enabled(enabled))
4606 		return;
4607 	enabled = ffs(enabled) - 1;
4608 
4609 	/* WM1 */
4610 	if (!ironlake_compute_srwm(dev, 1, enabled,
4611 				   ILK_READ_WM1_LATENCY() * 500,
4612 				   &ironlake_display_srwm_info,
4613 				   &ironlake_cursor_srwm_info,
4614 				   &fbc_wm, &plane_wm, &cursor_wm))
4615 		return;
4616 
4617 	I915_WRITE(WM1_LP_ILK,
4618 		   WM1_LP_SR_EN |
4619 		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4620 		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4621 		   (plane_wm << WM1_LP_SR_SHIFT) |
4622 		   cursor_wm);
4623 
4624 	/* WM2 */
4625 	if (!ironlake_compute_srwm(dev, 2, enabled,
4626 				   ILK_READ_WM2_LATENCY() * 500,
4627 				   &ironlake_display_srwm_info,
4628 				   &ironlake_cursor_srwm_info,
4629 				   &fbc_wm, &plane_wm, &cursor_wm))
4630 		return;
4631 
4632 	I915_WRITE(WM2_LP_ILK,
4633 		   WM2_LP_EN |
4634 		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4635 		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4636 		   (plane_wm << WM1_LP_SR_SHIFT) |
4637 		   cursor_wm);
4638 
4639 	/*
4640 	 * WM3 is unsupported on ILK, probably because we don't have latency
4641 	 * data for that power state
4642 	 */
4643 }
4644 
4645 void sandybridge_update_wm(struct drm_device *dev)
4646 {
4647 	struct drm_i915_private *dev_priv = dev->dev_private;
4648 	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
4649 	u32 val;
4650 	int fbc_wm, plane_wm, cursor_wm;
4651 	unsigned int enabled;
4652 
4653 	enabled = 0;
4654 	if (g4x_compute_wm0(dev, 0,
4655 			    &sandybridge_display_wm_info, latency,
4656 			    &sandybridge_cursor_wm_info, latency,
4657 			    &plane_wm, &cursor_wm)) {
4658 		val = I915_READ(WM0_PIPEA_ILK);
4659 		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4660 		I915_WRITE(WM0_PIPEA_ILK, val |
4661 			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4662 		DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
4663 			      " plane %d, cursor: %d\n",
4664 			      plane_wm, cursor_wm);
4665 		enabled |= 1;
4666 	}
4667 
4668 	if (g4x_compute_wm0(dev, 1,
4669 			    &sandybridge_display_wm_info, latency,
4670 			    &sandybridge_cursor_wm_info, latency,
4671 			    &plane_wm, &cursor_wm)) {
4672 		val = I915_READ(WM0_PIPEB_ILK);
4673 		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4674 		I915_WRITE(WM0_PIPEB_ILK, val |
4675 			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4676 		DRM_DEBUG_KMS("FIFO watermarks for pipe B -"
4677 			      " plane %d, cursor: %d\n",
4678 			      plane_wm, cursor_wm);
4679 		enabled |= 2;
4680 	}
4681 
4682 	/* IVB has 3 pipes */
4683 	if (IS_IVYBRIDGE(dev) &&
4684 	    g4x_compute_wm0(dev, 2,
4685 			    &sandybridge_display_wm_info, latency,
4686 			    &sandybridge_cursor_wm_info, latency,
4687 			    &plane_wm, &cursor_wm)) {
4688 		val = I915_READ(WM0_PIPEC_IVB);
4689 		val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4690 		I915_WRITE(WM0_PIPEC_IVB, val |
4691 			   ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4692 		DRM_DEBUG_KMS("FIFO watermarks for pipe C -"
4693 			      " plane %d, cursor: %d\n",
4694 			      plane_wm, cursor_wm);
4695 		enabled |= 1 << 2;	/* bit 2 for pipe C; "|= 3" would corrupt the mask */
4696 	}
4697 
4698 	/*
4699 	 * Calculate and update the self-refresh watermark only when one
4700 	 * display plane is used.
4701 	 *
4702 	 * SNB supports 3 levels of watermarks.
4703 	 *
4704 	 * WM1/WM2/WM3 watermarks have to be enabled in ascending order
4705 	 * and disabled in descending order.
4706 	 *
4707 	 */
4708 	I915_WRITE(WM3_LP_ILK, 0);
4709 	I915_WRITE(WM2_LP_ILK, 0);
4710 	I915_WRITE(WM1_LP_ILK, 0);
4711 
4712 	if (!single_plane_enabled(enabled) ||
4713 	    dev_priv->sprite_scaling_enabled)
4714 		return;
4715 	enabled = ffs(enabled) - 1;
4716 
4717 	/* WM1 */
4718 	if (!ironlake_compute_srwm(dev, 1, enabled,
4719 				   SNB_READ_WM1_LATENCY() * 500,
4720 				   &sandybridge_display_srwm_info,
4721 				   &sandybridge_cursor_srwm_info,
4722 				   &fbc_wm, &plane_wm, &cursor_wm))
4723 		return;
4724 
4725 	I915_WRITE(WM1_LP_ILK,
4726 		   WM1_LP_SR_EN |
4727 		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4728 		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4729 		   (plane_wm << WM1_LP_SR_SHIFT) |
4730 		   cursor_wm);
4731 
4732 	/* WM2 */
4733 	if (!ironlake_compute_srwm(dev, 2, enabled,
4734 				   SNB_READ_WM2_LATENCY() * 500,
4735 				   &sandybridge_display_srwm_info,
4736 				   &sandybridge_cursor_srwm_info,
4737 				   &fbc_wm, &plane_wm, &cursor_wm))
4738 		return;
4739 
4740 	I915_WRITE(WM2_LP_ILK,
4741 		   WM2_LP_EN |
4742 		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4743 		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4744 		   (plane_wm << WM1_LP_SR_SHIFT) |
4745 		   cursor_wm);
4746 
4747 	/* WM3 */
4748 	if (!ironlake_compute_srwm(dev, 3, enabled,
4749 				   SNB_READ_WM3_LATENCY() * 500,
4750 				   &sandybridge_display_srwm_info,
4751 				   &sandybridge_cursor_srwm_info,
4752 				   &fbc_wm, &plane_wm, &cursor_wm))
4753 		return;
4754 
4755 	I915_WRITE(WM3_LP_ILK,
4756 		   WM3_LP_EN |
4757 		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4758 		   (fbc_wm << WM1_LP_FBC_SHIFT) |
4759 		   (plane_wm << WM1_LP_SR_SHIFT) |
4760 		   cursor_wm);
4761 }
4762 
4763 static bool
4764 sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
4765 			      uint32_t sprite_width, int pixel_size,
4766 			      const struct intel_watermark_params *display,
4767 			      int display_latency_ns, int *sprite_wm)
4768 {
4769 	struct drm_crtc *crtc;
4770 	int clock;
4771 	int entries, tlb_miss;
4772 
4773 	crtc = intel_get_crtc_for_plane(dev, plane);
4774 	if (crtc->fb == NULL || !crtc->enabled) {
4775 		*sprite_wm = display->guard_size;
4776 		return false;
4777 	}
4778 
4779 	clock = crtc->mode.clock;
4780 
4781 	/* Use the small buffer method to calculate the sprite watermark */
4782 	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
4783 	tlb_miss = display->fifo_size*display->cacheline_size -
4784 		sprite_width * 8;
4785 	if (tlb_miss > 0)
4786 		entries += tlb_miss;
4787 	entries = howmany(entries, display->cacheline_size);
4788 	*sprite_wm = entries + display->guard_size;
4789 	if (*sprite_wm > (int)display->max_wm)
4790 		*sprite_wm = display->max_wm;
4791 
4792 	return true;
4793 }
4794 
4795 static bool
4796 sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
4797 				uint32_t sprite_width, int pixel_size,
4798 				const struct intel_watermark_params *display,
4799 				int latency_ns, int *sprite_wm)
4800 {
4801 	struct drm_crtc *crtc;
4802 	unsigned long line_time_us;
4803 	int clock;
4804 	int line_count, line_size;
4805 	int small, large;
4806 	int entries;
4807 
4808 	if (!latency_ns) {
4809 		*sprite_wm = 0;
4810 		return false;
4811 	}
4812 
4813 	crtc = intel_get_crtc_for_plane(dev, plane);
4814 	clock = crtc->mode.clock;
4815 	if (!clock) {
4816 		*sprite_wm = 0;
4817 		return false;
4818 	}
4819 
4820 	line_time_us = (sprite_width * 1000) / clock;
4821 	if (!line_time_us) {
4822 		*sprite_wm = 0;
4823 		return false;
4824 	}
4825 
4826 	line_count = (latency_ns / line_time_us + 1000) / 1000;
4827 	line_size = sprite_width * pixel_size;
4828 
4829 	/* Use the minimum of the small and large buffer method for the sprite */
4830 	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4831 	large = line_count * line_size;
4832 
4833 	entries = howmany(min(small, large), display->cacheline_size);
4834 	*sprite_wm = entries + display->guard_size;
4835 
4836 	return *sprite_wm <= 0x3ff;
4837 }
4838 
4839 static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
4840 					 uint32_t sprite_width, int pixel_size)
4841 {
4842 	struct drm_i915_private *dev_priv = dev->dev_private;
4843 	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
4844 	u32 val;
4845 	int sprite_wm, reg;
4846 	int ret;
4847 
4848 	switch (pipe) {
4849 	case 0:
4850 		reg = WM0_PIPEA_ILK;
4851 		break;
4852 	case 1:
4853 		reg = WM0_PIPEB_ILK;
4854 		break;
4855 	case 2:
4856 		reg = WM0_PIPEC_IVB;
4857 		break;
4858 	default:
4859 		return; /* bad pipe */
4860 	}
4861 
4862 	ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
4863 					    &sandybridge_display_wm_info,
4864 					    latency, &sprite_wm);
4865 	if (!ret) {
4866 		DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
4867 			      pipe);
4868 		return;
4869 	}
4870 
4871 	val = I915_READ(reg);
4872 	val &= ~WM0_PIPE_SPRITE_MASK;
4873 	I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
4874 	DRM_DEBUG_KMS("sprite watermarks for pipe %d - %d\n", pipe, sprite_wm);
4875 
4876 
4877 	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
4878 					      pixel_size,
4879 					      &sandybridge_display_srwm_info,
4880 					      SNB_READ_WM1_LATENCY() * 500,
4881 					      &sprite_wm);
4882 	if (!ret) {
4883 		DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
4884 			      pipe);
4885 		return;
4886 	}
4887 	I915_WRITE(WM1S_LP_ILK, sprite_wm);
4888 
4889 	/* Only IVB has two more LP watermarks for sprite */
4890 	if (!IS_IVYBRIDGE(dev))
4891 		return;
4892 
4893 	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
4894 					      pixel_size,
4895 					      &sandybridge_display_srwm_info,
4896 					      SNB_READ_WM2_LATENCY() * 500,
4897 					      &sprite_wm);
4898 	if (!ret) {
4899 		DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
4900 			      pipe);
4901 		return;
4902 	}
4903 	I915_WRITE(WM2S_LP_IVB, sprite_wm);
4904 
4905 	ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
4906 					      pixel_size,
4907 					      &sandybridge_display_srwm_info,
4908 					      SNB_READ_WM3_LATENCY() * 500,
4909 					      &sprite_wm);
4910 	if (!ret) {
4911 		DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
4912 			      pipe);
4913 		return;
4914 	}
4915 	I915_WRITE(WM3S_LP_IVB, sprite_wm);
4916 }
4917 
4918 /**
4919  * intel_update_watermarks - update FIFO watermark values based on current modes
4920  *
4921  * Calculate watermark values for the various WM regs based on current mode
4922  * and plane configuration.
4923  *
4924  * There are several cases to deal with here:
4925  *   - normal (i.e. non-self-refresh)
4926  *   - self-refresh (SR) mode
4927  *   - lines are large relative to FIFO size (buffer can hold up to 2)
4928  *   - lines are small relative to FIFO size (buffer can hold more than 2
4929  *     lines), so need to account for TLB latency
4930  *
4931  *   The normal calculation is:
4932  *     watermark = dotclock * bytes per pixel * latency
4933  *   where latency is platform & configuration dependent (we assume pessimal
4934  *   values here).
4935  *
4936  *   The SR calculation is:
4937  *     watermark = (trunc(latency/line time)+1) * surface width *
4938  *       bytes per pixel
4939  *   where
4940  *     line time = htotal / dotclock
4941  *     surface width = hdisplay for normal plane and 64 for cursor
4942  *   and latency is assumed to be high, as above.
4943  *
4944  * The final value programmed to the register should always be rounded up,
4945  * and include an extra 2 entries to account for clock crossings.
4946  *
4947  * We don't use the sprite, so we can ignore that.  And on Crestline we have
4948  * to set the non-SR watermarks to 8.
4949  */
4950 static void intel_update_watermarks(struct drm_device *dev)
4951 {
4952 	struct drm_i915_private *dev_priv = dev->dev_private;
4953 
4954 	if (dev_priv->display.update_wm)
4955 		dev_priv->display.update_wm(dev);
4956 }
4957 
4958 void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
4959 				    uint32_t sprite_width, int pixel_size)
4960 {
4961 	struct drm_i915_private *dev_priv = dev->dev_private;
4962 
4963 	if (dev_priv->display.update_sprite_wm)
4964 		dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
4965 						   pixel_size);
4966 }
4967 
4968 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4969 {
4970 	if (i915_panel_use_ssc >= 0)
4971 		return i915_panel_use_ssc != 0;
4972 	return dev_priv->lvds_use_ssc
4973 		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
4974 }
4975 
4976 /**
4977  * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
4978  * @crtc: CRTC structure
4979  * @mode: requested mode
4980  *
4981  * A pipe may be connected to one or more outputs.  Based on the depth of the
4982  * attached framebuffer, choose a good color depth to use on the pipe.
4983  *
4984  * If possible, match the pipe depth to the fb depth.  In some cases, this
4985  * isn't ideal, because the connected output supports a lesser or restricted
4986  * set of depths.  Resolve that here:
4987  *    LVDS typically supports only 6bpc, so clamp down in that case
4988  *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
4989  *    Displays may support a restricted set as well, check EDID and clamp as
4990  *      appropriate.
4991  *    DP may want to dither down to 6bpc to fit larger modes
4992  *
4993  * RETURNS:
4994  * Dithering requirement (i.e. false if display bpc and pipe bpc match,
4995  * true if they don't match).
4996  */
4997 static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4998 					 unsigned int *pipe_bpp,
4999 					 struct drm_display_mode *mode)
5000 {
5001 	struct drm_device *dev = crtc->dev;
5002 	struct drm_i915_private *dev_priv = dev->dev_private;
5003 	struct drm_encoder *encoder;
5004 	struct drm_connector *connector;
5005 	unsigned int display_bpc = UINT_MAX, bpc;
5006 
5007 	/* Walk the encoders & connectors on this crtc, get min bpc */
5008 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
5009 		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
5010 
5011 		if (encoder->crtc != crtc)
5012 			continue;
5013 
5014 		if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
5015 			unsigned int lvds_bpc;
5016 
5017 			if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
5018 			    LVDS_A3_POWER_UP)
5019 				lvds_bpc = 8;
5020 			else
5021 				lvds_bpc = 6;
5022 
5023 			if (lvds_bpc < display_bpc) {
5024 				DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
5025 				display_bpc = lvds_bpc;
5026 			}
5027 			continue;
5028 		}
5029 
5030 		if (intel_encoder->type == INTEL_OUTPUT_EDP) {
5031 			/* Use VBT settings if we have an eDP panel */
5032 			unsigned int edp_bpc = dev_priv->edp.bpp / 3;
5033 
5034 			if (edp_bpc < display_bpc) {
5035 				DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
5036 				display_bpc = edp_bpc;
5037 			}
5038 			continue;
5039 		}
5040 
5041 		/* Not one of the known troublemakers, check the EDID */
5042 		list_for_each_entry(connector, &dev->mode_config.connector_list,
5043 				    head) {
5044 			if (connector->encoder != encoder)
5045 				continue;
5046 
5047 			/* Don't use an invalid EDID bpc value */
5048 			if (connector->display_info.bpc &&
5049 			    connector->display_info.bpc < display_bpc) {
5050 				DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
5051 				display_bpc = connector->display_info.bpc;
5052 			}
5053 		}
5054 
5055 		/*
5056 		 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
5057 		 * through, clamp it down.  (Note: >12bpc will be caught below.)
5058 		 */
5059 		if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
5060 			if (display_bpc > 8 && display_bpc < 12) {
5061 				DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
5062 				display_bpc = 12;
5063 			} else {
5064 				DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
5065 				display_bpc = 8;
5066 			}
5067 		}
5068 	}
5069 
5070 	if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
5071 		DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
5072 		display_bpc = 6;
5073 	}
5074 
5075 	/*
5076 	 * We could just drive the pipe at the highest bpc all the time and
5077 	 * enable dithering as needed, but that costs bandwidth.  So choose
5078 	 * the minimum value that expresses the full color range of the fb but
5079 	 * also stays within the max display bpc discovered above.
5080 	 */
5081 
5082 	switch (crtc->fb->depth) {
5083 	case 8:
5084 		bpc = 8; /* since we go through a colormap */
5085 		break;
5086 	case 15:
5087 	case 16:
5088 		bpc = 6; /* min is 18bpp */
5089 		break;
5090 	case 24:
5091 		bpc = 8;
5092 		break;
5093 	case 30:
5094 		bpc = 10;
5095 		break;
5096 	case 48:
5097 		bpc = 12;
5098 		break;
5099 	default:
5100 		DRM_DEBUG("unsupported depth, assuming 24 bits\n");
5101 		bpc = min((unsigned int)8, display_bpc);
5102 		break;
5103 	}
5104 
5105 	display_bpc = min(display_bpc, bpc);
5106 
5107 	DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
5108 			 bpc, display_bpc);
5109 
5110 	*pipe_bpp = display_bpc * 3;
5111 
5112 	return display_bpc != bpc;
5113 }
5114 
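/*
 * Example walk through intel_choose_pipe_bpp_dither() (illustration
 * only): a depth-24 framebuffer wants bpc = 8, but an LVDS panel in the
 * 6bpc power state clamps display_bpc to 6.  The result is
 * *pipe_bpp = 6 * 3 = 18, and the function returns true (6 != 8), so
 * the caller enables dithering.
 */
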
5115 static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
5116 {
5117 	struct drm_device *dev = crtc->dev;
5118 	struct drm_i915_private *dev_priv = dev->dev_private;
5119 	int refclk;
5120 
5121 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5122 	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5123 		refclk = dev_priv->lvds_ssc_freq * 1000;
5124 		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
5125 			      refclk / 1000);
5126 	} else if (!IS_GEN2(dev)) {
5127 		refclk = 96000;
5128 	} else {
5129 		refclk = 48000;
5130 	}
5131 
5132 	return refclk;
5133 }
5134 
5135 static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
5136 				      intel_clock_t *clock)
5137 {
5138 	/* SDVO TV has fixed PLL values that depend on its clock range;
5139 	   this mirrors the VBIOS setting. */
5140 	if (adjusted_mode->clock >= 100000
5141 	    && adjusted_mode->clock < 140500) {
5142 		clock->p1 = 2;
5143 		clock->p2 = 10;
5144 		clock->n = 3;
5145 		clock->m1 = 16;
5146 		clock->m2 = 8;
5147 	} else if (adjusted_mode->clock >= 140500
5148 		   && adjusted_mode->clock <= 200000) {
5149 		clock->p1 = 1;
5150 		clock->p2 = 10;
5151 		clock->n = 6;
5152 		clock->m1 = 12;
5153 		clock->m2 = 8;
5154 	}
5155 }
5156 
5157 static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
5158 				     intel_clock_t *clock,
5159 				     intel_clock_t *reduced_clock)
5160 {
5161 	struct drm_device *dev = crtc->dev;
5162 	struct drm_i915_private *dev_priv = dev->dev_private;
5163 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5164 	int pipe = intel_crtc->pipe;
5165 	u32 fp, fp2 = 0;
5166 
5167 	if (IS_PINEVIEW(dev)) {
5168 		fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
5169 		if (reduced_clock)
5170 			fp2 = (1 << reduced_clock->n) << 16 |
5171 				reduced_clock->m1 << 8 | reduced_clock->m2;
5172 	} else {
5173 		fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
5174 		if (reduced_clock)
5175 			fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
5176 				reduced_clock->m2;
5177 	}
5178 
5179 	I915_WRITE(FP0(pipe), fp);
5180 
5181 	intel_crtc->lowfreq_avail = false;
5182 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5183 	    reduced_clock && i915_powersave) {
5184 		I915_WRITE(FP1(pipe), fp2);
5185 		intel_crtc->lowfreq_avail = true;
5186 	} else {
5187 		I915_WRITE(FP1(pipe), fp);
5188 	}
5189 }
5190 
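/*
 * Illustration of the FP register packing above (hypothetical divisor
 * values): on non-Pineview parts, n = 6, m1 = 12, m2 = 8 packs as
 *   fp = 6 << 16 | 12 << 8 | 8 = 0x00060c08
 * while Pineview stores the N divisor as a power of two, so n = 3
 * packs as (1 << 3) << 16 = 0x00080000 in the same field.
 */
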
5191 static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
5192 			      struct drm_display_mode *mode,
5193 			      struct drm_display_mode *adjusted_mode,
5194 			      int x, int y,
5195 			      struct drm_framebuffer *old_fb)
5196 {
5197 	struct drm_device *dev = crtc->dev;
5198 	struct drm_i915_private *dev_priv = dev->dev_private;
5199 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5200 	int pipe = intel_crtc->pipe;
5201 	int plane = intel_crtc->plane;
5202 	int refclk, num_connectors = 0;
5203 	intel_clock_t clock, reduced_clock;
5204 	u32 dpll, dspcntr, pipeconf, vsyncshift;
5205 	bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
5206 	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
5207 	struct drm_mode_config *mode_config = &dev->mode_config;
5208 	struct intel_encoder *encoder;
5209 	const intel_limit_t *limit;
5210 	int ret;
5211 	u32 temp;
5212 	u32 lvds_sync = 0;
5213 
5214 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5215 		if (encoder->base.crtc != crtc)
5216 			continue;
5217 
5218 		switch (encoder->type) {
5219 		case INTEL_OUTPUT_LVDS:
5220 			is_lvds = true;
5221 			break;
5222 		case INTEL_OUTPUT_SDVO:
5223 		case INTEL_OUTPUT_HDMI:
5224 			is_sdvo = true;
5225 			if (encoder->needs_tv_clock)
5226 				is_tv = true;
5227 			break;
5228 		case INTEL_OUTPUT_DVO:
5229 			is_dvo = true;
5230 			break;
5231 		case INTEL_OUTPUT_TVOUT:
5232 			is_tv = true;
5233 			break;
5234 		case INTEL_OUTPUT_ANALOG:
5235 			is_crt = true;
5236 			break;
5237 		case INTEL_OUTPUT_DISPLAYPORT:
5238 			is_dp = true;
5239 			break;
5240 		}
5241 
5242 		num_connectors++;
5243 	}
5244 
5245 	refclk = i9xx_get_refclk(crtc, num_connectors);
5246 
5247 	/*
5248 	 * Returns a set of divisors for the desired target clock with the given
5249 	 * refclk, or false.  The returned values represent the clock equation:
5250 	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
5251 	 */
5252 	limit = intel_limit(crtc, refclk);
5253 	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
5254 			     &clock);
5255 	if (!ok) {
5256 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
5257 		return -EINVAL;
5258 	}
5259 
5260 	/* Ensure that the cursor is valid for the new mode before changing... */
5261 	intel_crtc_update_cursor(crtc, true);
5262 
5263 	if (is_lvds && dev_priv->lvds_downclock_avail) {
5264 		/*
5265 		 * Ensure we match the reduced clock's P to the target clock.
5266 		 * If the clocks don't match, we can't switch the display clock
		 * by using FP0/FP1. In that case we disable the LVDS
		 * downclock feature.
		 */
5270 		has_reduced_clock = limit->find_pll(limit, crtc,
5271 						    dev_priv->lvds_downclock,
5272 						    refclk,
5273 						    &clock,
5274 						    &reduced_clock);
5275 	}
5276 
5277 	if (is_sdvo && is_tv)
5278 		i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
5279 
5280 	i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
5281 				 &reduced_clock : NULL);
5282 
5283 	dpll = DPLL_VGA_MODE_DIS;
5284 
5285 	if (!IS_GEN2(dev)) {
5286 		if (is_lvds)
5287 			dpll |= DPLLB_MODE_LVDS;
5288 		else
5289 			dpll |= DPLLB_MODE_DAC_SERIAL;
5290 		if (is_sdvo) {
5291 			int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5292 			if (pixel_multiplier > 1) {
5293 				if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
5294 					dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
5295 			}
5296 			dpll |= DPLL_DVO_HIGH_SPEED;
5297 		}
5298 		if (is_dp)
5299 			dpll |= DPLL_DVO_HIGH_SPEED;
5300 
5301 		/* compute bitmask from p1 value */
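		/* e.g. p1 == 3 yields (1 << 2) == 0x4 before the shift */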
5302 		if (IS_PINEVIEW(dev))
5303 			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
5304 		else {
5305 			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5306 			if (IS_G4X(dev) && has_reduced_clock)
5307 				dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5308 		}
5309 		switch (clock.p2) {
5310 		case 5:
5311 			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5312 			break;
5313 		case 7:
5314 			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5315 			break;
5316 		case 10:
5317 			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5318 			break;
5319 		case 14:
5320 			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5321 			break;
5322 		}
5323 		if (INTEL_INFO(dev)->gen >= 4)
5324 			dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
5325 	} else {
5326 		if (is_lvds) {
5327 			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5328 		} else {
5329 			if (clock.p1 == 2)
5330 				dpll |= PLL_P1_DIVIDE_BY_TWO;
5331 			else
5332 				dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5333 			if (clock.p2 == 4)
5334 				dpll |= PLL_P2_DIVIDE_BY_4;
5335 		}
5336 	}
5337 
5338 	if (is_sdvo && is_tv)
5339 		dpll |= PLL_REF_INPUT_TVCLKINBC;
5340 	else if (is_tv)
5341 		/* XXX: just matching BIOS for now */
5342 		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
5343 		dpll |= 3;
5344 	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5345 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5346 	else
5347 		dpll |= PLL_REF_INPUT_DREFCLK;
5348 
5349 	/* setup pipeconf */
5350 	pipeconf = I915_READ(PIPECONF(pipe));
5351 
5352 	/* Set up the display plane register */
5353 	dspcntr = DISPPLANE_GAMMA_ENABLE;
5354 
5355 	if (pipe == 0)
5356 		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
5357 	else
5358 		dspcntr |= DISPPLANE_SEL_PIPE_B;
5359 
5360 	if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
5361 		/* Enable pixel doubling when the dot clock is > 90% of the (display)
5362 		 * core speed.
5363 		 *
5364 		 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
5365 		 * pipe == 0 check?
5366 		 */
5367 		if (mode->clock >
5368 		    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
5369 			pipeconf |= PIPECONF_DOUBLE_WIDE;
5370 		else
5371 			pipeconf &= ~PIPECONF_DOUBLE_WIDE;
5372 	}
5373 
5374 	/* default to 8bpc */
5375 	pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
5376 	if (is_dp) {
5377 		if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
5378 			pipeconf |= PIPECONF_BPP_6 |
5379 				    PIPECONF_DITHER_EN |
5380 				    PIPECONF_DITHER_TYPE_SP;
5381 		}
5382 	}
5383 
5384 	dpll |= DPLL_VCO_ENABLE;
5385 
5386 	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
5387 	drm_mode_debug_printmodeline(mode);
5388 
5389 	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
5390 
5391 	POSTING_READ(DPLL(pipe));
5392 	DELAY(150);
5393 
5394 	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
5395 	 * This is an exception to the general rule that mode_set doesn't turn
5396 	 * things on.
5397 	 */
5398 	if (is_lvds) {
5399 		temp = I915_READ(LVDS);
5400 		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5401 		if (pipe == 1) {
5402 			temp |= LVDS_PIPEB_SELECT;
5403 		} else {
5404 			temp &= ~LVDS_PIPEB_SELECT;
5405 		}
		/* set the corresponding LVDS_BORDER bit */
5407 		temp |= dev_priv->lvds_border_bits;
5408 		/* Set the B0-B3 data pairs corresponding to whether we're going to
5409 		 * set the DPLLs for dual-channel mode or not.
5410 		 */
5411 		if (clock.p2 == 7)
5412 			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
5413 		else
5414 			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
5415 
5416 		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
5417 		 * appropriately here, but we need to look more thoroughly into how
5418 		 * panels behave in the two modes.
5419 		 */
5420 		/* set the dithering flag on LVDS as needed */
5421 		if (INTEL_INFO(dev)->gen >= 4) {
5422 			if (dev_priv->lvds_dither)
5423 				temp |= LVDS_ENABLE_DITHER;
5424 			else
5425 				temp &= ~LVDS_ENABLE_DITHER;
5426 		}
5427 		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5428 			lvds_sync |= LVDS_HSYNC_POLARITY;
5429 		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5430 			lvds_sync |= LVDS_VSYNC_POLARITY;
5431 		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
5432 		    != lvds_sync) {
5433 			char flags[2] = "-+";
5434 			DRM_INFO("Changing LVDS panel from "
5435 				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
5436 				 flags[!(temp & LVDS_HSYNC_POLARITY)],
5437 				 flags[!(temp & LVDS_VSYNC_POLARITY)],
5438 				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
5439 				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
5440 			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5441 			temp |= lvds_sync;
5442 		}
5443 		I915_WRITE(LVDS, temp);
5444 	}
5445 
5446 	if (is_dp) {
5447 		intel_dp_set_m_n(crtc, mode, adjusted_mode);
5448 	}
5449 
5450 	I915_WRITE(DPLL(pipe), dpll);
5451 
5452 	/* Wait for the clocks to stabilize. */
5453 	POSTING_READ(DPLL(pipe));
5454 	DELAY(150);
5455 
5456 	if (INTEL_INFO(dev)->gen >= 4) {
5457 		temp = 0;
5458 		if (is_sdvo) {
5459 			temp = intel_mode_get_pixel_multiplier(adjusted_mode);
5460 			if (temp > 1)
5461 				temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5462 			else
5463 				temp = 0;
5464 		}
5465 		I915_WRITE(DPLL_MD(pipe), temp);
5466 	} else {
5467 		/* The pixel multiplier can only be updated once the
5468 		 * DPLL is enabled and the clocks are stable.
5469 		 *
5470 		 * So write it again.
5471 		 */
5472 		I915_WRITE(DPLL(pipe), dpll);
5473 	}
5474 
5475 	if (HAS_PIPE_CXSR(dev)) {
5476 		if (intel_crtc->lowfreq_avail) {
5477 			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
5478 			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
5479 		} else {
5480 			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
5481 			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
5482 		}
5483 	}
5484 
5485 	pipeconf &= ~PIPECONF_INTERLACE_MASK;
5486 	if (!IS_GEN2(dev) &&
5487 	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5488 		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5489 		/* the chip adds 2 halflines automatically */
5490 		adjusted_mode->crtc_vtotal -= 1;
5491 		adjusted_mode->crtc_vblank_end -= 1;
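		/* Program where the second field's vsync falls: interlaced
		 * fields are staggered by half a scanline, hence the
		 * hsync_start - htotal/2 term (in pixels).
		 */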
5492 		vsyncshift = adjusted_mode->crtc_hsync_start
5493 			     - adjusted_mode->crtc_htotal/2;
5494 	} else {
5495 		pipeconf |= PIPECONF_PROGRESSIVE;
5496 		vsyncshift = 0;
5497 	}
5498 
5499 	if (!IS_GEN3(dev))
5500 		I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
5501 
5502 	I915_WRITE(HTOTAL(pipe),
5503 		   (adjusted_mode->crtc_hdisplay - 1) |
5504 		   ((adjusted_mode->crtc_htotal - 1) << 16));
5505 	I915_WRITE(HBLANK(pipe),
5506 		   (adjusted_mode->crtc_hblank_start - 1) |
5507 		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
5508 	I915_WRITE(HSYNC(pipe),
5509 		   (adjusted_mode->crtc_hsync_start - 1) |
5510 		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
5511 
5512 	I915_WRITE(VTOTAL(pipe),
5513 		   (adjusted_mode->crtc_vdisplay - 1) |
5514 		   ((adjusted_mode->crtc_vtotal - 1) << 16));
5515 	I915_WRITE(VBLANK(pipe),
5516 		   (adjusted_mode->crtc_vblank_start - 1) |
5517 		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
5518 	I915_WRITE(VSYNC(pipe),
5519 		   (adjusted_mode->crtc_vsync_start - 1) |
5520 		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
5521 
5522 	/* pipesrc and dspsize control the size that is scaled from,
5523 	 * which should always be the user's requested size.
5524 	 */
5525 	I915_WRITE(DSPSIZE(plane),
5526 		   ((mode->vdisplay - 1) << 16) |
5527 		   (mode->hdisplay - 1));
5528 	I915_WRITE(DSPPOS(plane), 0);
5529 	I915_WRITE(PIPESRC(pipe),
5530 		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
5531 
5532 	I915_WRITE(PIPECONF(pipe), pipeconf);
5533 	POSTING_READ(PIPECONF(pipe));
5534 	intel_enable_pipe(dev_priv, pipe, false);
5535 
5536 	intel_wait_for_vblank(dev, pipe);
5537 
5538 	I915_WRITE(DSPCNTR(plane), dspcntr);
5539 	POSTING_READ(DSPCNTR(plane));
5540 	intel_enable_plane(dev_priv, plane, pipe);
5541 
5542 	ret = intel_pipe_set_base(crtc, x, y, old_fb);
5543 
5544 	intel_update_watermarks(dev);
5545 
5546 	return ret;
5547 }
5548 
5549 /*
5550  * Initialize reference clocks when the driver loads
5551  */
5552 void ironlake_init_pch_refclk(struct drm_device *dev)
5553 {
5554 	struct drm_i915_private *dev_priv = dev->dev_private;
5555 	struct drm_mode_config *mode_config = &dev->mode_config;
5556 	struct intel_encoder *encoder;
5557 	u32 temp;
5558 	bool has_lvds = false;
5559 	bool has_cpu_edp = false;
5560 	bool has_pch_edp = false;
5561 	bool has_panel = false;
5562 	bool has_ck505 = false;
5563 	bool can_ssc = false;
5564 
5565 	/* We need to take the global config into account */
5566 	list_for_each_entry(encoder, &mode_config->encoder_list,
5567 			    base.head) {
5568 		switch (encoder->type) {
5569 		case INTEL_OUTPUT_LVDS:
5570 			has_panel = true;
5571 			has_lvds = true;
5572 			break;
5573 		case INTEL_OUTPUT_EDP:
5574 			has_panel = true;
5575 			if (intel_encoder_is_pch_edp(&encoder->base))
5576 				has_pch_edp = true;
5577 			else
5578 				has_cpu_edp = true;
5579 			break;
5580 		}
5581 	}
5582 
5583 	if (HAS_PCH_IBX(dev)) {
5584 		has_ck505 = dev_priv->display_clock_mode;
5585 		can_ssc = has_ck505;
5586 	} else {
5587 		has_ck505 = false;
5588 		can_ssc = true;
5589 	}
5590 
5591 	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
5592 		      has_panel, has_lvds, has_pch_edp, has_cpu_edp,
5593 		      has_ck505);
5594 
	/* Ironlake: try to set up the display reference clock before
	 * enabling the DPLLs. This is only under the driver's control
	 * after the PCH B stepping; earlier steppings ignore this
	 * setting.
5599 	 */
5600 	temp = I915_READ(PCH_DREF_CONTROL);
5601 	/* Always enable nonspread source */
5602 	temp &= ~DREF_NONSPREAD_SOURCE_MASK;
5603 
5604 	if (has_ck505)
5605 		temp |= DREF_NONSPREAD_CK505_ENABLE;
5606 	else
5607 		temp |= DREF_NONSPREAD_SOURCE_ENABLE;
5608 
5609 	if (has_panel) {
5610 		temp &= ~DREF_SSC_SOURCE_MASK;
5611 		temp |= DREF_SSC_SOURCE_ENABLE;
5612 
		/* SSC must be turned on before enabling the CPU output */
5614 		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5615 			DRM_DEBUG_KMS("Using SSC on panel\n");
5616 			temp |= DREF_SSC1_ENABLE;
5617 		} else
5618 			temp &= ~DREF_SSC1_ENABLE;
5619 
5620 		/* Get SSC going before enabling the outputs */
5621 		I915_WRITE(PCH_DREF_CONTROL, temp);
5622 		POSTING_READ(PCH_DREF_CONTROL);
5623 		DELAY(200);
5624 
5625 		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5626 
5627 		/* Enable CPU source on CPU attached eDP */
5628 		if (has_cpu_edp) {
5629 			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
5630 				DRM_DEBUG_KMS("Using SSC on eDP\n");
5631 				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5632 			}
5633 			else
5634 				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5635 		} else
5636 			temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5637 
5638 		I915_WRITE(PCH_DREF_CONTROL, temp);
5639 		POSTING_READ(PCH_DREF_CONTROL);
5640 		DELAY(200);
5641 	} else {
5642 		DRM_DEBUG_KMS("Disabling SSC entirely\n");
5643 
5644 		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5645 
5646 		/* Turn off CPU output */
5647 		temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5648 
5649 		I915_WRITE(PCH_DREF_CONTROL, temp);
5650 		POSTING_READ(PCH_DREF_CONTROL);
5651 		DELAY(200);
5652 
5653 		/* Turn off the SSC source */
5654 		temp &= ~DREF_SSC_SOURCE_MASK;
5655 		temp |= DREF_SSC_SOURCE_DISABLE;
5656 
5657 		/* Turn off SSC1 */
5658 		temp &= ~ DREF_SSC1_ENABLE;
5659 
5660 		I915_WRITE(PCH_DREF_CONTROL, temp);
5661 		POSTING_READ(PCH_DREF_CONTROL);
5662 		DELAY(200);
5663 	}
5664 }
5665 
5666 static int ironlake_get_refclk(struct drm_crtc *crtc)
5667 {
5668 	struct drm_device *dev = crtc->dev;
5669 	struct drm_i915_private *dev_priv = dev->dev_private;
5670 	struct intel_encoder *encoder;
5671 	struct drm_mode_config *mode_config = &dev->mode_config;
5672 	struct intel_encoder *edp_encoder = NULL;
5673 	int num_connectors = 0;
5674 	bool is_lvds = false;
5675 
5676 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5677 		if (encoder->base.crtc != crtc)
5678 			continue;
5679 
5680 		switch (encoder->type) {
5681 		case INTEL_OUTPUT_LVDS:
5682 			is_lvds = true;
5683 			break;
5684 		case INTEL_OUTPUT_EDP:
5685 			edp_encoder = encoder;
5686 			break;
5687 		}
5688 		num_connectors++;
5689 	}
5690 
5691 	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5692 		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
5693 			      dev_priv->lvds_ssc_freq);
5694 		return dev_priv->lvds_ssc_freq * 1000;
5695 	}
5696 
5697 	return 120000;
5698 }
5699 
5700 static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5701 				  struct drm_display_mode *mode,
5702 				  struct drm_display_mode *adjusted_mode,
5703 				  int x, int y,
5704 				  struct drm_framebuffer *old_fb)
5705 {
5706 	struct drm_device *dev = crtc->dev;
5707 	struct drm_i915_private *dev_priv = dev->dev_private;
5708 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5709 	int pipe = intel_crtc->pipe;
5710 	int plane = intel_crtc->plane;
5711 	int refclk, num_connectors = 0;
5712 	intel_clock_t clock, reduced_clock;
5713 	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
5714 	bool ok, has_reduced_clock = false, is_sdvo = false;
5715 	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
5716 	struct intel_encoder *has_edp_encoder = NULL;
5717 	struct drm_mode_config *mode_config = &dev->mode_config;
5718 	struct intel_encoder *encoder;
5719 	const intel_limit_t *limit;
5720 	int ret;
5721 	struct fdi_m_n m_n = {0};
5722 	u32 temp;
5723 	u32 lvds_sync = 0;
5724 	int target_clock, pixel_multiplier, lane, link_bw, factor;
5725 	unsigned int pipe_bpp;
5726 	bool dither;
5727 
5728 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5729 		if (encoder->base.crtc != crtc)
5730 			continue;
5731 
5732 		switch (encoder->type) {
5733 		case INTEL_OUTPUT_LVDS:
5734 			is_lvds = true;
5735 			break;
5736 		case INTEL_OUTPUT_SDVO:
5737 		case INTEL_OUTPUT_HDMI:
5738 			is_sdvo = true;
5739 			if (encoder->needs_tv_clock)
5740 				is_tv = true;
5741 			break;
5742 		case INTEL_OUTPUT_TVOUT:
5743 			is_tv = true;
5744 			break;
5745 		case INTEL_OUTPUT_ANALOG:
5746 			is_crt = true;
5747 			break;
5748 		case INTEL_OUTPUT_DISPLAYPORT:
5749 			is_dp = true;
5750 			break;
5751 		case INTEL_OUTPUT_EDP:
5752 			has_edp_encoder = encoder;
5753 			break;
5754 		}
5755 
5756 		num_connectors++;
5757 	}
5758 
5759 	refclk = ironlake_get_refclk(crtc);
5760 
5761 	/*
5762 	 * Returns a set of divisors for the desired target clock with the given
5763 	 * refclk, or false.  The returned values represent the clock equation:
	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
5765 	 */
5766 	limit = intel_limit(crtc, refclk);
5767 	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
5768 			     &clock);
5769 	if (!ok) {
5770 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
5771 		return -EINVAL;
5772 	}
5773 
5774 	/* Ensure that the cursor is valid for the new mode before changing... */
5775 	intel_crtc_update_cursor(crtc, true);
5776 
5777 	if (is_lvds && dev_priv->lvds_downclock_avail) {
5778 		/*
5779 		 * Ensure we match the reduced clock's P to the target clock.
5780 		 * If the clocks don't match, we can't switch the display clock
		 * by using FP0/FP1. In that case we disable the LVDS
		 * downclock feature.
		 */
5784 		has_reduced_clock = limit->find_pll(limit, crtc,
5785 						    dev_priv->lvds_downclock,
5786 						    refclk,
5787 						    &clock,
5788 						    &reduced_clock);
5789 	}
	/* SDVO TV has fixed PLL values that depend on its clock range;
	   this mirrors the VBIOS setting. */
5792 	if (is_sdvo && is_tv) {
5793 		if (adjusted_mode->clock >= 100000
5794 		    && adjusted_mode->clock < 140500) {
5795 			clock.p1 = 2;
5796 			clock.p2 = 10;
5797 			clock.n = 3;
5798 			clock.m1 = 16;
5799 			clock.m2 = 8;
5800 		} else if (adjusted_mode->clock >= 140500
5801 			   && adjusted_mode->clock <= 200000) {
5802 			clock.p1 = 1;
5803 			clock.p2 = 10;
5804 			clock.n = 6;
5805 			clock.m1 = 12;
5806 			clock.m2 = 8;
5807 		}
5808 	}
5809 
5810 	/* FDI link */
5811 	pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5812 	lane = 0;
5813 	/* CPU eDP doesn't require FDI link, so just set DP M/N
5814 	   according to current link config */
5815 	if (has_edp_encoder &&
5816 	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5817 		target_clock = mode->clock;
5818 		intel_edp_link_config(has_edp_encoder,
5819 				      &lane, &link_bw);
5820 	} else {
5821 		/* [e]DP over FDI requires target mode clock
5822 		   instead of link clock */
5823 		if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
5824 			target_clock = mode->clock;
5825 		else
5826 			target_clock = adjusted_mode->clock;
5827 
5828 		/* FDI is a binary signal running at ~2.7GHz, encoding
5829 		 * each output octet as 10 bits. The actual frequency
5830 		 * is stored as a divider into a 100MHz clock, and the
5831 		 * mode pixel clock is stored in units of 1KHz.
5832 		 * Hence the bw of each lane in terms of the mode signal
5833 		 * is:
5834 		 */
5835 		link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
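		/* e.g. a divider value of 27 (2.7 GHz) gives
		 * 27 * 100,000,000 / 1,000 / 10 = 270,000 kHz of
		 * octet bandwidth per lane after 8b/10b decoding.
		 */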
5836 	}
5837 
5838 	/* determine panel color depth */
5839 	temp = I915_READ(PIPECONF(pipe));
5840 	temp &= ~PIPE_BPC_MASK;
5841 	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
5842 	switch (pipe_bpp) {
5843 	case 18:
5844 		temp |= PIPE_6BPC;
5845 		break;
5846 	case 24:
5847 		temp |= PIPE_8BPC;
5848 		break;
5849 	case 30:
5850 		temp |= PIPE_10BPC;
5851 		break;
5852 	case 36:
5853 		temp |= PIPE_12BPC;
5854 		break;
5855 	default:
		kprintf("intel_choose_pipe_bpp_dither returned invalid value %d\n",
5857 			pipe_bpp);
5858 		temp |= PIPE_8BPC;
5859 		pipe_bpp = 24;
5860 		break;
5861 	}
5862 
5863 	intel_crtc->bpp = pipe_bpp;
5864 	I915_WRITE(PIPECONF(pipe), temp);
5865 
5866 	if (!lane) {
5867 		/*
5868 		 * Account for spread spectrum to avoid
5869 		 * oversubscribing the link. Max center spread
5870 		 * is 2.5%; use 5% for safety's sake.
5871 		 */
5872 		u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
5873 		lane = bps / (link_bw * 8) + 1;
5874 	}
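	/* Illustrative arithmetic (example values only): a 154000 kHz
	 * target at 24 bpp gives bps = 154000 * 24 * 21 / 20 = 3880800;
	 * with link_bw = 270000, lane = 3880800 / (270000 * 8) + 1 = 2.
	 */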
5875 
5876 	intel_crtc->fdi_lanes = lane;
5877 
5878 	if (pixel_multiplier > 1)
5879 		link_bw *= pixel_multiplier;
5880 	ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
5881 			     &m_n);
5882 
5883 	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5884 	if (has_reduced_clock)
5885 		fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
5886 			reduced_clock.m2;
5887 
5888 	/* Enable autotuning of the PLL clock (if permissible) */
5889 	factor = 21;
5890 	if (is_lvds) {
5891 		if ((intel_panel_use_ssc(dev_priv) &&
5892 		     dev_priv->lvds_ssc_freq == 100) ||
5893 		    (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
5894 			factor = 25;
5895 	} else if (is_sdvo && is_tv)
5896 		factor = 20;
5897 
5898 	if (clock.m < factor * clock.n)
5899 		fp |= FP_CB_TUNE;
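	/* e.g. with the default factor of 21, dividers giving m = 80 and
	 * n = 4 leave 80 < 84, so FP_CB_TUNE is set.
	 */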
5900 
5901 	dpll = 0;
5902 
5903 	if (is_lvds)
5904 		dpll |= DPLLB_MODE_LVDS;
5905 	else
5906 		dpll |= DPLLB_MODE_DAC_SERIAL;
5907 	if (is_sdvo) {
5908 		int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
5909 		if (pixel_multiplier > 1) {
5910 			dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
5911 		}
5912 		dpll |= DPLL_DVO_HIGH_SPEED;
5913 	}
5914 	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
5915 		dpll |= DPLL_DVO_HIGH_SPEED;
5916 
5917 	/* compute bitmask from p1 value */
5918 	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5919 	/* also FPA1 */
5920 	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
5921 
5922 	switch (clock.p2) {
5923 	case 5:
5924 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5925 		break;
5926 	case 7:
5927 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5928 		break;
5929 	case 10:
5930 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5931 		break;
5932 	case 14:
5933 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5934 		break;
5935 	}
5936 
5937 	if (is_sdvo && is_tv)
5938 		dpll |= PLL_REF_INPUT_TVCLKINBC;
5939 	else if (is_tv)
5940 		/* XXX: just matching BIOS for now */
5941 		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
5942 		dpll |= 3;
5943 	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5944 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5945 	else
5946 		dpll |= PLL_REF_INPUT_DREFCLK;
5947 
5948 	/* setup pipeconf */
5949 	pipeconf = I915_READ(PIPECONF(pipe));
5950 
5951 	/* Set up the display plane register */
5952 	dspcntr = DISPPLANE_GAMMA_ENABLE;
5953 
5954 	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
5955 	drm_mode_debug_printmodeline(mode);
5956 
5957 	/* PCH eDP needs FDI, but CPU eDP does not */
5958 	if (!intel_crtc->no_pll) {
5959 		if (!has_edp_encoder ||
5960 		    intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5961 			I915_WRITE(PCH_FP0(pipe), fp);
5962 			I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
5963 
5964 			POSTING_READ(PCH_DPLL(pipe));
5965 			DELAY(150);
5966 		}
5967 	} else {
5968 		if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) &&
5969 		    fp == I915_READ(PCH_FP0(0))) {
5970 			intel_crtc->use_pll_a = true;
5971 			DRM_DEBUG_KMS("using pipe a dpll\n");
5972 		} else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) &&
5973 			   fp == I915_READ(PCH_FP0(1))) {
5974 			intel_crtc->use_pll_a = false;
5975 			DRM_DEBUG_KMS("using pipe b dpll\n");
5976 		} else {
5977 			DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
5978 			return -EINVAL;
5979 		}
5980 	}
5981 
5982 	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
5983 	 * This is an exception to the general rule that mode_set doesn't turn
5984 	 * things on.
5985 	 */
5986 	if (is_lvds) {
5987 		temp = I915_READ(PCH_LVDS);
5988 		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5989 		if (HAS_PCH_CPT(dev)) {
5990 			temp &= ~PORT_TRANS_SEL_MASK;
5991 			temp |= PORT_TRANS_SEL_CPT(pipe);
5992 		} else {
5993 			if (pipe == 1)
5994 				temp |= LVDS_PIPEB_SELECT;
5995 			else
5996 				temp &= ~LVDS_PIPEB_SELECT;
5997 		}
5998 
		/* set the corresponding LVDS_BORDER bit */
6000 		temp |= dev_priv->lvds_border_bits;
6001 		/* Set the B0-B3 data pairs corresponding to whether we're going to
6002 		 * set the DPLLs for dual-channel mode or not.
6003 		 */
6004 		if (clock.p2 == 7)
6005 			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
6006 		else
6007 			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
6008 
6009 		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
6010 		 * appropriately here, but we need to look more thoroughly into how
6011 		 * panels behave in the two modes.
6012 		 */
6013 		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
6014 			lvds_sync |= LVDS_HSYNC_POLARITY;
6015 		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
6016 			lvds_sync |= LVDS_VSYNC_POLARITY;
6017 		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
6018 		    != lvds_sync) {
6019 			char flags[2] = "-+";
6020 			DRM_INFO("Changing LVDS panel from "
6021 				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
6022 				 flags[!(temp & LVDS_HSYNC_POLARITY)],
6023 				 flags[!(temp & LVDS_VSYNC_POLARITY)],
6024 				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
6025 				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
6026 			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
6027 			temp |= lvds_sync;
6028 		}
6029 		I915_WRITE(PCH_LVDS, temp);
6030 	}
6031 
6032 	pipeconf &= ~PIPECONF_DITHER_EN;
6033 	pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
6034 	if ((is_lvds && dev_priv->lvds_dither) || dither) {
6035 		pipeconf |= PIPECONF_DITHER_EN;
6036 		pipeconf |= PIPECONF_DITHER_TYPE_SP;
6037 	}
6038 	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
6039 		intel_dp_set_m_n(crtc, mode, adjusted_mode);
6040 	} else {
		/* For non-DP output, clear any trans DP clock recovery setting. */
6042 		I915_WRITE(TRANSDATA_M1(pipe), 0);
6043 		I915_WRITE(TRANSDATA_N1(pipe), 0);
6044 		I915_WRITE(TRANSDPLINK_M1(pipe), 0);
6045 		I915_WRITE(TRANSDPLINK_N1(pipe), 0);
6046 	}
6047 
6048 	if (!intel_crtc->no_pll &&
6049 	    (!has_edp_encoder ||
6050 	     intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
6051 		I915_WRITE(PCH_DPLL(pipe), dpll);
6052 
6053 		/* Wait for the clocks to stabilize. */
6054 		POSTING_READ(PCH_DPLL(pipe));
6055 		DELAY(150);
6056 
6057 		/* The pixel multiplier can only be updated once the
6058 		 * DPLL is enabled and the clocks are stable.
6059 		 *
6060 		 * So write it again.
6061 		 */
6062 		I915_WRITE(PCH_DPLL(pipe), dpll);
6063 	}
6064 
6065 	intel_crtc->lowfreq_avail = false;
6066 	if (!intel_crtc->no_pll) {
6067 		if (is_lvds && has_reduced_clock && i915_powersave) {
6068 			I915_WRITE(PCH_FP1(pipe), fp2);
6069 			intel_crtc->lowfreq_avail = true;
6070 			if (HAS_PIPE_CXSR(dev)) {
6071 				DRM_DEBUG_KMS("enabling CxSR downclocking\n");
6072 				pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
6073 			}
6074 		} else {
6075 			I915_WRITE(PCH_FP1(pipe), fp);
6076 			if (HAS_PIPE_CXSR(dev)) {
6077 				DRM_DEBUG_KMS("disabling CxSR downclocking\n");
6078 				pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
6079 			}
6080 		}
6081 	}
6082 
6083 	pipeconf &= ~PIPECONF_INTERLACE_MASK;
6084 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
6085 		pipeconf |= PIPECONF_INTERLACED_ILK;
6086 		/* the chip adds 2 halflines automatically */
6087 		adjusted_mode->crtc_vtotal -= 1;
6088 		adjusted_mode->crtc_vblank_end -= 1;
6089 		I915_WRITE(VSYNCSHIFT(pipe),
6090 			   adjusted_mode->crtc_hsync_start
6091 			   - adjusted_mode->crtc_htotal/2);
6092 	} else {
6093 		pipeconf |= PIPECONF_PROGRESSIVE;
6094 		I915_WRITE(VSYNCSHIFT(pipe), 0);
6095 	}
6096 
6097 	I915_WRITE(HTOTAL(pipe),
6098 		   (adjusted_mode->crtc_hdisplay - 1) |
6099 		   ((adjusted_mode->crtc_htotal - 1) << 16));
6100 	I915_WRITE(HBLANK(pipe),
6101 		   (adjusted_mode->crtc_hblank_start - 1) |
6102 		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
6103 	I915_WRITE(HSYNC(pipe),
6104 		   (adjusted_mode->crtc_hsync_start - 1) |
6105 		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
6106 
6107 	I915_WRITE(VTOTAL(pipe),
6108 		   (adjusted_mode->crtc_vdisplay - 1) |
6109 		   ((adjusted_mode->crtc_vtotal - 1) << 16));
6110 	I915_WRITE(VBLANK(pipe),
6111 		   (adjusted_mode->crtc_vblank_start - 1) |
6112 		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
6113 	I915_WRITE(VSYNC(pipe),
6114 		   (adjusted_mode->crtc_vsync_start - 1) |
6115 		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
6116 
6117 	/* pipesrc controls the size that is scaled from, which should
6118 	 * always be the user's requested size.
6119 	 */
6120 	I915_WRITE(PIPESRC(pipe),
6121 		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
6122 
6123 	I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
6124 	I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
6125 	I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
6126 	I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
6127 
6128 	if (has_edp_encoder &&
6129 	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
6130 		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
6131 	}
6132 
6133 	I915_WRITE(PIPECONF(pipe), pipeconf);
6134 	POSTING_READ(PIPECONF(pipe));
6135 
6136 	intel_wait_for_vblank(dev, pipe);
6137 
6138 	I915_WRITE(DSPCNTR(plane), dspcntr);
6139 	POSTING_READ(DSPCNTR(plane));
6140 
6141 	ret = intel_pipe_set_base(crtc, x, y, old_fb);
6142 
6143 	intel_update_watermarks(dev);
6144 
6145 	return ret;
6146 }
6147 
6148 static int intel_crtc_mode_set(struct drm_crtc *crtc,
6149 			       struct drm_display_mode *mode,
6150 			       struct drm_display_mode *adjusted_mode,
6151 			       int x, int y,
6152 			       struct drm_framebuffer *old_fb)
6153 {
6154 	struct drm_device *dev = crtc->dev;
6155 	struct drm_i915_private *dev_priv = dev->dev_private;
6156 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6157 	int pipe = intel_crtc->pipe;
6158 	int ret;
6159 
6160 	drm_vblank_pre_modeset(dev, pipe);
6161 
6162 	ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
6163 					      x, y, old_fb);
6164 	drm_vblank_post_modeset(dev, pipe);
6165 
6166 	if (ret)
6167 		intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
6168 	else
6169 		intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
6170 
6171 	return ret;
6172 }
6173 
6174 static bool intel_eld_uptodate(struct drm_connector *connector,
6175 			       int reg_eldv, uint32_t bits_eldv,
6176 			       int reg_elda, uint32_t bits_elda,
6177 			       int reg_edid)
6178 {
6179 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
6180 	uint8_t *eld = connector->eld;
6181 	uint32_t i;
6182 
6183 	i = I915_READ(reg_eldv);
6184 	i &= bits_eldv;
6185 
6186 	if (!eld[0])
6187 		return !i;
6188 
6189 	if (!i)
6190 		return false;
6191 
6192 	i = I915_READ(reg_elda);
6193 	i &= ~bits_elda;
6194 	I915_WRITE(reg_elda, i);
6195 
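	/* eld[2] holds the baseline ELD length in DWORDs; compare the
	 * cached ELD against the hw buffer one 32-bit word at a time.
	 */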
6196 	for (i = 0; i < eld[2]; i++)
6197 		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
6198 			return false;
6199 
6200 	return true;
6201 }
6202 
6203 static void g4x_write_eld(struct drm_connector *connector,
6204 			  struct drm_crtc *crtc)
6205 {
6206 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
6207 	uint8_t *eld = connector->eld;
6208 	uint32_t eldv;
6209 	uint32_t len;
6210 	uint32_t i;
6211 
6212 	i = I915_READ(G4X_AUD_VID_DID);
6213 
6214 	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
6215 		eldv = G4X_ELDV_DEVCL_DEVBLC;
6216 	else
6217 		eldv = G4X_ELDV_DEVCTG;
6218 
6219 	if (intel_eld_uptodate(connector,
6220 			       G4X_AUD_CNTL_ST, eldv,
6221 			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
6222 			       G4X_HDMIW_HDMIEDID))
6223 		return;
6224 
6225 	i = I915_READ(G4X_AUD_CNTL_ST);
6226 	i &= ~(eldv | G4X_ELD_ADDR);
6227 	len = (i >> 9) & 0x1f;		/* ELD buffer size */
6228 	I915_WRITE(G4X_AUD_CNTL_ST, i);
6229 
6230 	if (!eld[0])
6231 		return;
6232 
6233 	if (eld[2] < (uint8_t)len)
6234 		len = eld[2];
6235 	DRM_DEBUG_KMS("ELD size %d\n", len);
6236 	for (i = 0; i < len; i++)
6237 		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
6238 
6239 	i = I915_READ(G4X_AUD_CNTL_ST);
6240 	i |= eldv;
6241 	I915_WRITE(G4X_AUD_CNTL_ST, i);
6242 }
6243 
6244 static void ironlake_write_eld(struct drm_connector *connector,
6245 				     struct drm_crtc *crtc)
6246 {
6247 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
6248 	uint8_t *eld = connector->eld;
6249 	uint32_t eldv;
6250 	uint32_t i;
6251 	int len;
6252 	int hdmiw_hdmiedid;
6253 	int aud_config;
6254 	int aud_cntl_st;
6255 	int aud_cntrl_st2;
6256 
6257 	if (HAS_PCH_IBX(connector->dev)) {
6258 		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
6259 		aud_config = IBX_AUD_CONFIG_A;
6260 		aud_cntl_st = IBX_AUD_CNTL_ST_A;
6261 		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
6262 	} else {
6263 		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
6264 		aud_config = CPT_AUD_CONFIG_A;
6265 		aud_cntl_st = CPT_AUD_CNTL_ST_A;
6266 		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
6267 	}
6268 
6269 	i = to_intel_crtc(crtc)->pipe;
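	/* per-pipe audio register blocks are spaced 0x100 apart */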
6270 	hdmiw_hdmiedid += i * 0x100;
6271 	aud_cntl_st += i * 0x100;
6272 	aud_config += i * 0x100;
6273 
6274 	DRM_DEBUG_KMS("ELD on pipe %c\n", pipe_name(i));
6275 
6276 	i = I915_READ(aud_cntl_st);
6277 	i = (i >> 29) & 0x3;		/* DIP_Port_Select, 0x1 = PortB */
6278 	if (!i) {
6279 		DRM_DEBUG_KMS("Audio directed to unknown port\n");
6280 		/* operate blindly on all ports */
6281 		eldv = IBX_ELD_VALIDB;
6282 		eldv |= IBX_ELD_VALIDB << 4;
6283 		eldv |= IBX_ELD_VALIDB << 8;
6284 	} else {
6285 		DRM_DEBUG_KMS("ELD on port %c\n", 'A' + i);
6286 		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
6287 	}
6288 
6289 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
6290 		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
6291 		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
6292 		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
6293 	} else
6294 		I915_WRITE(aud_config, 0);
6295 
6296 	if (intel_eld_uptodate(connector,
6297 			       aud_cntrl_st2, eldv,
6298 			       aud_cntl_st, IBX_ELD_ADDRESS,
6299 			       hdmiw_hdmiedid))
6300 		return;
6301 
6302 	i = I915_READ(aud_cntrl_st2);
6303 	i &= ~eldv;
6304 	I915_WRITE(aud_cntrl_st2, i);
6305 
6306 	if (!eld[0])
6307 		return;
6308 
6309 	i = I915_READ(aud_cntl_st);
6310 	i &= ~IBX_ELD_ADDRESS;
6311 	I915_WRITE(aud_cntl_st, i);
6312 
	/* 84 bytes of hw ELD buffer, i.e. 21 DWORD writes */
	len = 21;
6315 	if (eld[2] < (uint8_t)len)
6316 		len = eld[2];
6317 	DRM_DEBUG_KMS("ELD size %d\n", len);
6318 	for (i = 0; i < len; i++)
6319 		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
6320 
6321 	i = I915_READ(aud_cntrl_st2);
6322 	i |= eldv;
6323 	I915_WRITE(aud_cntrl_st2, i);
6324 }
6325 
6326 void intel_write_eld(struct drm_encoder *encoder,
6327 		     struct drm_display_mode *mode)
6328 {
6329 	struct drm_crtc *crtc = encoder->crtc;
6330 	struct drm_connector *connector;
6331 	struct drm_device *dev = encoder->dev;
6332 	struct drm_i915_private *dev_priv = dev->dev_private;
6333 
6334 	connector = drm_select_eld(encoder, mode);
6335 	if (!connector)
6336 		return;
6337 
6338 	DRM_DEBUG_KMS("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6339 			 connector->base.id,
6340 			 drm_get_connector_name(connector),
6341 			 connector->encoder->base.id,
6342 			 drm_get_encoder_name(connector->encoder));
6343 
6344 	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
6345 
6346 	if (dev_priv->display.write_eld)
6347 		dev_priv->display.write_eld(connector, crtc);
6348 }
6349 
6350 /** Loads the palette/gamma unit for the CRTC with the prepared values */
6351 void intel_crtc_load_lut(struct drm_crtc *crtc)
6352 {
6353 	struct drm_device *dev = crtc->dev;
6354 	struct drm_i915_private *dev_priv = dev->dev_private;
6355 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6356 	int palreg = PALETTE(intel_crtc->pipe);
6357 	int i;
6358 
6359 	/* The clocks have to be on to load the palette. */
6360 	if (!crtc->enabled || !intel_crtc->active)
6361 		return;
6362 
6363 	/* use legacy palette for Ironlake */
6364 	if (HAS_PCH_SPLIT(dev))
6365 		palreg = LGC_PALETTE(intel_crtc->pipe);
6366 
6367 	for (i = 0; i < 256; i++) {
6368 		I915_WRITE(palreg + 4 * i,
6369 			   (intel_crtc->lut_r[i] << 16) |
6370 			   (intel_crtc->lut_g[i] << 8) |
6371 			   intel_crtc->lut_b[i]);
6372 	}
6373 }
6374 
6375 static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
6376 {
6377 	struct drm_device *dev = crtc->dev;
6378 	struct drm_i915_private *dev_priv = dev->dev_private;
6379 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6380 	bool visible = base != 0;
6381 	u32 cntl;
6382 
6383 	if (intel_crtc->cursor_visible == visible)
6384 		return;
6385 
6386 	cntl = I915_READ(_CURACNTR);
6387 	if (visible) {
6388 		/* On these chipsets we can only modify the base whilst
6389 		 * the cursor is disabled.
6390 		 */
6391 		I915_WRITE(_CURABASE, base);
6392 
6393 		cntl &= ~(CURSOR_FORMAT_MASK);
6394 		/* XXX width must be 64, stride 256 => 0x00 << 28 */
6395 		cntl |= CURSOR_ENABLE |
6396 			CURSOR_GAMMA_ENABLE |
6397 			CURSOR_FORMAT_ARGB;
6398 	} else
6399 		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
6400 	I915_WRITE(_CURACNTR, cntl);
6401 
6402 	intel_crtc->cursor_visible = visible;
6403 }
6404 
6405 static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
6406 {
6407 	struct drm_device *dev = crtc->dev;
6408 	struct drm_i915_private *dev_priv = dev->dev_private;
6409 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6410 	int pipe = intel_crtc->pipe;
6411 	bool visible = base != 0;
6412 
6413 	if (intel_crtc->cursor_visible != visible) {
6414 		uint32_t cntl = I915_READ(CURCNTR(pipe));
6415 		if (base) {
6416 			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
6417 			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
6418 			cntl |= pipe << 28; /* Connect to correct pipe */
6419 		} else {
6420 			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
6421 			cntl |= CURSOR_MODE_DISABLE;
6422 		}
6423 		I915_WRITE(CURCNTR(pipe), cntl);
6424 
6425 		intel_crtc->cursor_visible = visible;
6426 	}
6427 	/* and commit changes on next vblank */
6428 	I915_WRITE(CURBASE(pipe), base);
6429 }
6430 
6431 static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
6432 {
6433 	struct drm_device *dev = crtc->dev;
6434 	struct drm_i915_private *dev_priv = dev->dev_private;
6435 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6436 	int pipe = intel_crtc->pipe;
6437 	bool visible = base != 0;
6438 
6439 	if (intel_crtc->cursor_visible != visible) {
6440 		uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
6441 		if (base) {
6442 			cntl &= ~CURSOR_MODE;
6443 			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
6444 		} else {
6445 			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
6446 			cntl |= CURSOR_MODE_DISABLE;
6447 		}
6448 		I915_WRITE(CURCNTR_IVB(pipe), cntl);
6449 
6450 		intel_crtc->cursor_visible = visible;
6451 	}
6452 	/* and commit changes on next vblank */
6453 	I915_WRITE(CURBASE_IVB(pipe), base);
6454 }
6455 
/* If no part of the cursor is visible on the framebuffer, the GPU may hang... */
6457 static void intel_crtc_update_cursor(struct drm_crtc *crtc,
6458 				     bool on)
6459 {
6460 	struct drm_device *dev = crtc->dev;
6461 	struct drm_i915_private *dev_priv = dev->dev_private;
6462 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6463 	int pipe = intel_crtc->pipe;
6464 	int x = intel_crtc->cursor_x;
6465 	int y = intel_crtc->cursor_y;
6466 	u32 base, pos;
6467 	bool visible;
6468 
6469 	pos = 0;
6470 
6471 	if (on && crtc->enabled && crtc->fb) {
6472 		base = intel_crtc->cursor_addr;
6473 		if (x > (int) crtc->fb->width)
6474 			base = 0;
6475 
6476 		if (y > (int) crtc->fb->height)
6477 			base = 0;
6478 	} else
6479 		base = 0;
6480 
6481 	if (x < 0) {
6482 		if (x + intel_crtc->cursor_width < 0)
6483 			base = 0;
6484 
6485 		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
6486 		x = -x;
6487 	}
6488 	pos |= x << CURSOR_X_SHIFT;
6489 
6490 	if (y < 0) {
6491 		if (y + intel_crtc->cursor_height < 0)
6492 			base = 0;
6493 
6494 		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
6495 		y = -y;
6496 	}
6497 	pos |= y << CURSOR_Y_SHIFT;
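	/* CURPOS is sign-magnitude: negative coordinates set the sign
	 * bit for the axis and store the absolute value.
	 */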
6498 
6499 	visible = base != 0;
6500 	if (!visible && !intel_crtc->cursor_visible)
6501 		return;
6502 
6503 	if (IS_IVYBRIDGE(dev)) {
6504 		I915_WRITE(CURPOS_IVB(pipe), pos);
6505 		ivb_update_cursor(crtc, base);
6506 	} else {
6507 		I915_WRITE(CURPOS(pipe), pos);
6508 		if (IS_845G(dev) || IS_I865G(dev))
6509 			i845_update_cursor(crtc, base);
6510 		else
6511 			i9xx_update_cursor(crtc, base);
6512 	}
6513 
6514 	if (visible)
6515 		intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
6516 }
6517 
6518 static int intel_crtc_cursor_set(struct drm_crtc *crtc,
6519 				 struct drm_file *file,
6520 				 uint32_t handle,
6521 				 uint32_t width, uint32_t height)
6522 {
6523 	struct drm_device *dev = crtc->dev;
6524 	struct drm_i915_private *dev_priv = dev->dev_private;
6525 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6526 	struct drm_i915_gem_object *obj;
6527 	uint32_t addr;
6528 	int ret;
6529 
6530 	DRM_DEBUG_KMS("\n");
6531 
6532 	/* if we want to turn off the cursor ignore width and height */
6533 	if (!handle) {
6534 		DRM_DEBUG_KMS("cursor off\n");
6535 		addr = 0;
6536 		obj = NULL;
6537 		DRM_LOCK(dev);
6538 		goto finish;
6539 	}
6540 
6541 	/* Currently we only support 64x64 cursors */
6542 	if (width != 64 || height != 64) {
6543 		DRM_ERROR("we currently only support 64x64 cursors\n");
6544 		return -EINVAL;
6545 	}
6546 
6547 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
6548 	if (&obj->base == NULL)
6549 		return -ENOENT;
6550 
6551 	if (obj->base.size < width * height * 4) {
		DRM_ERROR("buffer is too small\n");
6553 		ret = -ENOMEM;
6554 		goto fail;
6555 	}
6556 
6557 	/* we only need to pin inside GTT if cursor is non-phy */
6558 	DRM_LOCK(dev);
6559 	if (!dev_priv->info->cursor_needs_physical) {
6560 		if (obj->tiling_mode) {
6561 			DRM_ERROR("cursor cannot be tiled\n");
6562 			ret = -EINVAL;
6563 			goto fail_locked;
6564 		}
6565 
6566 		ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
6567 		if (ret) {
6568 			DRM_ERROR("failed to move cursor bo into the GTT\n");
6569 			goto fail_locked;
6570 		}
6571 
6572 		ret = i915_gem_object_put_fence(obj);
6573 		if (ret) {
6574 			DRM_ERROR("failed to release fence for cursor\n");
6575 			goto fail_unpin;
6576 		}
6577 
6578 		addr = obj->gtt_offset;
6579 	} else {
6580 		int align = IS_I830(dev) ? 16 * 1024 : 256;
6581 		ret = i915_gem_attach_phys_object(dev, obj,
6582 						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
6583 						  align);
6584 		if (ret) {
6585 			DRM_ERROR("failed to attach phys object\n");
6586 			goto fail_locked;
6587 		}
6588 		addr = obj->phys_obj->handle->busaddr;
6589 	}
6590 
6591 	if (IS_GEN2(dev))
6592 		I915_WRITE(CURSIZE, (height << 12) | width);
6593 
6594  finish:
6595 	if (intel_crtc->cursor_bo) {
6596 		if (dev_priv->info->cursor_needs_physical) {
6597 			if (intel_crtc->cursor_bo != obj)
6598 				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
6599 		} else
6600 			i915_gem_object_unpin(intel_crtc->cursor_bo);
6601 		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
6602 	}
6603 
6604 	DRM_UNLOCK(dev);
6605 
6606 	intel_crtc->cursor_addr = addr;
6607 	intel_crtc->cursor_bo = obj;
6608 	intel_crtc->cursor_width = width;
6609 	intel_crtc->cursor_height = height;
6610 
6611 	intel_crtc_update_cursor(crtc, true);
6612 
6613 	return 0;
6614 fail_unpin:
6615 	i915_gem_object_unpin(obj);
6616 fail_locked:
6617 	DRM_UNLOCK(dev);
6618 fail:
6619 	drm_gem_object_unreference_unlocked(&obj->base);
6620 	return ret;
6621 }
6622 
6623 static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
6624 {
6625 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6626 
6627 	intel_crtc->cursor_x = x;
6628 	intel_crtc->cursor_y = y;
6629 
6630 	intel_crtc_update_cursor(crtc, true);
6631 
6632 	return 0;
6633 }
6634 
6635 /** Sets the color ramps on behalf of RandR */
6636 void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
6637 				 u16 blue, int regno)
6638 {
6639 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6640 
6641 	intel_crtc->lut_r[regno] = red >> 8;
6642 	intel_crtc->lut_g[regno] = green >> 8;
6643 	intel_crtc->lut_b[regno] = blue >> 8;
6644 }
6645 
6646 void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
6647 			     u16 *blue, int regno)
6648 {
6649 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6650 
6651 	*red = intel_crtc->lut_r[regno] << 8;
6652 	*green = intel_crtc->lut_g[regno] << 8;
6653 	*blue = intel_crtc->lut_b[regno] << 8;
6654 }
6655 
6656 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
6657 				 u16 *blue, uint32_t start, uint32_t size)
6658 {
6659 	int end = (start + size > 256) ? 256 : start + size, i;
6660 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6661 
6662 	for (i = start; i < end; i++) {
6663 		intel_crtc->lut_r[i] = red[i] >> 8;
6664 		intel_crtc->lut_g[i] = green[i] >> 8;
6665 		intel_crtc->lut_b[i] = blue[i] >> 8;
6666 	}
6667 
6668 	intel_crtc_load_lut(crtc);
6669 }
6670 
6671 /**
6672  * Get a pipe with a simple mode set on it for doing load-based monitor
6673  * detection.
6674  *
6675  * It will be up to the load-detect code to adjust the pipe as appropriate for
6676  * its requirements.  The pipe will be connected to no other encoders.
6677  *
6678  * Currently this code will only succeed if there is a pipe with no encoders
6679  * configured for it.  In the future, it could choose to temporarily disable
6680  * some outputs to free up a pipe for its use.
6681  *
6682  * \return crtc, or NULL if no pipes are available.
6683  */
6684 
6685 /* VESA 640x480x72Hz mode to set on the pipe */
6686 static struct drm_display_mode load_detect_mode = {
6687 	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
6688 		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
6689 };
6690 
6691 static int
6692 intel_framebuffer_create(struct drm_device *dev,
6693     struct drm_mode_fb_cmd2 *mode_cmd, struct drm_i915_gem_object *obj,
6694      struct drm_framebuffer **res)
6695 {
6696 	struct intel_framebuffer *intel_fb;
6697 	int ret;
6698 
6699 	intel_fb = kmalloc(sizeof(*intel_fb), DRM_MEM_KMS, M_WAITOK | M_ZERO);
6700 	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
6701 	if (ret) {
6702 		drm_gem_object_unreference_unlocked(&obj->base);
6703 		drm_free(intel_fb, DRM_MEM_KMS);
6704 		return (ret);
6705 	}
6706 
6707 	*res = &intel_fb->base;
6708 	return (0);
6709 }
6710 
6711 static u32
6712 intel_framebuffer_pitch_for_width(int width, int bpp)
6713 {
6714 	u32 pitch = howmany(width * bpp, 8);
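	/* e.g. 640 px at 32 bpp: howmany(640 * 32, 8) = 2560 bytes,
	 * already a multiple of 64.
	 */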
6715 	return roundup2(pitch, 64);
6716 }
6717 
6718 static u32
6719 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
6720 {
6721 	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
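	/* e.g. the 640x480 load-detect mode at 32 bpp, assuming a 4 KiB
	 * PAGE_SIZE: 2560 * 480 = 1228800 bytes, exactly 300 pages.
	 */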
6722 	return roundup2(pitch * mode->vdisplay, PAGE_SIZE);
6723 }
6724 
6725 static int
6726 intel_framebuffer_create_for_mode(struct drm_device *dev,
6727     struct drm_display_mode *mode, int depth, int bpp,
6728     struct drm_framebuffer **res)
6729 {
6730 	struct drm_i915_gem_object *obj;
6731 	struct drm_mode_fb_cmd2 mode_cmd;
6732 
6733 	obj = i915_gem_alloc_object(dev,
6734 				    intel_framebuffer_size_for_mode(mode, bpp));
6735 	if (obj == NULL)
6736 		return (-ENOMEM);
6737 
6738 	mode_cmd.width = mode->hdisplay;
6739 	mode_cmd.height = mode->vdisplay;
6740 	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
6741 								bpp);
6742 	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
6743 
6744 	return (intel_framebuffer_create(dev, &mode_cmd, obj, res));
6745 }
6746 
6747 static int
6748 mode_fits_in_fbdev(struct drm_device *dev,
6749     struct drm_display_mode *mode, struct drm_framebuffer **res)
6750 {
6751 	struct drm_i915_private *dev_priv = dev->dev_private;
6752 	struct drm_i915_gem_object *obj;
6753 	struct drm_framebuffer *fb;
6754 
6755 	if (dev_priv->fbdev == NULL) {
6756 		*res = NULL;
6757 		return (0);
6758 	}
6759 
6760 	obj = dev_priv->fbdev->ifb.obj;
6761 	if (obj == NULL) {
6762 		*res = NULL;
6763 		return (0);
6764 	}
6765 
6766 	fb = &dev_priv->fbdev->ifb.base;
6767 	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
6768 	    fb->bits_per_pixel)) {
6769 		*res = NULL;
6770 		return (0);
6771 	}
6772 
6773 	if (obj->base.size < mode->vdisplay * fb->pitches[0]) {
6774 		*res = NULL;
6775 		return (0);
6776 	}
6777 
6778 	*res = fb;
6779 	return (0);
6780 }
6781 
6782 bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
6783 				struct drm_connector *connector,
6784 				struct drm_display_mode *mode,
6785 				struct intel_load_detect_pipe *old)
6786 {
6787 	struct intel_crtc *intel_crtc;
6788 	struct drm_crtc *possible_crtc;
6789 	struct drm_encoder *encoder = &intel_encoder->base;
6790 	struct drm_crtc *crtc = NULL;
6791 	struct drm_device *dev = encoder->dev;
6792 	struct drm_framebuffer *old_fb;
6793 	int i = -1, r;
6794 
6795 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6796 		      connector->base.id, drm_get_connector_name(connector),
6797 		      encoder->base.id, drm_get_encoder_name(encoder));
6798 
6799 	/*
6800 	 * Algorithm gets a little messy:
6801 	 *
6802 	 *   - if the connector already has an assigned crtc, use it (but make
6803 	 *     sure it's on first)
6804 	 *
6805 	 *   - try to find the first unused crtc that can drive this connector,
6806 	 *     and use that if we find one
6807 	 */
6808 
6809 	/* See if we already have a CRTC for this connector */
6810 	if (encoder->crtc) {
6811 		crtc = encoder->crtc;
6812 
6813 		intel_crtc = to_intel_crtc(crtc);
6814 		old->dpms_mode = intel_crtc->dpms_mode;
6815 		old->load_detect_temp = false;
6816 
6817 		/* Make sure the crtc and connector are running */
6818 		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
6819 			struct drm_encoder_helper_funcs *encoder_funcs;
6820 			struct drm_crtc_helper_funcs *crtc_funcs;
6821 
6822 			crtc_funcs = crtc->helper_private;
6823 			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
6824 
6825 			encoder_funcs = encoder->helper_private;
6826 			encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
6827 		}
6828 
6829 		return true;
6830 	}
6831 
6832 	/* Find an unused one (if possible) */
6833 	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
6834 		i++;
6835 		if (!(encoder->possible_crtcs & (1 << i)))
6836 			continue;
6837 		if (!possible_crtc->enabled) {
6838 			crtc = possible_crtc;
6839 			break;
6840 		}
6841 	}
6842 
6843 	/*
6844 	 * If we didn't find an unused CRTC, don't use any.
6845 	 */
6846 	if (!crtc) {
6847 		DRM_DEBUG_KMS("no pipe available for load-detect\n");
6848 		return false;
6849 	}
6850 
6851 	encoder->crtc = crtc;
6852 	connector->encoder = encoder;
6853 
6854 	intel_crtc = to_intel_crtc(crtc);
6855 	old->dpms_mode = intel_crtc->dpms_mode;
6856 	old->load_detect_temp = true;
6857 	old->release_fb = NULL;
6858 
6859 	if (!mode)
6860 		mode = &load_detect_mode;
6861 
6862 	old_fb = crtc->fb;
6863 
6864 	/* We need a framebuffer large enough to accommodate all accesses
6865 	 * that the plane may generate whilst we perform load detection.
	 * We cannot rely on the fbcon being present (we may be called
	 * during its initialisation to detect all boot displays, or it
	 * may not exist at all) or on it being large enough to satisfy
	 * the requested mode.
6870 	 */
6871 	r = mode_fits_in_fbdev(dev, mode, &crtc->fb);
6872 	if (crtc->fb == NULL) {
6873 		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
6874 		r = intel_framebuffer_create_for_mode(dev, mode, 24, 32,
6875 		    &crtc->fb);
6876 		old->release_fb = crtc->fb;
6877 	} else
6878 		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
6879 	if (r != 0) {
6880 		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
6881 		crtc->fb = old_fb;
6882 		return false;
6883 	}
6884 
6885 	if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
6886 		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
6887 		if (old->release_fb)
6888 			old->release_fb->funcs->destroy(old->release_fb);
6889 		crtc->fb = old_fb;
6890 		return false;
6891 	}
6892 
6893 	/* let the connector get through one full cycle before testing */
6894 	intel_wait_for_vblank(dev, intel_crtc->pipe);
6895 
6896 	return true;
6897 }
6898 
6899 void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
6900 				    struct drm_connector *connector,
6901 				    struct intel_load_detect_pipe *old)
6902 {
6903 	struct drm_encoder *encoder = &intel_encoder->base;
6904 	struct drm_device *dev = encoder->dev;
6905 	struct drm_crtc *crtc = encoder->crtc;
6906 	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
6907 	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
6908 
6909 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6910 		      connector->base.id, drm_get_connector_name(connector),
6911 		      encoder->base.id, drm_get_encoder_name(encoder));
6912 
6913 	if (old->load_detect_temp) {
6914 		connector->encoder = NULL;
6915 		drm_helper_disable_unused_functions(dev);
6916 
6917 		if (old->release_fb)
6918 			old->release_fb->funcs->destroy(old->release_fb);
6919 
6920 		return;
6921 	}
6922 
6923 	/* Switch crtc and encoder back off if necessary */
6924 	if (old->dpms_mode != DRM_MODE_DPMS_ON) {
6925 		encoder_funcs->dpms(encoder, old->dpms_mode);
6926 		crtc_funcs->dpms(crtc, old->dpms_mode);
6927 	}
6928 }
6929 
6930 /* Returns the clock of the currently programmed mode of the given pipe. */
6931 static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
6932 {
6933 	struct drm_i915_private *dev_priv = dev->dev_private;
6934 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6935 	int pipe = intel_crtc->pipe;
6936 	u32 dpll = I915_READ(DPLL(pipe));
6937 	u32 fp;
6938 	intel_clock_t clock;
6939 
6940 	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
6941 		fp = I915_READ(FP0(pipe));
6942 	else
6943 		fp = I915_READ(FP1(pipe));
6944 
6945 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
6946 	if (IS_PINEVIEW(dev)) {
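		/* Pineview stores N one-hot ((1 << n) << 16), so ffs()
		 * recovers the exponent; cf. i9xx_update_pll_dividers().
		 */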
6947 		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
6948 		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
6949 	} else {
6950 		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
6951 		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
6952 	}
6953 
6954 	if (!IS_GEN2(dev)) {
6955 		if (IS_PINEVIEW(dev))
6956 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
6957 				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
6958 		else
6959 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
6960 			       DPLL_FPA01_P1_POST_DIV_SHIFT);
6961 
6962 		switch (dpll & DPLL_MODE_MASK) {
6963 		case DPLLB_MODE_DAC_SERIAL:
6964 			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
6965 				5 : 10;
6966 			break;
6967 		case DPLLB_MODE_LVDS:
6968 			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
6969 				7 : 14;
6970 			break;
6971 		default:
6972 			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
6973 				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
6974 			return 0;
6975 		}
6976 
		/* XXX: Handle the 100MHz refclk */
6978 		intel_clock(dev, 96000, &clock);
6979 	} else {
6980 		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
6981 
6982 		if (is_lvds) {
6983 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
6984 				       DPLL_FPA01_P1_POST_DIV_SHIFT);
6985 			clock.p2 = 14;
6986 
6987 			if ((dpll & PLL_REF_INPUT_MASK) ==
6988 			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
6989 				/* XXX: might not be 66MHz */
6990 				intel_clock(dev, 66000, &clock);
6991 			} else
6992 				intel_clock(dev, 48000, &clock);
6993 		} else {
6994 			if (dpll & PLL_P1_DIVIDE_BY_TWO)
6995 				clock.p1 = 2;
6996 			else {
6997 				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
6998 					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
6999 			}
7000 			if (dpll & PLL_P2_DIVIDE_BY_4)
7001 				clock.p2 = 4;
7002 			else
7003 				clock.p2 = 2;
7004 
7005 			intel_clock(dev, 48000, &clock);
7006 		}
7007 	}
7008 
7009 	/* XXX: It would be nice to validate the clocks, but we can't reuse
7010 	 * i830PllIsValid() because it relies on the xf86_config connector
7011 	 * configuration being accurate, which it isn't necessarily.
7012 	 */
7013 
7014 	return clock.dot;
7015 }
7016 
7017 /** Returns the currently programmed mode of the given pipe. */
7018 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
7019 					     struct drm_crtc *crtc)
7020 {
7021 	struct drm_i915_private *dev_priv = dev->dev_private;
7022 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7023 	int pipe = intel_crtc->pipe;
7024 	struct drm_display_mode *mode;
7025 	int htot = I915_READ(HTOTAL(pipe));
7026 	int hsync = I915_READ(HSYNC(pipe));
7027 	int vtot = I915_READ(VTOTAL(pipe));
7028 	int vsync = I915_READ(VSYNC(pipe));
7029 
7030 	mode = kmalloc(sizeof(*mode), DRM_MEM_KMS, M_WAITOK | M_ZERO);
7031 
7032 	mode->clock = intel_crtc_clock_get(dev, crtc);
7033 	mode->hdisplay = (htot & 0xffff) + 1;
7034 	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
7035 	mode->hsync_start = (hsync & 0xffff) + 1;
7036 	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
7037 	mode->vdisplay = (vtot & 0xffff) + 1;
7038 	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
7039 	mode->vsync_start = (vsync & 0xffff) + 1;
7040 	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
7041 
7042 	drm_mode_set_name(mode);
7043 	drm_mode_set_crtcinfo(mode, 0);
7044 
7045 	return mode;
7046 }
7047 
7048 #define GPU_IDLE_TIMEOUT (500 /* ms */ * hz / 1000)	/* in ticks */
7049 
7050 /* When this timer fires, we've been idle for a while */
7051 static void intel_gpu_idle_timer(void *arg)
7052 {
7053 	struct drm_device *dev = arg;
7054 	drm_i915_private_t *dev_priv = dev->dev_private;
7055 
7056 	if (!list_empty(&dev_priv->mm.active_list)) {
7057 		/* Still processing requests, so just re-arm the timer. */
7058 		callout_reset(&dev_priv->idle_callout, GPU_IDLE_TIMEOUT,
7059 		    intel_gpu_idle_timer, dev);
7060 		return;
7061 	}
7062 
7063 	dev_priv->busy = false;
7064 	taskqueue_enqueue(dev_priv->tq, &dev_priv->idle_task);
7065 }
7066 
7067 #define CRTC_IDLE_TIMEOUT (1000 /* ms */ * hz / 1000)	/* in ticks */
7068 
7069 static void intel_crtc_idle_timer(void *arg)
7070 {
7071 	struct intel_crtc *intel_crtc = arg;
7072 	struct drm_crtc *crtc = &intel_crtc->base;
7073 	drm_i915_private_t *dev_priv = crtc->dev->dev_private;
7074 	struct intel_framebuffer *intel_fb;
7075 
7076 	intel_fb = to_intel_framebuffer(crtc->fb);
7077 	if (intel_fb && intel_fb->obj->active) {
7078 		/* The framebuffer is still being accessed by the GPU. */
7079 		callout_reset(&intel_crtc->idle_callout, CRTC_IDLE_TIMEOUT,
7080 		    intel_crtc_idle_timer, intel_crtc);
7081 		return;
7082 	}
7083 
7084 	intel_crtc->busy = false;
7085 	taskqueue_enqueue(dev_priv->tq, &dev_priv->idle_task);
7086 }
7087 
7088 static void intel_increase_pllclock(struct drm_crtc *crtc)
7089 {
7090 	struct drm_device *dev = crtc->dev;
7091 	drm_i915_private_t *dev_priv = dev->dev_private;
7092 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7093 	int pipe = intel_crtc->pipe;
7094 	int dpll_reg = DPLL(pipe);
7095 	int dpll;
7096 
7097 	if (HAS_PCH_SPLIT(dev))
7098 		return;
7099 
7100 	if (!dev_priv->lvds_downclock_avail)
7101 		return;
7102 
7103 	dpll = I915_READ(dpll_reg);
7104 	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
7105 		DRM_DEBUG_DRIVER("upclocking LVDS\n");
7106 
7107 		assert_panel_unlocked(dev_priv, pipe);
7108 
7109 		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
7110 		I915_WRITE(dpll_reg, dpll);
7111 		intel_wait_for_vblank(dev, pipe);
7112 
7113 		dpll = I915_READ(dpll_reg);
7114 		if (dpll & DISPLAY_RATE_SELECT_FPA1)
7115 			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
7116 	}
7117 
7118 	/* Schedule downclock */
7119 	callout_reset(&intel_crtc->idle_callout, CRTC_IDLE_TIMEOUT,
7120 	    intel_crtc_idle_timer, intel_crtc);
7121 }
7122 
7123 static void intel_decrease_pllclock(struct drm_crtc *crtc)
7124 {
7125 	struct drm_device *dev = crtc->dev;
7126 	drm_i915_private_t *dev_priv = dev->dev_private;
7127 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7128 
7129 	if (HAS_PCH_SPLIT(dev))
7130 		return;
7131 
7132 	if (!dev_priv->lvds_downclock_avail)
7133 		return;
7134 
7135 	/*
7136 	 * Since this is called by a timer, we should never get here in
7137 	 * the manual case.
7138 	 */
7139 	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
7140 		int pipe = intel_crtc->pipe;
7141 		int dpll_reg = DPLL(pipe);
7142 		u32 dpll;
7143 
7144 		DRM_DEBUG_DRIVER("downclocking LVDS\n");
7145 
7146 		assert_panel_unlocked(dev_priv, pipe);
7147 
7148 		dpll = I915_READ(dpll_reg);
7149 		dpll |= DISPLAY_RATE_SELECT_FPA1;
7150 		I915_WRITE(dpll_reg, dpll);
7151 		intel_wait_for_vblank(dev, pipe);
7152 		dpll = I915_READ(dpll_reg);
7153 		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
7154 			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
7155 	}
7156 }
7157 
7158 /**
7159  * intel_idle_update - adjust clocks for idleness
7160  * @arg: drm_i915_private for the device; @pending: taskqueue count (unused)
7161  *
7162  * Either the GPU or display (or both) went idle.  Check the busy status
7163  * here and adjust the CRTC and GPU clocks as necessary.
7164  */
7165 static void intel_idle_update(void *arg, int pending)
7166 {
7167 	drm_i915_private_t *dev_priv = arg;
7168 	struct drm_device *dev = dev_priv->dev;
7169 	struct drm_crtc *crtc;
7170 	struct intel_crtc *intel_crtc;
7171 
7172 	if (!i915_powersave)
7173 		return;
7174 
7175 	DRM_LOCK(dev);
7176 
7177 	i915_update_gfx_val(dev_priv);
7178 
7179 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7180 		/* Skip inactive CRTCs */
7181 		if (!crtc->fb)
7182 			continue;
7183 
7184 		intel_crtc = to_intel_crtc(crtc);
7185 		if (!intel_crtc->busy)
7186 			intel_decrease_pllclock(crtc);
7187 	}
7188 
7189 	DRM_UNLOCK(dev);
7190 }
7191 
7192 /**
7193  * intel_mark_busy - mark the GPU and possibly the display busy
7194  * @dev: drm device
7195  * @obj: object we're operating on
7196  *
7197  * Callers can use this function to indicate that the GPU is busy processing
7198  * commands.  If @obj matches one of the CRTC objects (i.e. it's a scanout
7199  * buffer), we'll also mark the display as busy, so we know to increase its
7200  * clock frequency.
7201  */
7202 void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
7203 {
7204 	drm_i915_private_t *dev_priv = dev->dev_private;
7205 	struct drm_crtc *crtc = NULL;
7206 	struct intel_framebuffer *intel_fb;
7207 	struct intel_crtc *intel_crtc;
7208 
7209 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
7210 		return;
7211 
7212 	if (!dev_priv->busy)
7213 		dev_priv->busy = true;
7214 	else
7215 		callout_reset(&dev_priv->idle_callout, GPU_IDLE_TIMEOUT,
7216 		    intel_gpu_idle_timer, dev);
7217 
7218 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7219 		if (!crtc->fb)
7220 			continue;
7221 
7222 		intel_crtc = to_intel_crtc(crtc);
7223 		intel_fb = to_intel_framebuffer(crtc->fb);
7224 		if (intel_fb->obj == obj) {
7225 			if (!intel_crtc->busy) {
7226 				/* Non-busy -> busy, upclock */
7227 				intel_increase_pllclock(crtc);
7228 				intel_crtc->busy = true;
7229 			} else {
7230 				/* Busy -> busy, put off timer */
7231 				callout_reset(&intel_crtc->idle_callout,
7232 				    CRTC_IDLE_TIMEOUT, intel_crtc_idle_timer,
7233 				    intel_crtc);
7234 			}
7235 		}
7236 	}
7237 }
7238 
7239 static void intel_crtc_destroy(struct drm_crtc *crtc)
7240 {
7241 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7242 	struct drm_device *dev = crtc->dev;
7243 	struct drm_i915_private *dev_priv = dev->dev_private;
7244 	struct intel_unpin_work *work;
7245 
7246 	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
7247 	work = intel_crtc->unpin_work;
7248 	intel_crtc->unpin_work = NULL;
7249 	lockmgr(&dev->event_lock, LK_RELEASE);
7250 
7251 	if (work) {
7252 		taskqueue_cancel(dev_priv->tq, &work->task, NULL);
7253 		taskqueue_drain(dev_priv->tq, &work->task);
7254 		drm_free(work, DRM_MEM_KMS);
7255 	}
7256 
7257 	drm_crtc_cleanup(crtc);
7258 
7259 	drm_free(intel_crtc, DRM_MEM_KMS);
7260 }
7261 
7262 static void intel_unpin_work_fn(void *arg, int pending)
7263 {
7264 	struct intel_unpin_work *work = arg;
7265 	struct drm_device *dev;
7266 
7267 	dev = work->dev;
7268 	DRM_LOCK(dev);
7269 	intel_unpin_fb_obj(work->old_fb_obj);
7270 	drm_gem_object_unreference(&work->pending_flip_obj->base);
7271 	drm_gem_object_unreference(&work->old_fb_obj->base);
7272 
7273 	intel_update_fbc(work->dev);
7274 	DRM_UNLOCK(dev);
7275 	drm_free(work, DRM_MEM_KMS);
7276 }
7277 
7278 static void do_intel_finish_page_flip(struct drm_device *dev,
7279 				      struct drm_crtc *crtc)
7280 {
7281 	drm_i915_private_t *dev_priv = dev->dev_private;
7282 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7283 	struct intel_unpin_work *work;
7284 	struct drm_i915_gem_object *obj;
7285 	struct drm_pending_vblank_event *e;
7286 	struct timeval tnow, tvbl;
7287 
7288 	/* Ignore early vblank irqs */
7289 	if (intel_crtc == NULL)
7290 		return;
7291 
7292 	microtime(&tnow);
7293 
7294 	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
7295 	work = intel_crtc->unpin_work;
7296 	if (work == NULL || !work->pending) {
7297 		lockmgr(&dev->event_lock, LK_RELEASE);
7298 		return;
7299 	}
7300 
7301 	intel_crtc->unpin_work = NULL;
7302 
7303 	if (work->event) {
7304 		e = work->event;
7305 		e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
7306 
7307 		/* Called before vblank count and timestamps have
7308 		 * been updated for the vblank interval of flip
7309 		 * completion? Need to increment vblank count and
7310 		 * add one videorefresh duration to returned timestamp
7311 		 * to account for this. We assume this happened if we
7312 		 * get called over 0.9 frame durations after the last
7313 		 * timestamped vblank.
7314 		 *
7315 		 * This calculation cannot be used with vrefresh rates
7316 		 * below 5Hz (10Hz to be on the safe side) without
7317 		 * promoting to 64-bit integers.
7318 		 */
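		/* E.g. at 60Hz, framedur_ns is ~16.7ms, so a completion seen
		 * more than ~15ms after the last timestamped vblank is
		 * attributed to the following frame. */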
7319 		if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
7320 		    9 * crtc->framedur_ns) {
7321 			e->event.sequence++;
7322 			tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
7323 					     crtc->framedur_ns);
7324 		}
7325 
7326 		e->event.tv_sec = tvbl.tv_sec;
7327 		e->event.tv_usec = tvbl.tv_usec;
7328 
7329 		list_add_tail(&e->base.link,
7330 			      &e->base.file_priv->event_list);
7331 		drm_event_wakeup(&e->base);
7332 	}
7333 
7334 	drm_vblank_put(dev, intel_crtc->pipe);
7335 
7336 	obj = work->old_fb_obj;
7337 
7338 	atomic_clear_int(&obj->pending_flip, 1 << intel_crtc->plane);
7339 	if (atomic_load_acq_int(&obj->pending_flip) == 0)
7340 		wakeup(&obj->pending_flip);
7341 	lockmgr(&dev->event_lock, LK_RELEASE);
7342 
7343 	taskqueue_enqueue(dev_priv->tq, &work->task);
7344 }
7345 
7346 void intel_finish_page_flip(struct drm_device *dev, int pipe)
7347 {
7348 	drm_i915_private_t *dev_priv = dev->dev_private;
7349 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
7350 
7351 	do_intel_finish_page_flip(dev, crtc);
7352 }
7353 
7354 void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
7355 {
7356 	drm_i915_private_t *dev_priv = dev->dev_private;
7357 	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
7358 
7359 	do_intel_finish_page_flip(dev, crtc);
7360 }
7361 
7362 void intel_prepare_page_flip(struct drm_device *dev, int plane)
7363 {
7364 	drm_i915_private_t *dev_priv = dev->dev_private;
7365 	struct intel_crtc *intel_crtc =
7366 		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
7367 
7368 	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
7369 	if (intel_crtc->unpin_work) {
7370 		if ((++intel_crtc->unpin_work->pending) > 1)
7371 			DRM_ERROR("Prepared flip multiple times\n");
7372 	} else {
7373 		DRM_DEBUG("preparing flip with no unpin work?\n");
7374 	}
7375 	lockmgr(&dev->event_lock, LK_RELEASE);
7376 }
7377 
7378 static int intel_gen2_queue_flip(struct drm_device *dev,
7379 				 struct drm_crtc *crtc,
7380 				 struct drm_framebuffer *fb,
7381 				 struct drm_i915_gem_object *obj)
7382 {
7383 	struct drm_i915_private *dev_priv = dev->dev_private;
7384 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7385 	unsigned long offset;
7386 	u32 flip_mask;
7387 	int ret;
7388 
7389 	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7390 	if (ret)
7391 		goto out;
7392 
7393 	/* Offset into the new buffer for cases of shared fbs between CRTCs */
7394 	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
7395 
7396 	ret = BEGIN_LP_RING(6);
7397 	if (ret)
7398 		goto out;
7399 
7400 	/* Can't queue multiple flips, so wait for the previous
7401 	 * one to finish before executing the next.
7402 	 */
7403 	if (intel_crtc->plane)
7404 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
7405 	else
7406 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
7407 	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
7408 	OUT_RING(MI_NOOP);
7409 	OUT_RING(MI_DISPLAY_FLIP |
7410 		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7411 	OUT_RING(fb->pitches[0]);
7412 	OUT_RING(obj->gtt_offset + offset);
7413 	OUT_RING(0); /* aux display base address, unused */
7414 	ADVANCE_LP_RING();
7415 out:
7416 	return ret;
7417 }
7418 
7419 static int intel_gen3_queue_flip(struct drm_device *dev,
7420 				 struct drm_crtc *crtc,
7421 				 struct drm_framebuffer *fb,
7422 				 struct drm_i915_gem_object *obj)
7423 {
7424 	struct drm_i915_private *dev_priv = dev->dev_private;
7425 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7426 	unsigned long offset;
7427 	u32 flip_mask;
7428 	int ret;
7429 
7430 	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7431 	if (ret)
7432 		goto out;
7433 
7434 	/* Offset into the new buffer for cases of shared fbs between CRTCs */
7435 	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
7436 
7437 	ret = BEGIN_LP_RING(6);
7438 	if (ret)
7439 		goto out;
7440 
7441 	if (intel_crtc->plane)
7442 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
7443 	else
7444 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
7445 	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
7446 	OUT_RING(MI_NOOP);
7447 	OUT_RING(MI_DISPLAY_FLIP_I915 |
7448 		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7449 	OUT_RING(fb->pitches[0]);
7450 	OUT_RING(obj->gtt_offset + offset);
7451 	OUT_RING(MI_NOOP);
7452 
7453 	ADVANCE_LP_RING();
7454 out:
7455 	return ret;
7456 }
7457 
7458 static int intel_gen4_queue_flip(struct drm_device *dev,
7459 				 struct drm_crtc *crtc,
7460 				 struct drm_framebuffer *fb,
7461 				 struct drm_i915_gem_object *obj)
7462 {
7463 	struct drm_i915_private *dev_priv = dev->dev_private;
7464 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7465 	uint32_t pf, pipesrc;
7466 	int ret;
7467 
7468 	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7469 	if (ret)
7470 		goto out;
7471 
7472 	ret = BEGIN_LP_RING(4);
7473 	if (ret)
7474 		goto out;
7475 
7476 	/* i965+ uses the linear or tiled offsets from the
7477 	 * Display Registers (which do not change across a page-flip)
7478 	 * so we need only reprogram the base address.
7479 	 */
7480 	OUT_RING(MI_DISPLAY_FLIP |
7481 		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7482 	OUT_RING(fb->pitches[0]);
7483 	OUT_RING(obj->gtt_offset | obj->tiling_mode);
7484 
7485 	/* XXX Enabling the panel-fitter across page-flip is so far
7486 	 * untested on non-native modes, so ignore it for now.
7487 	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
7488 	 */
7489 	pf = 0;
7490 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
7491 	OUT_RING(pf | pipesrc);
7492 	ADVANCE_LP_RING();
7493 out:
7494 	return ret;
7495 }
7496 
7497 static int intel_gen6_queue_flip(struct drm_device *dev,
7498 				 struct drm_crtc *crtc,
7499 				 struct drm_framebuffer *fb,
7500 				 struct drm_i915_gem_object *obj)
7501 {
7502 	struct drm_i915_private *dev_priv = dev->dev_private;
7503 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7504 	uint32_t pf, pipesrc;
7505 	int ret;
7506 
7507 	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7508 	if (ret)
7509 		goto out;
7510 
7511 	ret = BEGIN_LP_RING(4);
7512 	if (ret)
7513 		goto out;
7514 
7515 	OUT_RING(MI_DISPLAY_FLIP |
7516 		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7517 	OUT_RING(fb->pitches[0] | obj->tiling_mode);
7518 	OUT_RING(obj->gtt_offset);
7519 
7520 	/* Contrary to the suggestions in the documentation,
7521 	 * "Enable Panel Fitter" does not seem to be required when page
7522 	 * flipping with a non-native mode and, worse, causes a normal
7523 	 * modeset to fail.
7524 	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
7525 	 */
7526 	pf = 0;
7527 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
7528 	OUT_RING(pf | pipesrc);
7529 	ADVANCE_LP_RING();
7530 out:
7531 	return ret;
7532 }
7533 
7534 /*
7535  * On gen7 we currently use the blit ring because (in early silicon at least)
7536  * the render ring doesn't give us interrupts for page flip completion, which
7537  * means clients will hang after the first flip is queued.  Fortunately the
7538  * blit ring generates interrupts properly, so use it instead.
7539  */
7540 static int intel_gen7_queue_flip(struct drm_device *dev,
7541 				 struct drm_crtc *crtc,
7542 				 struct drm_framebuffer *fb,
7543 				 struct drm_i915_gem_object *obj)
7544 {
7545 	struct drm_i915_private *dev_priv = dev->dev_private;
7546 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7547 	struct intel_ring_buffer *ring = &dev_priv->rings[BCS];
7548 	int ret;
7549 
7550 	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7551 	if (ret)
7552 		goto out;
7553 
7554 	ret = intel_ring_begin(ring, 4);
7555 	if (ret)
7556 		goto out;
7557 
7558 	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
7559 	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
7560 	intel_ring_emit(ring, (obj->gtt_offset));
7561 	intel_ring_emit(ring, (MI_NOOP));
7562 	intel_ring_advance(ring);
7563 out:
7564 	return ret;
7565 }
7566 
7567 static int intel_default_queue_flip(struct drm_device *dev,
7568 				    struct drm_crtc *crtc,
7569 				    struct drm_framebuffer *fb,
7570 				    struct drm_i915_gem_object *obj)
7571 {
7572 	return -ENODEV;
7573 }
7574 
7575 static int intel_crtc_page_flip(struct drm_crtc *crtc,
7576 				struct drm_framebuffer *fb,
7577 				struct drm_pending_vblank_event *event)
7578 {
7579 	struct drm_device *dev = crtc->dev;
7580 	struct drm_i915_private *dev_priv = dev->dev_private;
7581 	struct intel_framebuffer *intel_fb;
7582 	struct drm_i915_gem_object *obj;
7583 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7584 	struct intel_unpin_work *work;
7585 	int ret;
7586 
7587 	work = kmalloc(sizeof *work, DRM_MEM_KMS, M_WAITOK | M_ZERO);
7588 
7589 	work->event = event;
7590 	work->dev = crtc->dev;
7591 	intel_fb = to_intel_framebuffer(crtc->fb);
7592 	work->old_fb_obj = intel_fb->obj;
7593 	TASK_INIT(&work->task, 0, intel_unpin_work_fn, work);
7594 
7595 	ret = drm_vblank_get(dev, intel_crtc->pipe);
7596 	if (ret)
7597 		goto free_work;
7598 
7599 	/* We borrow the event spin lock for protecting unpin_work */
7600 	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
7601 	if (intel_crtc->unpin_work) {
7602 		lockmgr(&dev->event_lock, LK_RELEASE);
7603 		drm_free(work, DRM_MEM_KMS);
7604 		drm_vblank_put(dev, intel_crtc->pipe);
7605 
7606 		DRM_DEBUG("flip queue: crtc already busy\n");
7607 		return -EBUSY;
7608 	}
7609 	intel_crtc->unpin_work = work;
7610 	lockmgr(&dev->event_lock, LK_RELEASE);
7611 
7612 	intel_fb = to_intel_framebuffer(fb);
7613 	obj = intel_fb->obj;
7614 
7615 	DRM_LOCK(dev);
7616 
7617 	/* Reference the objects for the scheduled work. */
7618 	drm_gem_object_reference(&work->old_fb_obj->base);
7619 	drm_gem_object_reference(&obj->base);
7620 
7621 	crtc->fb = fb;
7622 
7623 	work->pending_flip_obj = obj;
7624 
7625 	work->enable_stall_check = true;
7626 
7627 	/* Block clients from rendering to the new back buffer until
7628 	 * the flip occurs and the object is no longer visible.
7629 	 */
7630 	atomic_set_int(&work->old_fb_obj->pending_flip, 1 << intel_crtc->plane);
7631 
7632 	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
7633 	if (ret)
7634 		goto cleanup_pending;
7635 	intel_disable_fbc(dev);
7636 	DRM_UNLOCK(dev);
7637 
7638 	return 0;
7639 
7640 cleanup_pending:
7641 	atomic_clear_int(&work->old_fb_obj->pending_flip, 1 << intel_crtc->plane);
7642 	drm_gem_object_unreference(&work->old_fb_obj->base);
7643 	drm_gem_object_unreference(&obj->base);
7644 	DRM_UNLOCK(dev);
7645 
7646 	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
7647 	intel_crtc->unpin_work = NULL;
7648 	lockmgr(&dev->event_lock, LK_RELEASE);
7649 
7650 	drm_vblank_put(dev, intel_crtc->pipe);
7651 free_work:
7652 	drm_free(work, DRM_MEM_KMS);
7653 
7654 	return ret;
7655 }
7656 
7657 static void intel_sanitize_modesetting(struct drm_device *dev,
7658 				       int pipe, int plane)
7659 {
7660 	struct drm_i915_private *dev_priv = dev->dev_private;
7661 	u32 reg, val;
7662 	int i;
7663 	/* Clear any frame start delays used for debugging left by the BIOS */
7664 	for_each_pipe(i) {	/* local index: don't clobber the pipe argument */
7665 		reg = PIPECONF(i);
7666 		I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
7667 	}
7668 
7669 	if (HAS_PCH_SPLIT(dev))
7670 		return;
7671 
7672 	/* Who knows what state these registers were left in by the BIOS or
7673 	 * grub?
7674 	 *
7675 	 * If we leave the registers in a conflicting state (e.g. with the
7676 	 * display plane reading from the other pipe than the one we intend
7677 	 * to use) then when we attempt to teardown the active mode, we will
7678 	 * not disable the pipes and planes in the correct order -- leaving
7679 	 * a plane reading from a disabled pipe and possibly leading to
7680 	 * undefined behaviour.
7681 	 */
7682 
7683 	reg = DSPCNTR(plane);
7684 	val = I915_READ(reg);
7685 
7686 	if ((val & DISPLAY_PLANE_ENABLE) == 0)
7687 		return;
7688 	if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
7689 		return;
7690 
7691 	/* This display plane is active and attached to the other CPU pipe. */
7692 	pipe = !pipe;
7693 
7694 	/* Disable the plane and wait for it to stop reading from the pipe. */
7695 	intel_disable_plane(dev_priv, plane, pipe);
7696 	intel_disable_pipe(dev_priv, pipe);
7697 }
7698 
7699 static void intel_crtc_reset(struct drm_crtc *crtc)
7700 {
7701 	struct drm_device *dev = crtc->dev;
7702 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7703 
7704 	/* Reset flags back to the 'unknown' status so that they
7705 	 * will be correctly set on the initial modeset.
7706 	 */
7707 	intel_crtc->dpms_mode = -1;
7708 
7709 	/* We need to fix up any BIOS configuration that conflicts with
7710 	 * our expectations.
7711 	 */
7712 	intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
7713 }
7714 
7715 static struct drm_crtc_helper_funcs intel_helper_funcs = {
7716 	.dpms = intel_crtc_dpms,
7717 	.mode_fixup = intel_crtc_mode_fixup,
7718 	.mode_set = intel_crtc_mode_set,
7719 	.mode_set_base = intel_pipe_set_base,
7720 	.mode_set_base_atomic = intel_pipe_set_base_atomic,
7721 	.load_lut = intel_crtc_load_lut,
7722 	.disable = intel_crtc_disable,
7723 };
7724 
7725 static const struct drm_crtc_funcs intel_crtc_funcs = {
7726 	.reset = intel_crtc_reset,
7727 	.cursor_set = intel_crtc_cursor_set,
7728 	.cursor_move = intel_crtc_cursor_move,
7729 	.gamma_set = intel_crtc_gamma_set,
7730 	.set_config = drm_crtc_helper_set_config,
7731 	.destroy = intel_crtc_destroy,
7732 	.page_flip = intel_crtc_page_flip,
7733 };
7734 
7735 static void intel_crtc_init(struct drm_device *dev, int pipe)
7736 {
7737 	drm_i915_private_t *dev_priv = dev->dev_private;
7738 	struct intel_crtc *intel_crtc;
7739 	int i;
7740 
7741 	intel_crtc = kmalloc(sizeof(struct intel_crtc) +
7742 	    (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
7743 	    DRM_MEM_KMS, M_WAITOK | M_ZERO);
7744 
7745 	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
7746 
7747 	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
7748 	for (i = 0; i < 256; i++) {
7749 		intel_crtc->lut_r[i] = i;
7750 		intel_crtc->lut_g[i] = i;
7751 		intel_crtc->lut_b[i] = i;
7752 	}
7753 
7754 	/* Swap pipes & planes for FBC on pre-965 */
7755 	intel_crtc->pipe = pipe;
7756 	intel_crtc->plane = pipe;
7757 	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
7758 		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
7759 		intel_crtc->plane = !pipe;
7760 	}
7761 
7762 	KASSERT(pipe < DRM_ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) &&
7763 	    dev_priv->plane_to_crtc_mapping[intel_crtc->plane] == NULL,
7764 	    ("plane_to_crtc is already initialized"));
7765 	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
7766 	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
7767 
7768 	intel_crtc_reset(&intel_crtc->base);
7769 	intel_crtc->active = true; /* force the pipe off on setup_init_config */
7770 	intel_crtc->bpp = 24; /* default for pre-Ironlake */
7771 
7772 	if (HAS_PCH_SPLIT(dev)) {
7773 		if (pipe == 2 && IS_IVYBRIDGE(dev))
7774 			intel_crtc->no_pll = true;
7775 		intel_helper_funcs.prepare = ironlake_crtc_prepare;
7776 		intel_helper_funcs.commit = ironlake_crtc_commit;
7777 	} else {
7778 		intel_helper_funcs.prepare = i9xx_crtc_prepare;
7779 		intel_helper_funcs.commit = i9xx_crtc_commit;
7780 	}
7781 
7782 	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
7783 
7784 	intel_crtc->busy = false;
7785 
7786 	callout_init_mp(&intel_crtc->idle_callout);
7787 }
7788 
7789 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
7790 				struct drm_file *file)
7791 {
7792 	drm_i915_private_t *dev_priv = dev->dev_private;
7793 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
7794 	struct drm_mode_object *drmmode_obj;
7795 	struct intel_crtc *crtc;
7796 
7797 	if (!dev_priv) {
7798 		DRM_ERROR("called with no initialization\n");
7799 		return -EINVAL;
7800 	}
7801 
7802 	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
7803 			DRM_MODE_OBJECT_CRTC);
7804 
7805 	if (!drmmode_obj) {
7806 		DRM_ERROR("no such CRTC id\n");
7807 		return -EINVAL;
7808 	}
7809 
7810 	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
7811 	pipe_from_crtc_id->pipe = crtc->pipe;
7812 
7813 	return 0;
7814 }
7815 
7816 static int intel_encoder_clones(struct drm_device *dev, int type_mask)
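/*
 * Build a bitmask of encoder indices (in encoder_list order) whose
 * clone_mask overlaps the given type mask; intel_setup_outputs() uses
 * this to fill in each encoder's possible_clones.
 */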
7817 {
7818 	struct intel_encoder *encoder;
7819 	int index_mask = 0;
7820 	int entry = 0;
7821 
7822 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
7823 		if (type_mask & encoder->clone_mask)
7824 			index_mask |= (1 << entry);
7825 		entry++;
7826 	}
7827 
7828 	return index_mask;
7829 }
7830 
7831 static bool has_edp_a(struct drm_device *dev)
7832 {
7833 	struct drm_i915_private *dev_priv = dev->dev_private;
7834 
7835 	if (!IS_MOBILE(dev))
7836 		return false;
7837 
7838 	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
7839 		return false;
7840 
7841 	if (IS_GEN5(dev) &&
7842 	    (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
7843 		return false;
7844 
7845 	return true;
7846 }
7847 
7848 static void intel_setup_outputs(struct drm_device *dev)
7849 {
7850 	struct drm_i915_private *dev_priv = dev->dev_private;
7851 	struct intel_encoder *encoder;
7852 	bool dpd_is_edp = false;
7853 	bool has_lvds;
7854 
7855 	has_lvds = intel_lvds_init(dev);
7856 	if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
7857 		/* disable the panel fitter on everything but LVDS */
7858 		I915_WRITE(PFIT_CONTROL, 0);
7859 	}
7860 
7861 	if (HAS_PCH_SPLIT(dev)) {
7862 		dpd_is_edp = intel_dpd_is_edp(dev);
7863 
7864 		if (has_edp_a(dev))
7865 			intel_dp_init(dev, DP_A);
7866 
7867 		if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
7868 			intel_dp_init(dev, PCH_DP_D);
7869 	}
7870 
7871 	intel_crt_init(dev);
7872 
7873 	if (HAS_PCH_SPLIT(dev)) {
7874 		int found;
7875 
7876 		DRM_DEBUG_KMS(
7877 "HDMIB %d PCH_DP_B %d HDMIC %d HDMID %d PCH_DP_C %d PCH_DP_D %d LVDS %d\n",
7878 		    (I915_READ(HDMIB) & PORT_DETECTED) != 0,
7879 		    (I915_READ(PCH_DP_B) & DP_DETECTED) != 0,
7880 		    (I915_READ(HDMIC) & PORT_DETECTED) != 0,
7881 		    (I915_READ(HDMID) & PORT_DETECTED) != 0,
7882 		    (I915_READ(PCH_DP_C) & DP_DETECTED) != 0,
7883 		    (I915_READ(PCH_DP_D) & DP_DETECTED) != 0,
7884 		    (I915_READ(PCH_LVDS) & LVDS_DETECTED) != 0);
7885 
7886 		if (I915_READ(HDMIB) & PORT_DETECTED) {
7887 			/* PCH SDVOB multiplex with HDMIB */
7888 			found = intel_sdvo_init(dev, PCH_SDVOB);
7889 			if (!found)
7890 				intel_hdmi_init(dev, HDMIB);
7891 			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
7892 				intel_dp_init(dev, PCH_DP_B);
7893 		}
7894 
7895 		if (I915_READ(HDMIC) & PORT_DETECTED)
7896 			intel_hdmi_init(dev, HDMIC);
7897 
7898 		if (I915_READ(HDMID) & PORT_DETECTED)
7899 			intel_hdmi_init(dev, HDMID);
7900 
7901 		if (I915_READ(PCH_DP_C) & DP_DETECTED)
7902 			intel_dp_init(dev, PCH_DP_C);
7903 
7904 		if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
7905 			intel_dp_init(dev, PCH_DP_D);
7906 
7907 	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
7908 		bool found = false;
7909 
7910 		if (I915_READ(SDVOB) & SDVO_DETECTED) {
7911 			DRM_DEBUG_KMS("probing SDVOB\n");
7912 			found = intel_sdvo_init(dev, SDVOB);
7913 			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
7914 				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
7915 				intel_hdmi_init(dev, SDVOB);
7916 			}
7917 
7918 			if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
7919 				DRM_DEBUG_KMS("probing DP_B\n");
7920 				intel_dp_init(dev, DP_B);
7921 			}
7922 		}
7923 
7924 	/* Before G4X, SDVOC doesn't have its own detect register */
7925 
7926 		if (I915_READ(SDVOB) & SDVO_DETECTED) {
7927 			DRM_DEBUG_KMS("probing SDVOC\n");
7928 			found = intel_sdvo_init(dev, SDVOC);
7929 		}
7930 
7931 		if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
7932 
7933 			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
7934 				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
7935 				intel_hdmi_init(dev, SDVOC);
7936 			}
7937 			if (SUPPORTS_INTEGRATED_DP(dev)) {
7938 				DRM_DEBUG_KMS("probing DP_C\n");
7939 				intel_dp_init(dev, DP_C);
7940 			}
7941 		}
7942 
7943 		if (SUPPORTS_INTEGRATED_DP(dev) &&
7944 		    (I915_READ(DP_D) & DP_DETECTED)) {
7945 			DRM_DEBUG_KMS("probing DP_D\n");
7946 			intel_dp_init(dev, DP_D);
7947 		}
7948 	} else if (IS_GEN2(dev)) {
7949 #if 1
7950 		KIB_NOTYET();
7951 #else
7952 		intel_dvo_init(dev);
7953 #endif
7954 	}
7955 
7956 	if (SUPPORTS_TV(dev))
7957 		intel_tv_init(dev);
7958 
7959 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
7960 		encoder->base.possible_crtcs = encoder->crtc_mask;
7961 		encoder->base.possible_clones =
7962 			intel_encoder_clones(dev, encoder->clone_mask);
7963 	}
7964 
7965 	/* disable all the possible outputs/crtcs before entering KMS mode */
7966 	drm_helper_disable_unused_functions(dev);
7967 
7968 	if (HAS_PCH_SPLIT(dev))
7969 		ironlake_init_pch_refclk(dev);
7970 }
7971 
7972 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
7973 {
7974 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
7975 
7976 	drm_framebuffer_cleanup(fb);
7977 	drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
7978 
7979 	drm_free(intel_fb, DRM_MEM_KMS);
7980 }
7981 
7982 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
7983 						struct drm_file *file,
7984 						unsigned int *handle)
7985 {
7986 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
7987 	struct drm_i915_gem_object *obj = intel_fb->obj;
7988 
7989 	return drm_gem_handle_create(file, &obj->base, handle);
7990 }
7991 
7992 static const struct drm_framebuffer_funcs intel_fb_funcs = {
7993 	.destroy = intel_user_framebuffer_destroy,
7994 	.create_handle = intel_user_framebuffer_create_handle,
7995 };
7996 
7997 int intel_framebuffer_init(struct drm_device *dev,
7998 			   struct intel_framebuffer *intel_fb,
7999 			   struct drm_mode_fb_cmd2 *mode_cmd,
8000 			   struct drm_i915_gem_object *obj)
8001 {
8002 	int ret;
8003 
8004 	if (obj->tiling_mode == I915_TILING_Y)
8005 		return -EINVAL;
8006 
8007 	if (mode_cmd->pitches[0] & 63)
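	/* The display engine requires a 64-byte-aligned stride */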
8008 		return -EINVAL;
8009 
8010 	switch (mode_cmd->pixel_format) {
8011 	case DRM_FORMAT_RGB332:
8012 	case DRM_FORMAT_RGB565:
8013 	case DRM_FORMAT_XRGB8888:
8014 	case DRM_FORMAT_XBGR8888:
8015 	case DRM_FORMAT_ARGB8888:
8016 	case DRM_FORMAT_XRGB2101010:
8017 	case DRM_FORMAT_ARGB2101010:
8018 		/* RGB formats are common across chipsets */
8019 		break;
8020 	case DRM_FORMAT_YUYV:
8021 	case DRM_FORMAT_UYVY:
8022 	case DRM_FORMAT_YVYU:
8023 	case DRM_FORMAT_VYUY:
8024 		break;
8025 	default:
8026 		DRM_DEBUG_KMS("unsupported pixel format %u\n",
8027 				mode_cmd->pixel_format);
8028 		return -EINVAL;
8029 	}
8030 
8031 	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
8032 	if (ret) {
8033 		DRM_ERROR("framebuffer init failed %d\n", ret);
8034 		return ret;
8035 	}
8036 
8037 	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
8038 	intel_fb->obj = obj;
8039 	return 0;
8040 }
8041 
8042 static int
8043 intel_user_framebuffer_create(struct drm_device *dev,
8044     struct drm_file *filp, struct drm_mode_fb_cmd2 *mode_cmd,
8045     struct drm_framebuffer **res)
8046 {
8047 	struct drm_i915_gem_object *obj;
8048 
8049 	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
8050 						mode_cmd->handles[0]));
8051 	if (&obj->base == NULL)
8052 		return (-ENOENT);
8053 
8054 	return (intel_framebuffer_create(dev, mode_cmd, obj, res));
8055 }
8056 
8057 static const struct drm_mode_config_funcs intel_mode_funcs = {
8058 	.fb_create = intel_user_framebuffer_create,
8059 	.output_poll_changed = intel_fb_output_poll_changed,
8060 };
8061 
8062 static struct drm_i915_gem_object *
8063 intel_alloc_context_page(struct drm_device *dev)
8064 {
8065 	struct drm_i915_gem_object *ctx;
8066 	int ret;
8067 
8068 	DRM_LOCK_ASSERT(dev);
8069 
8070 	ctx = i915_gem_alloc_object(dev, 4096);
8071 	if (!ctx) {
8072 		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
8073 		return NULL;
8074 	}
8075 
8076 	ret = i915_gem_object_pin(ctx, 4096, true);
8077 	if (ret) {
8078 		DRM_ERROR("failed to pin power context: %d\n", ret);
8079 		goto err_unref;
8080 	}
8081 
8082 	ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
8083 	if (ret) {
8084 		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
8085 		goto err_unpin;
8086 	}
8087 
8088 	return ctx;
8089 
8090 err_unpin:
8091 	i915_gem_object_unpin(ctx);
8092 err_unref:
8093 	drm_gem_object_unreference(&ctx->base);
8094 	/* leave the lock held; the caller acquired it and releases it */
8095 	return NULL;
8096 }
8097 
8098 bool ironlake_set_drps(struct drm_device *dev, u8 val)
8099 {
8100 	struct drm_i915_private *dev_priv = dev->dev_private;
8101 	u16 rgvswctl;
8102 
8103 	rgvswctl = I915_READ16(MEMSWCTL);
8104 	if (rgvswctl & MEMCTL_CMD_STS) {
8105 		DRM_DEBUG("gpu busy, RCS change rejected\n");
8106 		return false; /* still busy with another command */
8107 	}
8108 
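	/* Build a frequency-change request, write it, then set the status
	 * bit with a second write to hand the command to the hardware. */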
8109 	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
8110 		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
8111 	I915_WRITE16(MEMSWCTL, rgvswctl);
8112 	POSTING_READ16(MEMSWCTL);
8113 
8114 	rgvswctl |= MEMCTL_CMD_STS;
8115 	I915_WRITE16(MEMSWCTL, rgvswctl);
8116 
8117 	return true;
8118 }
8119 
8120 void ironlake_enable_drps(struct drm_device *dev)
8121 {
8122 	struct drm_i915_private *dev_priv = dev->dev_private;
8123 	u32 rgvmodectl = I915_READ(MEMMODECTL);
8124 	u8 fmax, fmin, fstart, vstart;
8125 
8126 	/* Enable temp reporting */
8127 	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
8128 	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
8129 
8130 	/* 100ms RC evaluation intervals */
8131 	I915_WRITE(RCUPEI, 100000);
8132 	I915_WRITE(RCDNEI, 100000);
8133 
8134 	/* Set max/min thresholds to 90ms and 80ms respectively */
8135 	I915_WRITE(RCBMAXAVG, 90000);
8136 	I915_WRITE(RCBMINAVG, 80000);
8137 
8138 	I915_WRITE(MEMIHYST, 1);
8139 
8140 	/* Set up min, max, and cur for interrupt handling */
8141 	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
8142 	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
8143 	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
8144 		MEMMODE_FSTART_SHIFT;
8145 
8146 	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
8147 		PXVFREQ_PX_SHIFT;
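	/* Each P-state has its own PXVFREQ register; indexing by fstart
	 * fetches the voltage ID paired with the starting frequency. */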
8148 
8149 	dev_priv->fmax = fmax; /* IPS callback will increase this */
8150 	dev_priv->fstart = fstart;
8151 
8152 	dev_priv->max_delay = fstart;
8153 	dev_priv->min_delay = fmin;
8154 	dev_priv->cur_delay = fstart;
8155 
8156 	DRM_DEBUG("fmax: %d, fmin: %d, fstart: %d\n",
8157 			 fmax, fmin, fstart);
8158 
8159 	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
8160 
8161 	/*
8162 	 * Interrupts will be enabled in ironlake_irq_postinstall
8163 	 */
8164 
8165 	I915_WRITE(VIDSTART, vstart);
8166 	POSTING_READ(VIDSTART);
8167 
8168 	rgvmodectl |= MEMMODE_SWMODE_EN;
8169 	I915_WRITE(MEMMODECTL, rgvmodectl);
8170 
8171 	if (_intel_wait_for(dev,
8172 	    (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10,
8173 	    1, "915per"))
8174 		DRM_ERROR("stuck trying to change perf mode\n");
8175 	DELAY(1000);
8176 
8177 	ironlake_set_drps(dev, fstart);
8178 
8179 	dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
8180 		I915_READ(0x112e0);
8181 	dev_priv->last_time1 = jiffies_to_msecs(jiffies);
8182 	dev_priv->last_count2 = I915_READ(0x112f4);
8183 	nanotime(&dev_priv->last_time2);
8184 }
8185 
8186 void ironlake_disable_drps(struct drm_device *dev)
8187 {
8188 	struct drm_i915_private *dev_priv = dev->dev_private;
8189 	u16 rgvswctl = I915_READ16(MEMSWCTL);
8190 
8191 	/* Ack interrupts, disable EFC interrupt */
8192 	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
8193 	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
8194 	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
8195 	I915_WRITE(DEIIR, DE_PCU_EVENT);
8196 	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
8197 
8198 	/* Go back to the starting frequency */
8199 	ironlake_set_drps(dev, dev_priv->fstart);
8200 	DELAY(1000);
8201 	rgvswctl |= MEMCTL_CMD_STS;
8202 	I915_WRITE(MEMSWCTL, rgvswctl);
8203 	DELAY(1000);
8204 
8205 }
8206 
8207 void gen6_set_rps(struct drm_device *dev, u8 val)
8208 {
8209 	struct drm_i915_private *dev_priv = dev->dev_private;
8210 	u32 swreq;
8211 
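	/* val is a frequency ratio in 50MHz units; it lands in the RPNSWREQ
	 * frequency field (bits 31:25, cf. GEN6_FREQUENCY()). */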
8212 	swreq = (val & 0x3ff) << 25;
8213 	I915_WRITE(GEN6_RPNSWREQ, swreq);
8214 }
8215 
8216 void gen6_disable_rps(struct drm_device *dev)
8217 {
8218 	struct drm_i915_private *dev_priv = dev->dev_private;
8219 
8220 	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
8221 	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
8222 	I915_WRITE(GEN6_PMIER, 0);
8223 	/* Masking all PM interrupts here does not race with the rps work
8224 	 * item re-unmasking them, since the work item masks via a different
8225 	 * register (PMIMR). The only risk is leaving stale bits in PMIIR
8226 	 * and PMIMR, which gen6_enable_rps will clean up. */
8227 
8228 	lockmgr(&dev_priv->rps_lock, LK_EXCLUSIVE);
8229 	dev_priv->pm_iir = 0;
8230 	lockmgr(&dev_priv->rps_lock, LK_RELEASE);
8231 
8232 	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
8233 }
8234 
8235 static unsigned long intel_pxfreq(u32 vidfreq)
8236 {
8237 	unsigned long freq;
8238 	int div = (vidfreq & 0x3f0000) >> 16;
8239 	int post = (vidfreq & 0x3000) >> 12;
8240 	int pre = (vidfreq & 0x7);
8241 
8242 	if (!pre)
8243 		return 0;
8244 
8245 	freq = ((div * 133333) / ((1<<post) * pre));
8246 
8247 	return freq;
8248 }
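
/*
 * Worked example with a hypothetical register value: vidfreq 0x000c1001
 * decodes to div = 0x0c, post = 1, pre = 1, giving
 * freq = (12 * 133333) / ((1 << 1) * 1) ~= 800000, i.e. 800MHz expressed
 * in kHz from the 133.33MHz reference.
 */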
8249 
8250 void intel_init_emon(struct drm_device *dev)
8251 {
8252 	struct drm_i915_private *dev_priv = dev->dev_private;
8253 	u32 lcfuse;
8254 	u8 pxw[16];
8255 	int i;
8256 
8257 	/* Disable the energy monitor while (re)programming it */
8258 	I915_WRITE(ECR, 0);
8259 	POSTING_READ(ECR);
8260 
8261 	/* Program energy weights for various events */
8262 	I915_WRITE(SDEW, 0x15040d00);
8263 	I915_WRITE(CSIEW0, 0x007f0000);
8264 	I915_WRITE(CSIEW1, 0x1e220004);
8265 	I915_WRITE(CSIEW2, 0x04000004);
8266 
8267 	for (i = 0; i < 5; i++)
8268 		I915_WRITE(PEW + (i * 4), 0);
8269 	for (i = 0; i < 3; i++)
8270 		I915_WRITE(DEW + (i * 4), 0);
8271 
8272 	/* Program P-state weights to account for frequency power adjustment */
8273 	for (i = 0; i < 16; i++) {
8274 		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
8275 		unsigned long freq = intel_pxfreq(pxvidfreq);
8276 		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
8277 			PXVFREQ_PX_SHIFT;
8278 		unsigned long val;
8279 
8280 		val = vid * vid;
8281 		val *= (freq / 1000);
8282 		val *= 255;
8283 		val /= (127*127*900);
8284 		if (val > 0xff)
8285 			DRM_ERROR("bad pxval: %ld\n", val);
8286 		pxw[i] = val;
8287 	}
8288 	/* Render standby states get 0 weight */
8289 	pxw[14] = 0;
8290 	pxw[15] = 0;
8291 
8292 	for (i = 0; i < 4; i++) {
8293 		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
8294 			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
8295 		I915_WRITE(PXW + (i * 4), val);
8296 	}
8297 
8298 	/* Adjust magic regs to magic values (more experimental results) */
8299 	I915_WRITE(OGW0, 0);
8300 	I915_WRITE(OGW1, 0);
8301 	I915_WRITE(EG0, 0x00007f00);
8302 	I915_WRITE(EG1, 0x0000000e);
8303 	I915_WRITE(EG2, 0x000e0000);
8304 	I915_WRITE(EG3, 0x68000300);
8305 	I915_WRITE(EG4, 0x42000000);
8306 	I915_WRITE(EG5, 0x00140031);
8307 	I915_WRITE(EG6, 0);
8308 	I915_WRITE(EG7, 0);
8309 
8310 	for (i = 0; i < 8; i++)
8311 		I915_WRITE(PXWL + (i * 4), 0);
8312 
8313 	/* Enable PMON + select events */
8314 	I915_WRITE(ECR, 0x80000019);
8315 
8316 	lcfuse = I915_READ(LCFUSE02);
8317 
8318 	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
8319 }
8320 
8321 static int intel_enable_rc6(struct drm_device *dev)
8322 {
8323 	/*
8324 	 * Respect the kernel parameter if it is set
8325 	 */
8326 	if (i915_enable_rc6 >= 0)
8327 		return i915_enable_rc6;
8328 
8329 	/*
8330 	 * Disable RC6 on Ironlake
8331 	 */
8332 	if (INTEL_INFO(dev)->gen == 5)
8333 		return 0;
8334 
8335 	/*
8336 	 * Enable rc6 on Sandybridge if DMA remapping is disabled
8337 	 */
8338 	if (INTEL_INFO(dev)->gen == 6) {
8339 		DRM_DEBUG_DRIVER(
8340 		    "Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n",
8341 		     intel_iommu_enabled ? "true" : "false",
8342 		     !intel_iommu_enabled ? "en" : "dis");
8343 		return (intel_iommu_enabled ? 0 : INTEL_RC6_ENABLE);
8344 	}
8345 	DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
8346 	return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
8347 }
8348 
8349 void gen6_enable_rps(struct drm_i915_private *dev_priv)
8350 {
8351 	struct drm_device *dev = dev_priv->dev;
8352 	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
8353 	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
8354 	u32 pcu_mbox, rc6_mask = 0;
8355 	u32 gtfifodbg;
8356 	int cur_freq, min_freq, max_freq;
8357 	int rc6_mode;
8358 	int i;
8359 
8360 	/* Here begins a magic sequence of register writes to enable
8361 	 * auto-downclocking.
8362 	 *
8363 	 * Perhaps there might be some value in exposing these to
8364 	 * userspace...
8365 	 */
8366 	I915_WRITE(GEN6_RC_STATE, 0);
8367 	DRM_LOCK(dev);
8368 
8369 	/* Clear the DBG now so we don't confuse earlier errors */
8370 	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
8371 		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
8372 		I915_WRITE(GTFIFODBG, gtfifodbg);
8373 	}
8374 
8375 	gen6_gt_force_wake_get(dev_priv);
8376 
8377 	/* disable the counters and set deterministic thresholds */
8378 	I915_WRITE(GEN6_RC_CONTROL, 0);
8379 
8380 	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
8381 	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
8382 	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
8383 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
8384 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
8385 
8386 	for (i = 0; i < I915_NUM_RINGS; i++)
8387 		I915_WRITE(RING_MAX_IDLE(dev_priv->rings[i].mmio_base), 10);
8388 
8389 	I915_WRITE(GEN6_RC_SLEEP, 0);
8390 	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
8391 	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
8392 	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
8393 	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
8394 
8395 	rc6_mode = intel_enable_rc6(dev_priv->dev);
8396 	if (rc6_mode & INTEL_RC6_ENABLE)
8397 		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
8398 
8399 	if (rc6_mode & INTEL_RC6p_ENABLE)
8400 		rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
8401 
8402 	if (rc6_mode & INTEL_RC6pp_ENABLE)
8403 		rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
8404 
8405 	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
8406 			(rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
8407 			(rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
8408 			(rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
8409 
8410 	I915_WRITE(GEN6_RC_CONTROL,
8411 		   rc6_mask |
8412 		   GEN6_RC_CTL_EI_MODE(1) |
8413 		   GEN6_RC_CTL_HW_ENABLE);
8414 
8415 	I915_WRITE(GEN6_RPNSWREQ,
8416 		   GEN6_FREQUENCY(10) |
8417 		   GEN6_OFFSET(0) |
8418 		   GEN6_AGGRESSIVE_TURBO);
8419 	I915_WRITE(GEN6_RC_VIDEO_FREQ,
8420 		   GEN6_FREQUENCY(12));
8421 
8422 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
8423 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
8424 		   18 << 24 |
8425 		   6 << 16);
8426 	I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
8427 	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
8428 	I915_WRITE(GEN6_RP_UP_EI, 100000);
8429 	I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
8430 	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
8431 	I915_WRITE(GEN6_RP_CONTROL,
8432 		   GEN6_RP_MEDIA_TURBO |
8433 		   GEN6_RP_MEDIA_HW_MODE |
8434 		   GEN6_RP_MEDIA_IS_GFX |
8435 		   GEN6_RP_ENABLE |
8436 		   GEN6_RP_UP_BUSY_AVG |
8437 		   GEN6_RP_DOWN_IDLE_CONT);
8438 
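	/* Pcode mailbox handshake: wait for GEN6_PCODE_READY to clear, load
	 * GEN6_PCODE_DATA, write the command with the READY bit set, then
	 * wait for the firmware to clear READY again when it is done. */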
8439 	if (_intel_wait_for(dev,
8440 	    (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
8441 	    1, "915pr1"))
8442 		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
8443 
8444 	I915_WRITE(GEN6_PCODE_DATA, 0);
8445 	I915_WRITE(GEN6_PCODE_MAILBOX,
8446 		   GEN6_PCODE_READY |
8447 		   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
8448 	if (_intel_wait_for(dev,
8449 	    (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
8450 	    1, "915pr2"))
8451 		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
8452 
8453 	min_freq = (rp_state_cap & 0xff0000) >> 16;
8454 	max_freq = rp_state_cap & 0xff;
8455 	cur_freq = (gt_perf_status & 0xff00) >> 8;
8456 
8457 	/* Check for overclock support */
8458 	if (_intel_wait_for(dev,
8459 	    (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
8460 	    1, "915pr3"))
8461 		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
8462 	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
8463 	pcu_mbox = I915_READ(GEN6_PCODE_DATA);
8464 	if (_intel_wait_for(dev,
8465 	    (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
8466 	    1, "915pr4"))
8467 		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
8468 	if (pcu_mbox & (1<<31)) { /* OC supported */
8469 		max_freq = pcu_mbox & 0xff;
8470 		DRM_DEBUG("overclocking supported, adjusting frequency max to %dMHz\n", max_freq * 50);
8471 	}
8472 
8473 	/* In units of 100MHz */
8474 	dev_priv->max_delay = max_freq;
8475 	dev_priv->min_delay = min_freq;
8476 	dev_priv->cur_delay = cur_freq;
8477 
8478 	/* requires MSI enabled */
8479 	I915_WRITE(GEN6_PMIER,
8480 		   GEN6_PM_MBOX_EVENT |
8481 		   GEN6_PM_THERMAL_EVENT |
8482 		   GEN6_PM_RP_DOWN_TIMEOUT |
8483 		   GEN6_PM_RP_UP_THRESHOLD |
8484 		   GEN6_PM_RP_DOWN_THRESHOLD |
8485 		   GEN6_PM_RP_UP_EI_EXPIRED |
8486 		   GEN6_PM_RP_DOWN_EI_EXPIRED);
8487 	lockmgr(&dev_priv->rps_lock, LK_EXCLUSIVE);
8488 	if (dev_priv->pm_iir != 0)
8489 		kprintf("pm_iir %x\n", dev_priv->pm_iir);
8490 	I915_WRITE(GEN6_PMIMR, 0);
8491 	lockmgr(&dev_priv->rps_lock, LK_RELEASE);
8492 	/* enable all PM interrupts */
8493 	I915_WRITE(GEN6_PMINTRMSK, 0);
8494 
8495 	gen6_gt_force_wake_put(dev_priv);
8496 	DRM_UNLOCK(dev);
8497 }
8498 
8499 void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
8500 {
8501 	struct drm_device *dev;
8502 	int min_freq = 15;
8503 	int gpu_freq, ia_freq, max_ia_freq;
8504 	int scaling_factor = 180;
8505 	uint64_t tsc_freq;
8506 
8507 	dev = dev_priv->dev;
8508 #if 0
8509 	max_ia_freq = cpufreq_quick_get_max(0);
8510 	/*
8511 	 * Default to measured freq if none found, PCU will ensure we don't go
8512 	 * over
8513 	 */
8514 	if (!max_ia_freq)
8515 		max_ia_freq = tsc_freq;
8516 
8517 	/* Convert from Hz to MHz */
8518 	max_ia_freq /= 1000;
8519 #else
8520 	tscval = atomic_load_acq_64(&tsc_freq); /* kernel TSC frequency, Hz */
8521 	max_ia_freq = tscval / 1000 / 1000;
8522 #endif
8523 
8524 	DRM_LOCK(dev);
8525 
8526 	/*
8527 	 * For each potential GPU frequency, load a ring frequency we'd like
8528 	 * to use for memory access.  We do this by specifying the IA frequency
8529 	 * the PCU should use as a reference to determine the ring frequency.
8530 	 */
8531 	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
8532 	     gpu_freq--) {
8533 		int diff = dev_priv->max_delay - gpu_freq;
8534 		int d;
8535 
8536 		/*
8537 		 * For GPU frequencies less than 750MHz, just use the lowest
8538 		 * ring freq.
8539 		 */
8540 		if (gpu_freq < min_freq)
8541 			ia_freq = 800;
8542 		else
8543 			ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
8544 		d = 100;
8545 		ia_freq = (ia_freq + d / 2) / d;
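		/* The mailbox takes the IA frequency as a ratio in 100MHz
		 * units; adding d/2 before dividing rounds to the nearest
		 * ratio instead of truncating. */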
8546 
8547 		I915_WRITE(GEN6_PCODE_DATA,
8548 			   (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
8549 			   gpu_freq);
8550 		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
8551 			   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
8552 		if (_intel_wait_for(dev,
8553 		    (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
8554 		    10, 1, "915frq")) {
8555 			DRM_ERROR("pcode write of freq table timed out\n");
8556 			continue;
8557 		}
8558 	}
8559 
8560 	DRM_UNLOCK(dev);
8561 }
8562 
8563 static void ironlake_init_clock_gating(struct drm_device *dev)
8564 {
8565 	struct drm_i915_private *dev_priv = dev->dev_private;
8566 	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
8567 
8568 	/* Required for FBC */
8569 	dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
8570 		DPFCRUNIT_CLOCK_GATE_DISABLE |
8571 		DPFDUNIT_CLOCK_GATE_DISABLE;
8572 	/* Required for CxSR */
8573 	dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
8574 
8575 	I915_WRITE(PCH_3DCGDIS0,
8576 		   MARIUNIT_CLOCK_GATE_DISABLE |
8577 		   SVSMUNIT_CLOCK_GATE_DISABLE);
8578 	I915_WRITE(PCH_3DCGDIS1,
8579 		   VFMUNIT_CLOCK_GATE_DISABLE);
8580 
8581 	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
8582 
8583 	/*
8584 	 * According to the spec the following bits should be set in
8585 	 * order to enable memory self-refresh
8586 	 * The bit 22/21 of 0x42004
8587 	 * The bit 5 of 0x42020
8588 	 * The bit 15 of 0x45000
8589 	 */
8590 	I915_WRITE(ILK_DISPLAY_CHICKEN2,
8591 		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
8592 		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
8593 	I915_WRITE(ILK_DSPCLK_GATE,
8594 		   (I915_READ(ILK_DSPCLK_GATE) |
8595 		    ILK_DPARB_CLK_GATE));
8596 	I915_WRITE(DISP_ARB_CTL,
8597 		   (I915_READ(DISP_ARB_CTL) |
8598 		    DISP_FBC_WM_DIS));
8599 	I915_WRITE(WM3_LP_ILK, 0);
8600 	I915_WRITE(WM2_LP_ILK, 0);
8601 	I915_WRITE(WM1_LP_ILK, 0);
8602 
8603 	/*
8604 	 * Based on the document from hardware guys the following bits
8605 	 * should be set unconditionally in order to enable FBC.
8606 	 * The bit 22 of 0x42000
8607 	 * The bit 22 of 0x42004
8608 	 * The bit 7,8,9 of 0x42020.
8609 	 */
8610 	if (IS_IRONLAKE_M(dev)) {
8611 		I915_WRITE(ILK_DISPLAY_CHICKEN1,
8612 			   I915_READ(ILK_DISPLAY_CHICKEN1) |
8613 			   ILK_FBCQ_DIS);
8614 		I915_WRITE(ILK_DISPLAY_CHICKEN2,
8615 			   I915_READ(ILK_DISPLAY_CHICKEN2) |
8616 			   ILK_DPARB_GATE);
8617 		I915_WRITE(ILK_DSPCLK_GATE,
8618 			   I915_READ(ILK_DSPCLK_GATE) |
8619 			   ILK_DPFC_DIS1 |
8620 			   ILK_DPFC_DIS2 |
8621 			   ILK_CLK_FBC);
8622 	}
8623 
8624 	I915_WRITE(ILK_DISPLAY_CHICKEN2,
8625 		   I915_READ(ILK_DISPLAY_CHICKEN2) |
8626 		   ILK_ELPIN_409_SELECT);
8627 	I915_WRITE(_3D_CHICKEN2,
8628 		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
8629 		   _3D_CHICKEN2_WM_READ_PIPELINED);
8630 }
8631 
8632 static void gen6_init_clock_gating(struct drm_device *dev)
8633 {
8634 	struct drm_i915_private *dev_priv = dev->dev_private;
8635 	int pipe;
8636 	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
8637 
8638 	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
8639 
8640 	I915_WRITE(ILK_DISPLAY_CHICKEN2,
8641 		   I915_READ(ILK_DISPLAY_CHICKEN2) |
8642 		   ILK_ELPIN_409_SELECT);
8643 
8644 	I915_WRITE(WM3_LP_ILK, 0);
8645 	I915_WRITE(WM2_LP_ILK, 0);
8646 	I915_WRITE(WM1_LP_ILK, 0);
8647 
8648 	I915_WRITE(GEN6_UCGCTL1,
8649 		   I915_READ(GEN6_UCGCTL1) |
8650 		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE);
8651 
8652 	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
8653 	 * gating disable must be set.  Failure to set it results in
8654 	 * flickering pixels due to Z write ordering failures after
8655 	 * some amount of runtime in the Mesa "fire" demo, and Unigine
8656 	 * Sanctuary and Tropics, and apparently anything else with
8657 	 * alpha test or pixel discard.
8658 	 *
8659 	 * According to the spec, bit 11 (RCCUNIT) must also be set,
8660 	 * but we didn't debug actual testcases to find it out.
8661 	 */
8662 	I915_WRITE(GEN6_UCGCTL2,
8663 		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
8664 		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
8665 
8666 	/*
8667 	 * According to the spec the following bits should be
8668 	 * set in order to enable memory self-refresh and fbc:
8669 	 * The bit21 and bit22 of 0x42000
8670 	 * The bit21 and bit22 of 0x42004
8671 	 * The bit5 and bit7 of 0x42020
8672 	 * The bit14 of 0x70180
8673 	 * The bit14 of 0x71180
8674 	 */
8675 	I915_WRITE(ILK_DISPLAY_CHICKEN1,
8676 		   I915_READ(ILK_DISPLAY_CHICKEN1) |
8677 		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
8678 	I915_WRITE(ILK_DISPLAY_CHICKEN2,
8679 		   I915_READ(ILK_DISPLAY_CHICKEN2) |
8680 		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
8681 	I915_WRITE(ILK_DSPCLK_GATE,
8682 		   I915_READ(ILK_DSPCLK_GATE) |
8683 		   ILK_DPARB_CLK_GATE  |
8684 		   ILK_DPFD_CLK_GATE);
8685 
8686 	for_each_pipe(pipe) {
8687 		I915_WRITE(DSPCNTR(pipe),
8688 			   I915_READ(DSPCNTR(pipe)) |
8689 			   DISPPLANE_TRICKLE_FEED_DISABLE);
8690 		intel_flush_display_plane(dev_priv, pipe);
8691 	}
8692 }
8693 
8694 static void ivybridge_init_clock_gating(struct drm_device *dev)
8695 {
8696 	struct drm_i915_private *dev_priv = dev->dev_private;
8697 	int pipe;
8698 	uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
8699 
8700 	I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
8701 
8702 	I915_WRITE(WM3_LP_ILK, 0);
8703 	I915_WRITE(WM2_LP_ILK, 0);
8704 	I915_WRITE(WM1_LP_ILK, 0);
8705 
8706 	/* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
8707 	 * This implements the WaDisableRCZUnitClockGating workaround.
8708 	 */
8709 	I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
8710 
8711 	I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
8712 
8713 	I915_WRITE(IVB_CHICKEN3,
8714 		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
8715 		   CHICKEN3_DGMG_DONE_FIX_DISABLE);
8716 
8717 	/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
8718 	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
8719 		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
8720 
8721 	/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
8722 	I915_WRITE(GEN7_L3CNTLREG1,
8723 			GEN7_WA_FOR_GEN7_L3_CONTROL);
8724 	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
8725 			GEN7_WA_L3_CHICKEN_MODE);
8726 
8727 	/* This is required by WaCatErrorRejectionIssue */
8728 	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
8729 			I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
8730 			GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
8731 
8732 	for_each_pipe(pipe) {
8733 		I915_WRITE(DSPCNTR(pipe),
8734 			   I915_READ(DSPCNTR(pipe)) |
8735 			   DISPPLANE_TRICKLE_FEED_DISABLE);
8736 		intel_flush_display_plane(dev_priv, pipe);
8737 	}
8738 }
8739 
8740 static void g4x_init_clock_gating(struct drm_device *dev)
8741 {
8742 	struct drm_i915_private *dev_priv = dev->dev_private;
8743 	uint32_t dspclk_gate;
8744 
8745 	I915_WRITE(RENCLK_GATE_D1, 0);
8746 	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
8747 		   GS_UNIT_CLOCK_GATE_DISABLE |
8748 		   CL_UNIT_CLOCK_GATE_DISABLE);
8749 	I915_WRITE(RAMCLK_GATE_D, 0);
8750 	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
8751 		OVRUNIT_CLOCK_GATE_DISABLE |
8752 		OVCUNIT_CLOCK_GATE_DISABLE;
8753 	if (IS_GM45(dev))
8754 		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
8755 	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
8756 }
8757 
8758 static void crestline_init_clock_gating(struct drm_device *dev)
8759 {
8760 	struct drm_i915_private *dev_priv = dev->dev_private;
8761 
8762 	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
8763 	I915_WRITE(RENCLK_GATE_D2, 0);
8764 	I915_WRITE(DSPCLK_GATE_D, 0);
8765 	I915_WRITE(RAMCLK_GATE_D, 0);
8766 	I915_WRITE16(DEUC, 0);
8767 }
8768 
8769 static void broadwater_init_clock_gating(struct drm_device *dev)
8770 {
8771 	struct drm_i915_private *dev_priv = dev->dev_private;
8772 
8773 	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
8774 		   I965_RCC_CLOCK_GATE_DISABLE |
8775 		   I965_RCPB_CLOCK_GATE_DISABLE |
8776 		   I965_ISC_CLOCK_GATE_DISABLE |
8777 		   I965_FBC_CLOCK_GATE_DISABLE);
8778 	I915_WRITE(RENCLK_GATE_D2, 0);
8779 }
8780 
8781 static void gen3_init_clock_gating(struct drm_device *dev)
8782 {
8783 	struct drm_i915_private *dev_priv = dev->dev_private;
8784 	u32 dstate = I915_READ(D_STATE);
8785 
8786 	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
8787 		DSTATE_DOT_CLOCK_GATING;
8788 	I915_WRITE(D_STATE, dstate);
8789 }
8790 
8791 static void i85x_init_clock_gating(struct drm_device *dev)
8792 {
8793 	struct drm_i915_private *dev_priv = dev->dev_private;
8794 
8795 	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
8796 }
8797 
8798 static void i830_init_clock_gating(struct drm_device *dev)
8799 {
8800 	struct drm_i915_private *dev_priv = dev->dev_private;
8801 
8802 	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
8803 }
8804 
8805 static void ibx_init_clock_gating(struct drm_device *dev)
8806 {
8807 	struct drm_i915_private *dev_priv = dev->dev_private;
8808 
8809 	/*
8810 	 * On Ibex Peak and Cougar Point, we need to disable clock
8811 	 * gating for the panel power sequencer or it will fail to
8812 	 * start up when no ports are active.
8813 	 */
8814 	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
8815 }
8816 
8817 static void cpt_init_clock_gating(struct drm_device *dev)
8818 {
8819 	struct drm_i915_private *dev_priv = dev->dev_private;
8820 	int pipe;
8821 
8822 	/*
8823 	 * On Ibex Peak and Cougar Point, we need to disable clock
8824 	 * gating for the panel power sequencer or it will fail to
8825 	 * start up when no ports are active.
8826 	 */
8827 	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
8828 	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
8829 		   DPLS_EDP_PPS_FIX_DIS);
8830 	/* Without this, mode sets may fail silently on FDI */
8831 	for_each_pipe(pipe)
8832 		I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
8833 }
8834 
8835 static void ironlake_teardown_rc6(struct drm_device *dev)
8836 {
8837 	struct drm_i915_private *dev_priv = dev->dev_private;
8838 
8839 	if (dev_priv->renderctx) {
8840 		i915_gem_object_unpin(dev_priv->renderctx);
8841 		drm_gem_object_unreference(&dev_priv->renderctx->base);
8842 		dev_priv->renderctx = NULL;
8843 	}
8844 
8845 	if (dev_priv->pwrctx) {
8846 		i915_gem_object_unpin(dev_priv->pwrctx);
8847 		drm_gem_object_unreference(&dev_priv->pwrctx->base);
8848 		dev_priv->pwrctx = NULL;
8849 	}
8850 }
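
/*
 * Editor's note: the teardown above is idempotent -- each context page
 * is unpinned, unreferenced, and then NULLed, so a second call (say,
 * from an error path after the normal disable path) is a harmless
 * no-op.  The same release-and-clear idiom for a single object
 * ("release_context_page" is a hypothetical helper, not driver API):
 */
#if 0
static void
release_context_page(struct drm_i915_gem_object **obj)
{
	if (*obj == NULL)
		return;			/* already released */
	i915_gem_object_unpin(*obj);
	drm_gem_object_unreference(&(*obj)->base);
	*obj = NULL;			/* make the next call a no-op */
}
#endif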
8851 
8852 static void ironlake_disable_rc6(struct drm_device *dev)
8853 {
8854 	struct drm_i915_private *dev_priv = dev->dev_private;
8855 
8856 	if (I915_READ(PWRCTXA)) {
8857 		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
8858 		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
8859 		(void)_intel_wait_for(dev,
8860 		    ((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
8861 		    50, 1, "915pro");
8862 
8863 		I915_WRITE(PWRCTXA, 0);
8864 		POSTING_READ(PWRCTXA);
8865 
8866 		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
8867 		POSTING_READ(RSTDBYCTL);
8868 	}
8869 
8870 	ironlake_teardown_rc6(dev);
8871 }
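
/*
 * Editor's note: the _intel_wait_for() call above polls RSTDBYCTL until
 * the render standby unit reports ON, giving up after 50ms.  A minimal
 * sketch of such a bounded polling loop (illustrative names; this is
 * not the driver's implementation of _intel_wait_for):
 */
#if 0
static int
poll_reg(struct drm_i915_private *dev_priv, u32 reg, u32 mask, u32 want,
	 int timeout_ms)
{
	while (timeout_ms-- > 0) {
		if ((I915_READ(reg) & mask) == want)
			return 0;	/* condition met */
		DELAY(1000);		/* busy-wait 1ms between reads */
	}
	return -ETIMEDOUT;		/* condition never became true */
}
#endif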
8872 
8873 static int ironlake_setup_rc6(struct drm_device *dev)
8874 {
8875 	struct drm_i915_private *dev_priv = dev->dev_private;
8876 
8877 	if (dev_priv->renderctx == NULL)
8878 		dev_priv->renderctx = intel_alloc_context_page(dev);
8879 	if (!dev_priv->renderctx)
8880 		return -ENOMEM;
8881 
8882 	if (dev_priv->pwrctx == NULL)
8883 		dev_priv->pwrctx = intel_alloc_context_page(dev);
8884 	if (!dev_priv->pwrctx) {
8885 		ironlake_teardown_rc6(dev);
8886 		return -ENOMEM;
8887 	}
8888 
8889 	return 0;
8890 }
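
/*
 * Editor's note: the setup above allocates lazily (only when a pointer
 * is still NULL) and, when the second allocation fails, unwinds through
 * ironlake_teardown_rc6() instead of freeing piecemeal -- safe because
 * teardown skips NULL members.  The same shape in miniature
 * (alloc_a/alloc_b/teardown_all are illustrative, not driver API):
 */
#if 0
	if (alloc_a(dev) != 0)
		return -ENOMEM;		/* nothing allocated yet */
	if (alloc_b(dev) != 0) {
		teardown_all(dev);	/* releases a, skips the NULL b */
		return -ENOMEM;
	}
	return 0;
#endif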
8891 
8892 void ironlake_enable_rc6(struct drm_device *dev)
8893 {
8894 	struct drm_i915_private *dev_priv = dev->dev_private;
8895 	int ret;
8896 
8897 	/* RC6 is disabled by default due to repeated reports of hangs during
8898 	 * boot and resume.
8899 	 */
8900 	if (!intel_enable_rc6(dev))
8901 		return;
8902 
8903 	DRM_LOCK(dev);
8904 	ret = ironlake_setup_rc6(dev);
8905 	if (ret) {
8906 		DRM_UNLOCK(dev);
8907 		return;
8908 	}
8909 
8910 	/*
8911 	 * The GPU can automatically power down the render unit if given a
8912 	 * page in which to save its state.
8913 	 */
8914 	ret = BEGIN_LP_RING(6);
8915 	if (ret) {
8916 		ironlake_teardown_rc6(dev);
8917 		DRM_UNLOCK(dev);
8918 		return;
8919 	}
8920 
8921 	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
8922 	OUT_RING(MI_SET_CONTEXT);
8923 	OUT_RING(dev_priv->renderctx->gtt_offset |
8924 		 MI_MM_SPACE_GTT |
8925 		 MI_SAVE_EXT_STATE_EN |
8926 		 MI_RESTORE_EXT_STATE_EN |
8927 		 MI_RESTORE_INHIBIT);
8928 	OUT_RING(MI_SUSPEND_FLUSH);
8929 	OUT_RING(MI_NOOP);
8930 	OUT_RING(MI_FLUSH);
8931 	ADVANCE_LP_RING();
8932 
8933 	/*
8934 	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
8935 	 * does an implicit flush there; together with the MI_FLUSH above,
8936 	 * that makes it safe to assume that renderctx is now valid.
8937 	 */
8938 	ret = intel_wait_ring_idle(LP_RING(dev_priv));
8939 	if (ret) {
8940 		DRM_ERROR("failed to enable ironlake power savings\n");
8941 		ironlake_teardown_rc6(dev);
8942 		DRM_UNLOCK(dev);
8943 		return;
8944 	}
8945 
8946 	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
8947 	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
8948 	DRM_UNLOCK(dev);
8949 }
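
/*
 * Editor's note: the ring emission above follows the driver's standard
 * begin/emit/advance idiom: BEGIN_LP_RING(n) reserves n dwords (and can
 * fail, so nothing may be emitted after an error), OUT_RING() writes
 * one dword each, and ADVANCE_LP_RING() publishes the new ring tail to
 * the GPU.  A minimal sketch with an arbitrary two-dword payload:
 */
#if 0
	ret = BEGIN_LP_RING(2);		/* reserve two dwords */
	if (ret)
		return;			/* ring wedged/busy: emit nothing */
	OUT_RING(MI_FLUSH);		/* dword 1 */
	OUT_RING(MI_NOOP);		/* dword 2 */
	ADVANCE_LP_RING();		/* expose the commands to the GPU */
#endif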
8950 
8951 void intel_init_clock_gating(struct drm_device *dev)
8952 {
8953 	struct drm_i915_private *dev_priv = dev->dev_private;
8954 
8955 	dev_priv->display.init_clock_gating(dev);
8956 
8957 	if (dev_priv->display.init_pch_clock_gating)
8958 		dev_priv->display.init_pch_clock_gating(dev);
8959 }
8960 
8961 /* Set up chip specific display functions */
8962 static void intel_init_display(struct drm_device *dev)
8963 {
8964 	struct drm_i915_private *dev_priv = dev->dev_private;
8965 
8966 	/* We always want a DPMS function */
8967 	if (HAS_PCH_SPLIT(dev)) {
8968 		dev_priv->display.dpms = ironlake_crtc_dpms;
8969 		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
8970 		dev_priv->display.update_plane = ironlake_update_plane;
8971 	} else {
8972 		dev_priv->display.dpms = i9xx_crtc_dpms;
8973 		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
8974 		dev_priv->display.update_plane = i9xx_update_plane;
8975 	}
8976 
8977 	if (I915_HAS_FBC(dev)) {
8978 		if (HAS_PCH_SPLIT(dev)) {
8979 			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
8980 			dev_priv->display.enable_fbc = ironlake_enable_fbc;
8981 			dev_priv->display.disable_fbc = ironlake_disable_fbc;
8982 		} else if (IS_GM45(dev)) {
8983 			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
8984 			dev_priv->display.enable_fbc = g4x_enable_fbc;
8985 			dev_priv->display.disable_fbc = g4x_disable_fbc;
8986 		} else if (IS_CRESTLINE(dev)) {
8987 			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
8988 			dev_priv->display.enable_fbc = i8xx_enable_fbc;
8989 			dev_priv->display.disable_fbc = i8xx_disable_fbc;
8990 		}
8991 		/* 855GM needs testing */
8992 	}
8993 
8994 	/* Returns the core display clock speed */
8995 	if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
8996 		dev_priv->display.get_display_clock_speed =
8997 			i945_get_display_clock_speed;
8998 	else if (IS_I915G(dev))
8999 		dev_priv->display.get_display_clock_speed =
9000 			i915_get_display_clock_speed;
9001 	else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
9002 		dev_priv->display.get_display_clock_speed =
9003 			i9xx_misc_get_display_clock_speed;
9004 	else if (IS_I915GM(dev))
9005 		dev_priv->display.get_display_clock_speed =
9006 			i915gm_get_display_clock_speed;
9007 	else if (IS_I865G(dev))
9008 		dev_priv->display.get_display_clock_speed =
9009 			i865_get_display_clock_speed;
9010 	else if (IS_I85X(dev))
9011 		dev_priv->display.get_display_clock_speed =
9012 			i855_get_display_clock_speed;
9013 	else /* 852, 830 */
9014 		dev_priv->display.get_display_clock_speed =
9015 			i830_get_display_clock_speed;
9016 
9017 	/* For FIFO watermark updates */
9018 	if (HAS_PCH_SPLIT(dev)) {
9019 		dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
9020 		dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
9021 
9022 		/* IVB configs may use multi-threaded forcewake */
9023 		if (IS_IVYBRIDGE(dev)) {
9024 			u32	ecobus;
9025 
9026 			/* A small trick here: if the BIOS hasn't configured MT forcewake,
9027 			 * and if the device is in RC6, then force_wake_mt_get will not wake
9028 			 * the device and the ECOBUS read will return zero, which the test
9029 			 * below will (correctly) interpret as MT forcewake being
9030 			 * disabled.
9031 			 */
9032 			DRM_LOCK(dev);
9033 			__gen6_gt_force_wake_mt_get(dev_priv);
9034 			ecobus = I915_READ_NOTRACE(ECOBUS);
9035 			__gen6_gt_force_wake_mt_put(dev_priv);
9036 			DRM_UNLOCK(dev);
9037 
9038 			if (ecobus & FORCEWAKE_MT_ENABLE) {
9039 				DRM_DEBUG_KMS("Using MT version of forcewake\n");
9040 				dev_priv->display.force_wake_get =
9041 					__gen6_gt_force_wake_mt_get;
9042 				dev_priv->display.force_wake_put =
9043 					__gen6_gt_force_wake_mt_put;
9044 			}
9045 		}
9046 
9047 		if (HAS_PCH_IBX(dev))
9048 			dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
9049 		else if (HAS_PCH_CPT(dev))
9050 			dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
9051 
9052 		if (IS_GEN5(dev)) {
9053 			if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
9054 				dev_priv->display.update_wm = ironlake_update_wm;
9055 			else {
9056 				DRM_DEBUG_KMS("Failed to get proper latency. "
9057 					      "Disable CxSR\n");
9058 				dev_priv->display.update_wm = NULL;
9059 			}
9060 			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
9061 			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
9062 			dev_priv->display.write_eld = ironlake_write_eld;
9063 		} else if (IS_GEN6(dev)) {
9064 			if (SNB_READ_WM0_LATENCY()) {
9065 				dev_priv->display.update_wm = sandybridge_update_wm;
9066 				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
9067 			} else {
9068 				DRM_DEBUG_KMS("Failed to read display plane latency. "
9069 					      "Disable CxSR\n");
9070 				dev_priv->display.update_wm = NULL;
9071 			}
9072 			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
9073 			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
9074 			dev_priv->display.write_eld = ironlake_write_eld;
9075 		} else if (IS_IVYBRIDGE(dev)) {
9076 			/* FIXME: detect B0+ stepping and use auto training */
9077 			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
9078 			if (SNB_READ_WM0_LATENCY()) {
9079 				dev_priv->display.update_wm = sandybridge_update_wm;
9080 				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
9081 			} else {
9082 				DRM_DEBUG_KMS("Failed to read display plane latency. "
9083 					      "Disable CxSR\n");
9084 				dev_priv->display.update_wm = NULL;
9085 			}
9086 			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
9087 			dev_priv->display.write_eld = ironlake_write_eld;
9088 		} else
9089 			dev_priv->display.update_wm = NULL;
9090 	} else if (IS_PINEVIEW(dev)) {
9091 		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
9092 					    dev_priv->is_ddr3,
9093 					    dev_priv->fsb_freq,
9094 					    dev_priv->mem_freq)) {
9095 			DRM_INFO("failed to find known CxSR latency "
9096 				 "(found ddr%s fsb freq %d, mem freq %d), "
9097 				 "disabling CxSR\n",
9098 				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
9099 				 dev_priv->fsb_freq, dev_priv->mem_freq);
9100 			/* Disable CxSR and never update its watermark again */
9101 			pineview_disable_cxsr(dev);
9102 			dev_priv->display.update_wm = NULL;
9103 		} else
9104 			dev_priv->display.update_wm = pineview_update_wm;
9105 		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
9106 	} else if (IS_G4X(dev)) {
9107 		dev_priv->display.write_eld = g4x_write_eld;
9108 		dev_priv->display.update_wm = g4x_update_wm;
9109 		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
9110 	} else if (IS_GEN4(dev)) {
9111 		dev_priv->display.update_wm = i965_update_wm;
9112 		if (IS_CRESTLINE(dev))
9113 			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
9114 		else if (IS_BROADWATER(dev))
9115 			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
9116 	} else if (IS_GEN3(dev)) {
9117 		dev_priv->display.update_wm = i9xx_update_wm;
9118 		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
9119 		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
9120 	} else if (IS_I865G(dev)) {
9121 		dev_priv->display.update_wm = i830_update_wm;
9122 		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
9123 		dev_priv->display.get_fifo_size = i830_get_fifo_size;
9124 	} else if (IS_I85X(dev)) {
9125 		dev_priv->display.update_wm = i9xx_update_wm;
9126 		dev_priv->display.get_fifo_size = i85x_get_fifo_size;
9127 		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
9128 	} else {
9129 		dev_priv->display.update_wm = i830_update_wm;
9130 		dev_priv->display.init_clock_gating = i830_init_clock_gating;
9131 		if (IS_845G(dev))
9132 			dev_priv->display.get_fifo_size = i845_get_fifo_size;
9133 		else
9134 			dev_priv->display.get_fifo_size = i830_get_fifo_size;
9135 	}
9136 
9137 	/* The default just returns -ENODEV to indicate page flips are unsupported */
9138 	dev_priv->display.queue_flip = intel_default_queue_flip;
9139 
9140 	switch (INTEL_INFO(dev)->gen) {
9141 	case 2:
9142 		dev_priv->display.queue_flip = intel_gen2_queue_flip;
9143 		break;
9144 
9145 	case 3:
9146 		dev_priv->display.queue_flip = intel_gen3_queue_flip;
9147 		break;
9148 
9149 	case 4:
9150 	case 5:
9151 		dev_priv->display.queue_flip = intel_gen4_queue_flip;
9152 		break;
9153 
9154 	case 6:
9155 		dev_priv->display.queue_flip = intel_gen6_queue_flip;
9156 		break;
9157 	case 7:
9158 		dev_priv->display.queue_flip = intel_gen7_queue_flip;
9159 		break;
9160 	}
9161 }
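
/*
 * Editor's note: intel_init_display() fills dev_priv->display with
 * per-chipset hooks so the rest of the driver can stay generation-
 * agnostic.  Optional hooks may be left NULL (update_wm above, for
 * instance, when latency values cannot be read), so callers are
 * expected to test before dispatching -- sketch:
 */
#if 0
	if (dev_priv->display.update_wm != NULL)
		dev_priv->display.update_wm(dev);	/* chipset-specific path */
#endif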
9162 
9163 /*
9164  * Some BIOSes assume that the GPU's pipe A is enabled at suspend, at
9165  * resume, and at other times.  This quirk forces pipe A on so that is
9166  * actually the case on affected systems.
9167  */
9168 static void quirk_pipea_force(struct drm_device *dev)
9169 {
9170 	struct drm_i915_private *dev_priv = dev->dev_private;
9171 
9172 	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
9173 	DRM_DEBUG("applying pipe a force quirk\n");
9174 }
9175 
9176 /*
9177  * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason.
9178  */
9179 static void quirk_ssc_force_disable(struct drm_device *dev)
9180 {
9181 	struct drm_i915_private *dev_priv = dev->dev_private;
9182 	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
9183 }
9184 
9185 struct intel_quirk {
9186 	int device;
9187 	int subsystem_vendor;
9188 	int subsystem_device;
9189 	void (*hook)(struct drm_device *dev);
9190 };
9191 
9192 #define	PCI_ANY_ID	(~0u)
9193 
9194 struct intel_quirk intel_quirks[] = {
9195 	/* HP Mini needs pipe A force quirk (LP: #322104) */
9196 	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
9197 
9198 	/* Thinkpad R31 needs pipe A force quirk */
9199 	{ 0x3577, 0x1014, 0x0505, quirk_pipea_force },
9200 	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
9201 	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
9202 
9203 	/* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
9204 	{ 0x3577,  0x1014, 0x0513, quirk_pipea_force },
9205 	/* ThinkPad X40 needs pipe A force quirk */
9206 
9207 	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
9208 	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
9209 
9210 	/* 855 & before need to leave pipe A & dpll A up */
9211 	{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
9212 	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
9213 
9214 	/* Lenovo U160 cannot use SSC on LVDS */
9215 	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
9216 
9217 	/* Sony Vaio Y cannot use SSC on LVDS */
9218 	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
9219 };
9220 
9221 static void intel_init_quirks(struct drm_device *dev)
9222 {
9223 	struct intel_quirk *q;
9224 	device_t d;
9225 	int i;
9226 
9227 	d = dev->device;
9228 	for (i = 0; i < DRM_ARRAY_SIZE(intel_quirks); i++) {
9229 		q = &intel_quirks[i];
9230 		if (pci_get_device(d) == q->device &&
9231 		    (pci_get_subvendor(d) == q->subsystem_vendor ||
9232 		     q->subsystem_vendor == PCI_ANY_ID) &&
9233 		    (pci_get_subdevice(d) == q->subsystem_device ||
9234 		     q->subsystem_device == PCI_ANY_ID))
9235 			q->hook(dev);
9236 	}
9237 }
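
/*
 * Editor's note: supporting another machine is one more row in
 * intel_quirks[]; intel_init_quirks() matches on PCI device id plus
 * subsystem vendor/device, with PCI_ANY_ID as a wildcard for either
 * subsystem field.  The ids below are placeholders, not a real system:
 */
#if 0
	/* Hypothetical machine whose BIOS needs pipe A forced on */
	{ 0x1234, 0xabcd, 0x5678, quirk_pipea_force },
#endif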
9238 
9239 /* Disable the VGA plane that we never use */
9240 static void i915_disable_vga(struct drm_device *dev)
9241 {
9242 	struct drm_i915_private *dev_priv = dev->dev_private;
9243 	u8 sr1;
9244 	u32 vga_reg;
9245 
9246 	if (HAS_PCH_SPLIT(dev))
9247 		vga_reg = CPU_VGACNTRL;
9248 	else
9249 		vga_reg = VGACNTRL;
9250 
9251 #if 0
9252 	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
9253 #endif
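	/*
	 * Editor's note: SR01 (the VGA sequencer clocking-mode register,
	 * reached through the index/data port pair below) carries the
	 * "screen off" flag in bit 5; setting it blanks the screen before
	 * the VGA plane itself is disabled.
	 */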
9254 	outb(VGA_SR_INDEX, 1);
9255 	sr1 = inb(VGA_SR_DATA);
9256 	outb(VGA_SR_DATA, sr1 | 1 << 5);
9257 #if 0
9258 	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
9259 #endif
9260 	DELAY(300);
9261 
9262 	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
9263 	POSTING_READ(vga_reg);
9264 }
9265 
9266 void intel_modeset_init(struct drm_device *dev)
9267 {
9268 	struct drm_i915_private *dev_priv = dev->dev_private;
9269 	int i, ret;
9270 
9271 	drm_mode_config_init(dev);
9272 
9273 	dev->mode_config.min_width = 0;
9274 	dev->mode_config.min_height = 0;
9275 
9276 	dev->mode_config.preferred_depth = 24;
9277 	dev->mode_config.prefer_shadow = 1;
9278 
9279 	dev->mode_config.funcs = __DECONST(struct drm_mode_config_funcs *,
9280 	    &intel_mode_funcs);
9281 
9282 	intel_init_quirks(dev);
9283 
9284 	intel_init_display(dev);
9285 
9286 	if (IS_GEN2(dev)) {
9287 		dev->mode_config.max_width = 2048;
9288 		dev->mode_config.max_height = 2048;
9289 	} else if (IS_GEN3(dev)) {
9290 		dev->mode_config.max_width = 4096;
9291 		dev->mode_config.max_height = 4096;
9292 	} else {
9293 		dev->mode_config.max_width = 8192;
9294 		dev->mode_config.max_height = 8192;
9295 	}
9296 	dev->mode_config.fb_base = dev->agp->base;
9297 
9298 	DRM_DEBUG_KMS("%d display pipe%s available.\n",
9299 		      dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
9300 
9301 	for (i = 0; i < dev_priv->num_pipe; i++) {
9302 		intel_crtc_init(dev, i);
9303 		ret = intel_plane_init(dev, i);
9304 		if (ret)
9305 			DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
9306 	}
9307 
9308 	/* Just disable the VGA plane once at startup */
9309 	i915_disable_vga(dev);
9310 	intel_setup_outputs(dev);
9311 
9312 	intel_init_clock_gating(dev);
9313 
9314 	if (IS_IRONLAKE_M(dev)) {
9315 		ironlake_enable_drps(dev);
9316 		intel_init_emon(dev);
9317 	}
9318 
9319 	if (IS_GEN6(dev)) {
9320 		gen6_enable_rps(dev_priv);
9321 		gen6_update_ring_freq(dev_priv);
9322 	}
9323 
9324 	TASK_INIT(&dev_priv->idle_task, 0, intel_idle_update, dev_priv);
9325 	callout_init_mp(&dev_priv->idle_callout);
9326 }
9327 
9328 void intel_modeset_gem_init(struct drm_device *dev)
9329 {
9330 	if (IS_IRONLAKE_M(dev))
9331 		ironlake_enable_rc6(dev);
9332 
9333 	intel_setup_overlay(dev);
9334 }
9335 
9336 void intel_modeset_cleanup(struct drm_device *dev)
9337 {
9338 	struct drm_i915_private *dev_priv = dev->dev_private;
9339 	struct drm_crtc *crtc;
9341 
9342 	drm_kms_helper_poll_fini(dev);
9343 	DRM_LOCK(dev);
9344 
9345 #if 0
9346 	intel_unregister_dsm_handler();
9347 #endif
9348 
9349 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9350 		/* Skip inactive CRTCs */
9351 		if (!crtc->fb)
9352 			continue;
9353 
9355 		intel_increase_pllclock(crtc);
9356 	}
9357 
9358 	intel_disable_fbc(dev);
9359 
9360 	if (IS_IRONLAKE_M(dev))
9361 		ironlake_disable_drps(dev);
9362 	if (IS_GEN6(dev))
9363 		gen6_disable_rps(dev);
9364 
9365 	if (IS_IRONLAKE_M(dev))
9366 		ironlake_disable_rc6(dev);
9367 
9368 	/* Disable the IRQ before mode object teardown, since the IRQ handler
9369 	 * might enqueue unpin/hotplug work. */
9370 	drm_irq_uninstall(dev);
9371 	DRM_UNLOCK(dev);
9372 
9373 	if (taskqueue_cancel(dev_priv->tq, &dev_priv->hotplug_task, NULL))
9374 		taskqueue_drain(dev_priv->tq, &dev_priv->hotplug_task);
9375 	if (taskqueue_cancel(dev_priv->tq, &dev_priv->rps_task, NULL))
9376 		taskqueue_drain(dev_priv->tq, &dev_priv->rps_task);
9377 
9378 	/* Shut off idle work before the crtcs get freed. */
9379 	if (taskqueue_cancel(dev_priv->tq, &dev_priv->idle_task, NULL))
9380 		taskqueue_drain(dev_priv->tq, &dev_priv->idle_task);
9381 
9382 	drm_mode_config_cleanup(dev);
9383 }
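
/*
 * Editor's note: the taskqueue_cancel()/taskqueue_drain() pairs above
 * implement "stop it, and if it is already running, wait for it":
 * cancel returns nonzero when the task cannot be removed because it is
 * in flight, and drain then blocks until that invocation finishes.
 * The idiom in isolation (tq and task stand for any queue and task):
 */
#if 0
	if (taskqueue_cancel(tq, &task, NULL))	/* already running? */
		taskqueue_drain(tq, &task);	/* then wait it out */
#endif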
9384 
9385 /*
9386  * Return the encoder currently attached to the connector.
9387  */
9388 struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
9389 {
9390 	return &intel_attached_encoder(connector)->base;
9391 }
9392 
9393 void intel_connector_attach_encoder(struct intel_connector *connector,
9394 				    struct intel_encoder *encoder)
9395 {
9396 	connector->encoder = encoder;
9397 	drm_mode_connector_attach_encoder(&connector->base,
9398 					  &encoder->base);
9399 }
9400 
9401 /*
9402  * Set VGA decode state: true enables VGA decode, false disables it.
9403  */
9404 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
9405 {
9406 	struct drm_i915_private *dev_priv;
9407 	device_t bridge_dev;
9408 	u16 gmch_ctrl;
9409 
9410 	dev_priv = dev->dev_private;
9411 	bridge_dev = intel_gtt_get_bridge_device();
9412 	gmch_ctrl = pci_read_config(bridge_dev, INTEL_GMCH_CTRL, 2);
9413 	if (state)
9414 		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
9415 	else
9416 		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
9417 	pci_write_config(bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl, 2);
9418 	return (0);
9419 }
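
/*
 * Editor's note: a typical call of the helper above, releasing legacy
 * VGA decode on the GMCH before another adapter claims the VGA ranges
 * (sketch; the surrounding hand-off logic is omitted):
 */
#if 0
	intel_modeset_vga_set_state(dev, false);	/* stop decoding VGA */
#endif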
9420 
9421 struct intel_display_error_state {
9422 	struct intel_cursor_error_state {
9423 		u32 control;
9424 		u32 position;
9425 		u32 base;
9426 		u32 size;
9427 	} cursor[2];
9428 
9429 	struct intel_pipe_error_state {
9430 		u32 conf;
9431 		u32 source;
9432 
9433 		u32 htotal;
9434 		u32 hblank;
9435 		u32 hsync;
9436 		u32 vtotal;
9437 		u32 vblank;
9438 		u32 vsync;
9439 	} pipe[2];
9440 
9441 	struct intel_plane_error_state {
9442 		u32 control;
9443 		u32 stride;
9444 		u32 size;
9445 		u32 pos;
9446 		u32 addr;
9447 		u32 surface;
9448 		u32 tile_offset;
9449 	} plane[2];
9450 };
9451 
9452 struct intel_display_error_state *
9453 intel_display_capture_error_state(struct drm_device *dev)
9454 {
9455 	drm_i915_private_t *dev_priv = dev->dev_private;
9456 	struct intel_display_error_state *error;
9457 	int i;
9458 
9459 	error = kmalloc(sizeof(*error), DRM_MEM_KMS, M_NOWAIT);
9460 	if (error == NULL)
9461 		return NULL;
9462 
9463 	for (i = 0; i < 2; i++) {
9464 		error->cursor[i].control = I915_READ(CURCNTR(i));
9465 		error->cursor[i].position = I915_READ(CURPOS(i));
9466 		error->cursor[i].base = I915_READ(CURBASE(i));
9467 
9468 		error->plane[i].control = I915_READ(DSPCNTR(i));
9469 		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
9470 		error->plane[i].size = I915_READ(DSPSIZE(i));
9471 		error->plane[i].pos = I915_READ(DSPPOS(i));
9472 		error->plane[i].addr = I915_READ(DSPADDR(i));
9473 		if (INTEL_INFO(dev)->gen >= 4) {
9474 			error->plane[i].surface = I915_READ(DSPSURF(i));
9475 			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
9476 		}
9477 
9478 		error->pipe[i].conf = I915_READ(PIPECONF(i));
9479 		error->pipe[i].source = I915_READ(PIPESRC(i));
9480 		error->pipe[i].htotal = I915_READ(HTOTAL(i));
9481 		error->pipe[i].hblank = I915_READ(HBLANK(i));
9482 		error->pipe[i].hsync = I915_READ(HSYNC(i));
9483 		error->pipe[i].vtotal = I915_READ(VTOTAL(i));
9484 		error->pipe[i].vblank = I915_READ(VBLANK(i));
9485 		error->pipe[i].vsync = I915_READ(VSYNC(i));
9486 	}
9487 
9488 	return error;
9489 }
9490 
9491 void
9492 intel_display_print_error_state(struct sbuf *m,
9493 				struct drm_device *dev,
9494 				struct intel_display_error_state *error)
9495 {
9496 	int i;
9497 
9498 	for (i = 0; i < 2; i++) {
9499 		sbuf_printf(m, "Pipe [%d]:\n", i);
9500 		sbuf_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
9501 		sbuf_printf(m, "  SRC: %08x\n", error->pipe[i].source);
9502 		sbuf_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
9503 		sbuf_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
9504 		sbuf_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
9505 		sbuf_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
9506 		sbuf_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
9507 		sbuf_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);
9508 
9509 		sbuf_printf(m, "Plane [%d]:\n", i);
9510 		sbuf_printf(m, "  CNTR: %08x\n", error->plane[i].control);
9511 		sbuf_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
9512 		sbuf_printf(m, "  SIZE: %08x\n", error->plane[i].size);
9513 		sbuf_printf(m, "  POS: %08x\n", error->plane[i].pos);
9514 		sbuf_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
9515 		if (INTEL_INFO(dev)->gen >= 4) {
9516 			sbuf_printf(m, "  SURF: %08x\n", error->plane[i].surface);
9517 			sbuf_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
9518 		}
9519 
9520 		sbuf_printf(m, "Cursor [%d]:\n", i);
9521 		sbuf_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
9522 		sbuf_printf(m, "  POS: %08x\n", error->cursor[i].position);
9523 		sbuf_printf(m, "  BASE: %08x\n", error->cursor[i].base);
9524 	}
9525 }
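
/*
 * Editor's note: the two routines above are intended as a pair around a
 * GPU hang: capture first (M_NOWAIT, so it is usable from awkward
 * contexts and may return NULL), print later into an sbuf.  Sketch --
 * the release call is an assumption matching the kmalloc(DRM_MEM_KMS)
 * allocation, not code from this file:
 */
#if 0
	struct intel_display_error_state *err;

	err = intel_display_capture_error_state(dev);
	if (err != NULL) {
		intel_display_print_error_state(m, dev, err);
		drm_free(err, DRM_MEM_KMS);	/* assumed matching free */
	}
#endif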
9526