xref: /dragonfly/sys/dev/drm/i915/intel_display.c (revision 548a3528)
1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *	Eric Anholt <eric@anholt.net>
25  *
26  * $FreeBSD: src/sys/dev/drm2/i915/intel_display.c,v 1.2 2012/05/24 19:13:54 dim Exp $
27  */
28 
29 #include <ddb/ddb.h>
30 #include <sys/limits.h>
31 
32 #include <drm/drmP.h>
33 #include <drm/drm_edid.h>
34 #include "intel_drv.h"
35 #include <drm/i915_drm.h>
36 #include "i915_drv.h"
37 #include <drm/drm_dp_helper.h>
38 #include <drm/drm_crtc_helper.h>
39 
40 #include <linux/err.h>
41 
/* True when some encoder on this CRTC is eDP.
 * NOTE: expands a variable named `crtc` at the point of use. */
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))

/* Forward declarations for routines defined later in this file. */
bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
47 
/* One candidate DPLL configuration: the raw divisors plus the clocks
 * derived from them by intel_clock()/pineview_clock(). */
typedef struct {
	/* given values */
	int n;			/* reference divisor */
	int m1, m2;		/* feedback multipliers */
	int p1, p2;		/* post divisors */
	/* derived values */
	int	dot;		/* resulting dot clock, kHz */
	int	vco;		/* resulting VCO frequency, kHz */
	int	m;		/* effective feedback multiplier */
	int	p;		/* effective post divisor (p1 * p2) */
} intel_clock_t;

/* Inclusive [min, max] range for one divisor or derived clock. */
typedef struct {
	int	min, max;
} intel_range_t;

/* p2 selection: use p2_slow below dot_limit, p2_fast at/above it. */
typedef struct {
	int	dot_limit;
	int	p2_slow, p2_fast;
} intel_p2_t;

#define INTEL_P2_NUM		      2
typedef struct intel_limit intel_limit_t;
/* Per-platform/per-output legal divisor ranges, plus the search
 * strategy used to pick a clock within them. */
struct intel_limit {
	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t	    p2;
	bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
			int, int, intel_clock_t *, intel_clock_t *);
};
77 
/* FDI */
#define IRONLAKE_FDI_FREQ		2700000 /* in kHz for mode->clock */

/* Exhaustive search used on Gen2-Gen3 style PLLs. */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock);
/* Search variant for G4X/ILK that prefers small n and large m1/m2. */
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock);

/* DisplayPort uses fixed divisor sets rather than a search. */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *match_clock,
		      intel_clock_t *best_clock);
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *match_clock,
			   intel_clock_t *best_clock);
98 
99 static inline u32 /* units of 100MHz */
100 intel_fdi_link_freq(struct drm_device *dev)
101 {
102 	if (IS_GEN5(dev)) {
103 		struct drm_i915_private *dev_priv = dev->dev_private;
104 		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
105 	} else
106 		return 27;
107 }
108 
/* DPLL divisor limits for Gen2 (i8xx) parts: DVO/SDVO vs. LVDS. */
static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};
136 
/* DPLL divisor limits for Gen3/Gen4-era (i9xx) parts. */
static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};
164 
165 
/* DPLL divisor limits for G4X parts, per output type. */
static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	/* dot_limit 0: p2 is effectively fixed for LVDS */
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_display_port = {
	.dot = { .min = 161670, .max = 227000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 2 },
	.m = { .min = 97, .max = 108 },
	.m1 = { .min = 0x10, .max = 0x12 },
	.m2 = { .min = 0x05, .max = 0x06 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2},
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_g4x_dp,
};
239 
/* Pineview DPLL limits; its divider layout differs from other Gen3. */
static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_find_best_PLL,
};
269 
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_display_port = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000},
	.n = { .min = 1, .max = 2 },
	.m = { .min = 81, .max = 90 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2},
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_ironlake_dp,
};
359 
/*
 * Pick the DPLL limit table for an Ironlake/Sandybridge CRTC based on
 * the attached output type and, for LVDS, the current channel mode
 * (read from PCH_LVDS) and the reference clock.
 */
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
						int refclk)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP) {
			/* LVDS dual channel */
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			/* LVDS single channel */
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
			HAS_eDP)
		limit = &intel_limits_ironlake_display_port;
	else
		limit = &intel_limits_ironlake_dac;

	return limit;
}
389 
/* Pick the DPLL limit table for a G4X CRTC based on output type. */
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			/* LVDS with dual channel */
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			/* LVDS with single channel */
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		limit = &intel_limits_g4x_display_port;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}
416 
417 static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
418 {
419 	struct drm_device *dev = crtc->dev;
420 	const intel_limit_t *limit;
421 
422 	if (HAS_PCH_SPLIT(dev))
423 		limit = intel_ironlake_limit(crtc, refclk);
424 	else if (IS_G4X(dev)) {
425 		limit = intel_g4x_limit(crtc);
426 	} else if (IS_PINEVIEW(dev)) {
427 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
428 			limit = &intel_limits_pineview_lvds;
429 		else
430 			limit = &intel_limits_pineview_sdvo;
431 	} else if (!IS_GEN2(dev)) {
432 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
433 			limit = &intel_limits_i9xx_lvds;
434 		else
435 			limit = &intel_limits_i9xx_sdvo;
436 	} else {
437 		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
438 			limit = &intel_limits_i8xx_lvds;
439 		else
440 			limit = &intel_limits_i8xx_dvo;
441 	}
442 	return limit;
443 }
444 
445 /* m1 is reserved as 0 in Pineview, n is a ring counter */
446 static void pineview_clock(int refclk, intel_clock_t *clock)
447 {
448 	clock->m = clock->m2 + 2;
449 	clock->p = clock->p1 * clock->p2;
450 	clock->vco = refclk * clock->m / clock->n;
451 	clock->dot = clock->vco / clock->p;
452 }
453 
454 static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
455 {
456 	if (IS_PINEVIEW(dev)) {
457 		pineview_clock(refclk, clock);
458 		return;
459 	}
460 	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
461 	clock->p = clock->p1 * clock->p2;
462 	clock->vco = refclk * clock->m / (clock->n + 2);
463 	clock->dot = clock->vco / clock->p;
464 }
465 
/**
 * Returns whether any output on the specified pipe is of the specified type
 *
 * Walks every encoder registered with the device and reports true if
 * one is currently bound to @crtc and has encoder->type == @type.
 */
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->base.crtc == crtc && encoder->type == type)
			return true;

	return false;
}
481 
/* Reject a candidate clock: returns false from the *enclosing* function. */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
		INTELPllInvalid("p out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");
	/* m1/m2 ordering does not apply to Pineview (m1 is always 0). */
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
		INTELPllInvalid("m1 <= m2\n");
	if (clock->m   < limit->m.min   || limit->m.max   < clock->m)
		INTELPllInvalid("m out of range\n");
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
516 
/*
 * Exhaustively search the divisor space in *limit for the clock whose
 * dot clock is closest to @target (kHz).  Writes the winner to
 * *best_clock and returns true if anything beat the initial error
 * bound of @target itself.  If @match_clock is set, only candidates
 * with the same effective p divisor are considered.
 */
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)

{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (I915_READ(LVDS)) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel.  We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		/* p2 is chosen by where target falls relative to dot_limit. */
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 is always 0 in Pineview */
			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					/* Fill in derived m/p/vco/dot. */
					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* err only shrinks from target when a valid candidate was found. */
	return (err != target);
}
583 
/*
 * G4X/ILK variant of the divisor search.  Accepts any candidate within
 * ~0.585% of @target (err_most), preferring smaller n (the n loop's
 * upper bound is tightened to each winner's n) and larger m1/m2/p1
 * per hardware recommendation.
 */
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		int lvds_reg;

		/* LVDS control lives in the PCH on split designs. */
		if (HAS_PCH_SPLIT(dev))
			lvds_reg = PCH_LVDS;
		else
			lvds_reg = LVDS;
		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* lock in this (smaller) n */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
651 
/*
 * Ironlake DisplayPort "search": the link has only two rates, so pick
 * one of two canned divisor sets by target frequency (162 vs 270 MHz)
 * and let intel_clock() derive the rest.  Always succeeds.
 */
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *match_clock,
			   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;

	if (target < 200000) {
		clock.n = 1;
		clock.p1 = 2;
		clock.p2 = 10;
		clock.m1 = 12;
		clock.m2 = 9;
	} else {
		clock.n = 2;
		clock.p1 = 1;
		clock.p2 = 10;
		clock.m1 = 14;
		clock.m2 = 8;
	}
	intel_clock(dev, refclk, &clock);
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}
677 
/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *match_clock,
		      intel_clock_t *best_clock)
{
	intel_clock_t clock;
	if (target < 200000) {
		clock.p1 = 2;
		clock.p2 = 10;
		clock.n = 2;
		clock.m1 = 23;
		clock.m2 = 8;
	} else {
		clock.p1 = 1;
		clock.p2 = 10;
		clock.n = 1;
		clock.m1 = 14;
		clock.m2 = 2;
	}
	/* Derive m/p/dot by hand against a 96 MHz reference. */
	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
	clock.p = (clock.p1 * clock.p2);
	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
	clock.vco = 0;
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}
705 
/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * mode setting code.  Clears the pipe's vblank status bit, then polls
 * (up to 50ms) for it to be set again by hardware.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set */
	if (_intel_wait_for(dev,
	    I915_READ(pipestat_reg) & PIPE_VBLANK_INTERRUPT_STATUS,
	    50, 1, "915vbl"))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}
741 
/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(pipe);

		/* Wait for the Pipe State to go off */
		if (_intel_wait_for(dev,
		    (I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 100,
		    1, "915pip"))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	} else {
		u32 last_line, line_mask;
		int reg = PIPEDSL(pipe);
		/* give the scanline up to 100ms to stop moving */
		unsigned long timeout = jiffies + msecs_to_jiffies(100);

		if (IS_GEN2(dev))
			line_mask = DSL_LINEMASK_GEN2;
		else
			line_mask = DSL_LINEMASK_GEN3;

		/* Wait for the display line to settle */
		do {
			last_line = I915_READ(reg) & line_mask;
			DELAY(5000);
		} while (((I915_READ(reg) & line_mask) != last_line) &&
			 time_after(timeout, jiffies));
		if (time_after(jiffies, timeout))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	}
}
791 
/* Render a bool as "on"/"off" for assertion messages. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";
	return "off";
}
796 
/* Only for pre-ILK configs */
/* Complain if the DPLL for @pipe is not in the expected @state. */
static void assert_pll(struct drm_i915_private *dev_priv,
		       enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	if (cur_state != state)
		kprintf("PLL state assertion failure (expected %s, current %s)\n",
		    state_string(state), state_string(cur_state));
}
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)
814 
/* For ILK+ */
/* Complain if the PCH DPLL feeding @pipe is not in the expected @state. */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
			   enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		/* On CPT the transcoder->PLL mapping is programmable. */
		u32 pch_dpll;

		pch_dpll = I915_READ(PCH_DPLL_SEL);

		/* Make sure the selected PLL is enabled to the transcoder */
		KASSERT(((pch_dpll >> (4 * pipe)) & 8) != 0,
		    ("transcoder %d PLL not enabled\n", pipe));

		/* Convert the transcoder pipe number to a pll pipe number */
		pipe = (pch_dpll >> (4 * pipe)) & 1;
	}

	reg = _PCH_DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	if (cur_state != state)
		kprintf("PCH PLL state assertion failure (expected %s, current %s)\n",
		    state_string(state), state_string(cur_state));
}
#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
845 
/* Complain if the FDI transmitter for @pipe is not in the expected @state. */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_TX_ENABLE);
	if (cur_state != state)
		kprintf("FDI TX state assertion failure (expected %s, current %s)\n",
		    state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
862 
/* Complain if the FDI receiver for @pipe is not in the expected @state. */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_RX_ENABLE);
	if (cur_state != state)
		kprintf("FDI RX state assertion failure (expected %s, current %s)\n",
		    state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
879 
/* Complain if the FDI TX PLL for @pipe is disabled (Gen6+ only). */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum i915_pipe pipe)
{
	int reg;
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (dev_priv->info->gen == 5)
		return;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	if (!(val & FDI_TX_PLL_ENABLE))
		kprintf("FDI TX PLL assertion failure, should be active but is disabled\n");
}
895 
/* Complain if the FDI RX PLL for @pipe is disabled. */
static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum i915_pipe pipe)
{
	int reg;
	u32 val;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	if (!(val & FDI_RX_PLL_ENABLE))
		kprintf("FDI RX PLL assertion failure, should be active but is disabled\n");
}
907 
/*
 * Complain if the LVDS panel on @pipe is powered and its power-sequencer
 * registers are still locked (i.e. not safe to reprogram).
 */
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
				  enum i915_pipe pipe)
{
	int pp_reg, lvds_reg;
	u32 val;
	enum i915_pipe panel_pipe = PIPE_A;
	bool locked = true;

	/* Panel-power and LVDS control move to the PCH on split designs. */
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		pp_reg = PCH_PP_CONTROL;
		lvds_reg = PCH_LVDS;
	} else {
		pp_reg = PP_CONTROL;
		lvds_reg = LVDS;
	}

	/* Unlocked means panel off, or the unlock key is written. */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
		locked = false;

	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
		panel_pipe = PIPE_B;

	if (panel_pipe == pipe && locked)
		kprintf("panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
936 
/* Complain if @pipe is not in the expected enabled/disabled @state. */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	/* if we need the pipe A quirk it must be always on */
	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
		state = true;

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & PIPECONF_ENABLE);
	if (cur_state != state)
		kprintf("pipe %c assertion failure (expected %s, current %s)\n",
		    pipe_name(pipe), state_string(state), state_string(cur_state));
}
955 
956 static void assert_plane(struct drm_i915_private *dev_priv,
957 			 enum plane plane, bool state)
958 {
959 	int reg;
960 	u32 val;
961 	bool cur_state;
962 
963 	reg = DSPCNTR(plane);
964 	val = I915_READ(reg);
965 	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
966 	if (cur_state != state)
967 		kprintf("plane %c assertion failure, (expected %s, current %s)\n",
968 		       plane_name(plane), state_string(state), state_string(cur_state));
969 }
970 
/* Convenience wrappers around assert_plane() for the two expected states. */
#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)
973 
/*
 * Warn if any display plane that could feed @pipe is still enabled.
 * On ILK+ (PCH split) planes are hardwired to pipes, so only the
 * matching plane is checked; on older hardware each plane carries a
 * pipe-select field and both planes must be inspected.
 */
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum i915_pipe pipe)
{
	int reg, i;
	u32 val;
	int cur_pipe;

	/* Planes are fixed to pipes on ILK+ */
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		reg = DSPCNTR(pipe);
		val = I915_READ(reg);
		if ((val & DISPLAY_PLANE_ENABLE) != 0)
			kprintf("plane %c assertion failure, should be disabled but not\n",
			       plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for (i = 0; i < 2; i++) {
		reg = DSPCNTR(i);
		val = I915_READ(reg);
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		if ((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe)
			kprintf("plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}
1002 
1003 static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1004 {
1005 	u32 val;
1006 	bool enabled;
1007 
1008 	val = I915_READ(PCH_DREF_CONTROL);
1009 	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1010 			    DREF_SUPERSPREAD_SOURCE_MASK));
1011 	if (!enabled)
1012 		kprintf("PCH refclk assertion failure, should be active but is disabled\n");
1013 }
1014 
1015 static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
1016 				       enum i915_pipe pipe)
1017 {
1018 	int reg;
1019 	u32 val;
1020 	bool enabled;
1021 
1022 	reg = TRANSCONF(pipe);
1023 	val = I915_READ(reg);
1024 	enabled = !!(val & TRANS_ENABLE);
1025 	if (enabled)
1026 		kprintf("transcoder assertion failed, should be off on pipe %c but is still active\n",
1027 	     pipe_name(pipe));
1028 }
1029 
1030 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1031 			      enum i915_pipe pipe, u32 val)
1032 {
1033 	if ((val & PORT_ENABLE) == 0)
1034 		return false;
1035 
1036 	if (HAS_PCH_CPT(dev_priv->dev)) {
1037 		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1038 			return false;
1039 	} else {
1040 		if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
1041 			return false;
1042 	}
1043 	return true;
1044 }
1045 
1046 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1047 			      enum i915_pipe pipe, u32 val)
1048 {
1049 	if ((val & LVDS_PORT_EN) == 0)
1050 		return false;
1051 
1052 	if (HAS_PCH_CPT(dev_priv->dev)) {
1053 		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1054 			return false;
1055 	} else {
1056 		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1057 			return false;
1058 	}
1059 	return true;
1060 }
1061 
1062 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1063 			      enum i915_pipe pipe, u32 val)
1064 {
1065 	if ((val & ADPA_DAC_ENABLE) == 0)
1066 		return false;
1067 	if (HAS_PCH_CPT(dev_priv->dev)) {
1068 		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1069 			return false;
1070 	} else {
1071 		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1072 			return false;
1073 	}
1074 	return true;
1075 }
1076 
1077 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1078 			    enum i915_pipe pipe, u32 port_sel, u32 val)
1079 {
1080 	if ((val & DP_PORT_EN) == 0)
1081 		return false;
1082 
1083 	if (HAS_PCH_CPT(dev_priv->dev)) {
1084 		u32	trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1085 		u32	trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1086 		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1087 			return false;
1088 	} else {
1089 		if ((val & DP_PIPE_MASK) != (pipe << 30))
1090 			return false;
1091 	}
1092 	return true;
1093 }
1094 
1095 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1096 				   enum i915_pipe pipe, int reg, u32 port_sel)
1097 {
1098 	u32 val = I915_READ(reg);
1099 	if (dp_pipe_enabled(dev_priv, pipe, port_sel, val))
1100 		kprintf("PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1101 	     reg, pipe_name(pipe));
1102 }
1103 
1104 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1105 				     enum i915_pipe pipe, int reg)
1106 {
1107 	u32 val = I915_READ(reg);
1108 	if (hdmi_pipe_enabled(dev_priv, val, pipe))
1109 		kprintf("PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1110 	     reg, pipe_name(pipe));
1111 }
1112 
1113 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1114 				      enum i915_pipe pipe)
1115 {
1116 	int reg;
1117 	u32 val;
1118 
1119 	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1120 	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1121 	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1122 
1123 	reg = PCH_ADPA;
1124 	val = I915_READ(reg);
1125 	if (adpa_pipe_enabled(dev_priv, val, pipe))
1126 		kprintf("PCH VGA enabled on transcoder %c, should be disabled\n",
1127 	     pipe_name(pipe));
1128 
1129 	reg = PCH_LVDS;
1130 	val = I915_READ(reg);
1131 	if (lvds_pipe_enabled(dev_priv, val, pipe))
1132 		kprintf("PCH LVDS enabled on transcoder %c, should be disabled\n",
1133 	     pipe_name(pipe));
1134 
1135 	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
1136 	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
1137 	assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
1138 }
1139 
/**
 * intel_enable_pll - enable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
 * make sure the PLL reg is writable first though, since the panel write
 * protect mechanism may be enabled.
 *
 * Note!  This is for pre-ILK only.
 */
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
{
	int reg;
	u32 val;

	/* No really, not for ILK+ */
	KASSERT(dev_priv->info->gen < 5, ("Wrong device gen"));

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
		assert_panel_unlocked(dev_priv, pipe);

	reg = DPLL(pipe);
	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;

	/* We do this three times for luck */
	/* NOTE(review): the triple write-and-wait appears to be a hardware
	 * workaround inherited from the original driver; do not collapse. */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	DELAY(150); /* wait for warmup */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	DELAY(150); /* wait for warmup */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	DELAY(150); /* wait for warmup */
}
1178 
/**
 * intel_disable_pll - disable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe, making sure the pipe is off first.
 *
 * Note!  This is for pre-ILK only.
 */
static void intel_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
{
	int reg;
	u32 val;

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* Clear the VCO enable bit; everything else is left as programmed. */
	reg = DPLL(pipe);
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
}
1206 
/**
 * intel_enable_pch_pll - enable PCH PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
 * drives the transcoder clock.
 */
static void intel_enable_pch_pll(struct drm_i915_private *dev_priv,
				 enum i915_pipe pipe)
{
	int reg;
	u32 val;

	/* Only PCH PLLs A and B exist; ignore any other pipe. */
	if (pipe > 1)
		return;

	/* PCH only available on ILK+ */
	KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));

	/* PCH refclock must be enabled first */
	assert_pch_refclk_enabled(dev_priv);

	reg = _PCH_DPLL(pipe);
	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	DELAY(200); /* fixed settle delay after enabling the VCO */
}
1237 
/*
 * Disable the PCH PLL for @pipe, provided the transcoder is already off
 * and — judging by the PCH_DPLL_SEL check below — transcoder C is not
 * still clocked from this pipe's PLL (NOTE(review): inferred from the
 * TRANSC_* bit names; confirm against the PCH PRM).
 */
static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
				  enum i915_pipe pipe)
{
	int reg;
	u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
		pll_sel = TRANSC_DPLL_ENABLE;

	/* Only PCH PLLs A and B exist; ignore any other pipe. */
	if (pipe > 1)
		return;

	/* PCH only available on ILK+ */
	KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));

	/* Make sure transcoder isn't still depending on us */
	assert_transcoder_disabled(dev_priv, pipe);

	if (pipe == 0)
		pll_sel |= TRANSC_DPLLA_SEL;
	else if (pipe == 1)
		pll_sel |= TRANSC_DPLLB_SEL;


	/* Bail out while this PLL is still selected in PCH_DPLL_SEL. */
	if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
		return;

	reg = _PCH_DPLL(pipe);
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	DELAY(200);
}
1270 
/*
 * Enable the PCH transcoder for @pipe.  Requires the PCH DPLL and both
 * FDI TX/RX to be running first.  Copies bit depth (IBX) and interlace
 * mode from the CPU pipe's PIPECONF before setting TRANS_ENABLE and
 * waiting for the hardware to report the transcoder as enabled.
 */
static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
				    enum i915_pipe pipe)
{
	int reg;
	u32 val, pipeconf_val;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

	/* PCH only available on ILK+ */
	KASSERT(dev_priv->info->gen >= 5, ("Wrong device gen"));

	/* Make sure PCH DPLL is enabled */
	assert_pch_pll_enabled(dev_priv, pipe);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);


	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv->dev)) {
		/*
		 * make the BPC in transcoder be consistent with
		 * that in pipeconf reg.
		 */
		val &= ~PIPE_BPC_MASK;
		val |= pipeconf_val & PIPE_BPC_MASK;
	}

	/*
	 * NOTE(review): un-braced nested if/else below — the first "else"
	 * binds to the inner if (SDVO check), the second to the outer
	 * interlace check.  This parses as intended but is fragile.
	 */
	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
		if (HAS_PCH_IBX(dev_priv->dev) &&
		    intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (_intel_wait_for(dev_priv->dev, I915_READ(reg) & TRANS_STATE_ENABLE,
	    100, 1, "915trc"))
		DRM_ERROR("failed to enable transcoder %d\n", pipe);
}
1317 
/*
 * Disable the PCH transcoder for @pipe.  FDI and all PCH ports must be
 * off first; waits for the hardware to report the transcoder disabled.
 */
static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
				     enum i915_pipe pipe)
{
	int reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (_intel_wait_for(dev_priv->dev,
	    (I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50,
	    1, "915trd"))
		DRM_ERROR("failed to disable transcoder %d\n", pipe);
}
1341 
/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to enable
 * @pch_port: on ILK+, is this pipe driving a PCH port or not
 *
 * Enable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe is actually running (i.e. first vblank) before
 * returning.
 */
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
			      bool pch_port)
{
	int reg;
	u32 val;

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (!HAS_PCH_SPLIT(dev_priv->dev))
		assert_pll_enabled(dev_priv, pipe);
	else {
		if (pch_port) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pipe);
			assert_fdi_tx_pll_enabled(dev_priv, pipe);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	/* Already enabled: nothing to do. */
	if (val & PIPECONF_ENABLE)
		return;

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}
1386 
/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @dev_priv: i915 private structure
 * @pipe: pipe to disable
 *
 * Disable @pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
 *
 * @pipe should be %PIPE_A or %PIPE_B.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
			       enum i915_pipe pipe)
{
	int reg;
	u32 val;

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	/* Already disabled: nothing to do. */
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	I915_WRITE(reg, val & ~PIPECONF_ENABLE);
	intel_wait_for_pipe_off(dev_priv->dev, pipe);
}
1423 
/*
 * Plane regs are double buffered, going from enabled->disabled needs a
 * trigger in order to latch.  The display address reg provides this.
 */
void intel_flush_display_plane(struct drm_i915_private *dev_priv,
				      enum plane plane)
{
	/* Rewrite current values so the double-buffered update latches. */
	I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
	I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
}
1434 
/**
 * intel_enable_plane - enable a display plane on a given pipe
 * @dev_priv: i915 private structure
 * @plane: plane to enable
 * @pipe: pipe being fed
 *
 * Enable @plane on @pipe, making sure that @pipe is running first.
 */
static void intel_enable_plane(struct drm_i915_private *dev_priv,
			       enum plane plane, enum i915_pipe pipe)
{
	int reg;
	u32 val;

	/* If the pipe isn't enabled, we can't pump pixels and may hang */
	assert_pipe_enabled(dev_priv, pipe);

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	/* Already enabled: nothing to do. */
	if (val & DISPLAY_PLANE_ENABLE)
		return;

	I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
	/* Latch the double-buffered update and wait for it to take effect. */
	intel_flush_display_plane(dev_priv, plane);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}
1461 
/**
 * intel_disable_plane - disable a display plane
 * @dev_priv: i915 private structure
 * @plane: plane to disable
 * @pipe: pipe consuming the data
 *
 * Disable @plane; should be an independent operation.
 */
static void intel_disable_plane(struct drm_i915_private *dev_priv,
				enum plane plane, enum i915_pipe pipe)
{
	int reg;
	u32 val;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	/* Already disabled: nothing to do. */
	if ((val & DISPLAY_PLANE_ENABLE) == 0)
		return;

	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
	/* Latch the double-buffered update and wait for it to take effect. */
	intel_flush_display_plane(dev_priv, plane);
	intel_wait_for_vblank(dev_priv->dev, pipe);
}
1485 
1486 static void disable_pch_dp(struct drm_i915_private *dev_priv,
1487 			   enum i915_pipe pipe, int reg, u32 port_sel)
1488 {
1489 	u32 val = I915_READ(reg);
1490 	if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
1491 		DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
1492 		I915_WRITE(reg, val & ~DP_PORT_EN);
1493 	}
1494 }
1495 
1496 static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1497 			     enum i915_pipe pipe, int reg)
1498 {
1499 	u32 val = I915_READ(reg);
1500 	if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
1501 		DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1502 			      reg, pipe);
1503 		I915_WRITE(reg, val & ~PORT_ENABLE);
1504 	}
1505 }
1506 
1507 /* Disable any ports connected to this transcoder */
1508 static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1509 				    enum i915_pipe pipe)
1510 {
1511 	u32 reg, val;
1512 
1513 	val = I915_READ(PCH_PP_CONTROL);
1514 	I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
1515 
1516 	disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1517 	disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1518 	disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1519 
1520 	reg = PCH_ADPA;
1521 	val = I915_READ(reg);
1522 	if (adpa_pipe_enabled(dev_priv, val, pipe))
1523 		I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1524 
1525 	reg = PCH_LVDS;
1526 	val = I915_READ(reg);
1527 	if (lvds_pipe_enabled(dev_priv, val, pipe)) {
1528 		DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
1529 		I915_WRITE(reg, val & ~LVDS_PORT_EN);
1530 		POSTING_READ(reg);
1531 		DELAY(100);
1532 	}
1533 
1534 	disable_pch_hdmi(dev_priv, pipe, HDMIB);
1535 	disable_pch_hdmi(dev_priv, pipe, HDMIC);
1536 	disable_pch_hdmi(dev_priv, pipe, HDMID);
1537 }
1538 
/*
 * Pin @obj into the GTT for scanout with the generation-specific
 * alignment and, for tiled buffers, install and pin a fence register.
 * The operation runs with mm.interruptible forced off and restores it
 * before returning.  Returns 0 on success or a negative errno, with
 * the object unpinned again on the failure paths.
 */
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
			   struct drm_i915_gem_object *obj,
			   struct intel_ring_buffer *pipelined)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 alignment;
	int ret;

	alignment = 0; /* shut gcc */
	switch (obj->tiling_mode) {
	case I915_TILING_NONE:
		/* Untiled scanout alignment varies by generation. */
		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
			alignment = 128 * 1024;
		else if (INTEL_INFO(dev)->gen >= 4)
			alignment = 4 * 1024;
		else
			alignment = 64 * 1024;
		break;
	case I915_TILING_X:
		/* pin() will align the object as required by fence */
		alignment = 0;
		break;
	case I915_TILING_Y:
		/* FIXME: Is this true? */
		DRM_ERROR("Y tiled not allowed for scan out buffers\n");
		return -EINVAL;
	default:
		KASSERT(0, ("Wrong tiling for fb obj"));
	}

	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
	if (ret)
		goto err_interruptible;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (obj->tiling_mode != I915_TILING_NONE) {
		ret = i915_gem_object_get_fence(obj, pipelined);
		if (ret)
			goto err_unpin;

		i915_gem_object_pin_fence(obj);
	}

	dev_priv->mm.interruptible = true;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_interruptible:
	dev_priv->mm.interruptible = true;
	return ret;
}
1597 
/* Release the fence and GTT pin taken by intel_pin_and_fence_fb_obj(). */
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_fence(obj);
	i915_gem_object_unpin(obj);
}
1603 
/*
 * Compute the linear byte offset of the X-tile containing (*x, *y) and
 * reduce *x/*y to coordinates within that tile.  An X-tile covers
 * 512 bytes (512/bpp pixels) by 8 rows, i.e. 4096 bytes.  bpp (bytes
 * per pixel) is assumed to be a power of two.
 */
unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
					       unsigned int bpp,
					       unsigned int pitch)
{
	int rows_above, tiles_left;

	rows_above = *y / 8;
	tiles_left = *x / (512 / bpp);

	/* Fold the coordinates back into the base tile. */
	*y -= rows_above * 8;
	*x -= tiles_left * (512 / bpp);

	return rows_above * pitch * 8 + tiles_left * 4096;
}
1619 
/*
 * Program the display plane registers (pixel format, stride, base and
 * pan offset) for gen2-gen4 style hardware.  Only planes 0 and 1 are
 * supported here.  Returns 0 on success or -EINVAL for an unsupported
 * plane or pixel format.
 */
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			     int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long Start, Offset;
	u32 dspcntr;
	u32 reg;

	switch (plane) {
	case 0:
	case 1:
		break;
	default:
		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->depth == 15)
			dspcntr |= DISPPLANE_BGRX555;
		else
			dspcntr |= DISPPLANE_BGRX565;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	default:
		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
		return -EINVAL;
	}
	/* Gen4+ planes know about tiling; older ones have no such bit. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (obj->tiling_mode != I915_TILING_NONE)
			dspcntr |= DISPPLANE_TILED;
		else
			dspcntr &= ~DISPPLANE_TILED;
	}

	I915_WRITE(reg, dspcntr);

	/* Base is the GTT offset; Offset pans to (x, y) within the fb. */
	Start = obj->gtt_offset;
	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      Start, Offset, x, y, fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	/* Gen4+ splits surface base and tile offset; older hw takes a sum. */
	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DSPSURF(plane), Start);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPADDR(plane), Offset);
	} else
		I915_WRITE(DSPADDR(plane), Start + Offset);
	POSTING_READ(reg);

	return (0);
}
1692 
/*
 * Program the display plane registers for ILK+ hardware.  Planes 0-2
 * are supported (plane 2 exists on IVB).  Unlike the i9xx path, the
 * bpp/depth combinations are strictly validated and trickle feed is
 * always disabled.  Returns 0 on success or -EINVAL for an unsupported
 * plane or format.
 */
static int ironlake_update_plane(struct drm_crtc *crtc,
				 struct drm_framebuffer *fb, int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long Start, Offset;
	u32 dspcntr;
	u32 reg;

	switch (plane) {
	case 0:
	case 1:
	case 2:
		break;
	default:
		DRM_ERROR("Can't update plane %d in SAREA\n", plane);
		return -EINVAL;
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	reg = DSPCNTR(plane);
	dspcntr = I915_READ(reg);
	/* Mask out pixel format bits in case we change it */
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
	switch (fb->bits_per_pixel) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->depth != 16) {
			DRM_ERROR("bpp 16, depth %d\n", fb->depth);
			return -EINVAL;
		}

		dspcntr |= DISPPLANE_BGRX565;
		break;
	case 24:
	case 32:
		if (fb->depth == 24)
			dspcntr |= DISPPLANE_BGRX888;
		else if (fb->depth == 30)
			dspcntr |= DISPPLANE_BGRX101010;
		else {
			DRM_ERROR("bpp %d depth %d\n", fb->bits_per_pixel,
			    fb->depth);
			return -EINVAL;
		}
		break;
	default:
		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
		return -EINVAL;
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	/* must disable */
	dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	I915_WRITE(reg, dspcntr);

	/* Base is the GTT offset; Offset pans to (x, y) within the fb. */
	Start = obj->gtt_offset;
	Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      Start, Offset, x, y, fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane), Start);
	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
	I915_WRITE(DSPADDR(plane), Offset);
	POSTING_READ(reg);

	return 0;
}
1775 
/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			   int x, int y, enum mode_set_atomic state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Disable FBC around the base change; the hook is NULL on
	 * hardware without FBC support. */
	if (dev_priv->display.disable_fbc)
		dev_priv->display.disable_fbc(dev);
	intel_increase_pllclock(crtc);

	/* Dispatch to the generation-specific plane update hook. */
	return dev_priv->display.update_plane(crtc, fb, x, y);
}
1790 
/*
 * Wait until @old_fb's backing object has no pending page flips (or the
 * GPU has been declared wedged), then retire any outstanding GPU work
 * on it so the buffer is safe to unpin.  mm.interruptible is forced off
 * for the GPU wait and restored before returning; the return value is
 * that of i915_gem_object_finish_gpu().
 */
static int
intel_finish_fb(struct drm_framebuffer *old_fb)
{
	struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool was_interruptible = dev_priv->mm.interruptible;
	int ret;

	/* Sleep under event_lock until all pending flips have signalled. */
/* XXX */	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	while (!atomic_read(&dev_priv->mm.wedged) &&
	       atomic_read(&obj->pending_flip) != 0) {
		lksleep(&obj->pending_flip, &dev->event_lock,
		    0, "915flp", 0);
	}
/* XXX */	lockmgr(&dev->event_lock, LK_RELEASE);

	/* Big Hammer, we also need to ensure that any pending
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
	 * current scanout is retired before unpinning the old
	 * framebuffer.
	 *
	 * This should only fail upon a hung GPU, in which case we
	 * can safely continue.
	 */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_finish_gpu(obj);
	dev_priv->mm.interruptible = was_interruptible;
	return ret;
}
1821 
/*
 * Set the scanout base of @crtc to (x, y) of the currently bound fb:
 * pin and fence the new framebuffer, flush work on the old one, program
 * the plane, then (after a vblank) unpin the old framebuffer.  The new
 * origin is mirrored into the legacy SAREA fields.  Returns 0 or a
 * negative errno; the struct lock is dropped before returning.
 */
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		    struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
#if 0
	struct drm_i915_master_private *master_priv;
#else
	drm_i915_private_t *dev_priv = dev->dev_private;
#endif
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int ret;

	/* no fb bound */
	if (!crtc->fb) {
		DRM_ERROR("No FB bound\n");
		return 0;
	}

	/* Plane 2 only exists on IVB; reject it elsewhere. */
	switch (intel_crtc->plane) {
	case 0:
	case 1:
		break;
	case 2:
		if (IS_IVYBRIDGE(dev))
			break;
		/* fall through otherwise */
	default:
		DRM_ERROR("no plane for crtc\n");
		return -EINVAL;
	}

	DRM_LOCK(dev);
	ret = intel_pin_and_fence_fb_obj(dev,
					 to_intel_framebuffer(crtc->fb)->obj,
					 NULL);
	if (ret != 0) {
		DRM_UNLOCK(dev);
		DRM_ERROR("pin & fence failed\n");
		return ret;
	}

	/* Wait for outstanding flips/GPU work on the old fb first. */
	if (old_fb)
		intel_finish_fb(old_fb);

	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
					 LEAVE_ATOMIC_MODE_SET);
	if (ret) {
		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
		DRM_UNLOCK(dev);
		DRM_ERROR("failed to update base address\n");
		return ret;
	}

	/* Keep the old fb pinned until the new base has scanned out. */
	if (old_fb) {
		intel_wait_for_vblank(dev, intel_crtc->pipe);
		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
	}

	DRM_UNLOCK(dev);

#if 0
	if (!dev->primary->master)
		return 0;

	master_priv = dev->primary->master->driver_priv;
	if (!master_priv->sarea_priv)
		return 0;

	if (intel_crtc->pipe) {
		master_priv->sarea_priv->pipeB_x = x;
		master_priv->sarea_priv->pipeB_y = y;
	} else {
		master_priv->sarea_priv->pipeA_x = x;
		master_priv->sarea_priv->pipeA_y = y;
	}
#else

	if (!dev_priv->sarea_priv)
		return 0;

	if (intel_crtc->pipe) {
		dev_priv->sarea_priv->planeB_x = x;
		dev_priv->sarea_priv->planeB_y = y;
	} else {
		dev_priv->sarea_priv->planeA_x = x;
		dev_priv->sarea_priv->planeA_y = y;
	}
#endif

	return 0;
}
1914 
/*
 * Select the eDP PLL frequency in DP_A based on the requested link
 * @clock (kHz): 160MHz for links below 200000, otherwise 270MHz.  The
 * 160MHz case applies the documented four-register workaround sequence.
 */
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (clock < 200000) {
		u32 temp;
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		/* workaround for 160Mhz:
		   1) program 0x4600c bits 15:0 = 0x8124
		   2) program 0x46010 bit 0 = 1
		   3) program 0x46034 bit 24 = 1
		   4) program 0x64000 bit 14 = 1
		   */
		temp = I915_READ(0x4600c);
		temp &= 0xffff0000;
		I915_WRITE(0x4600c, temp | 0x8124);

		temp = I915_READ(0x46010);
		I915_WRITE(0x46010, temp | 1);

		temp = I915_READ(0x46034);
		I915_WRITE(0x46034, temp | (1 << 24));
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
	}
	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	DELAY(500);
}
1951 
/*
 * Switch both the CPU-side FDI TX and the PCH-side FDI RX out of link
 * training into the normal pattern, enabling enhanced framing, and wait
 * one idle-pattern time.  IVB additionally gets FS/FE error correction.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	/* CPT uses its own pattern field on the RX side. */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	DELAY(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
1992 
1993 static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
1994 {
1995 	struct drm_i915_private *dev_priv = dev->dev_private;
1996 	u32 flags = I915_READ(SOUTH_CHICKEN1);
1997 
1998 	flags |= FDI_PHASE_SYNC_OVR(pipe);
1999 	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
2000 	flags |= FDI_PHASE_SYNC_EN(pipe);
2001 	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
2002 	POSTING_READ(SOUTH_CHICKEN1);
2003 }
2004 
/* The FDI link training functions for ILK/Ibexpeak.
 *
 * Trains the CPU FDI TX / PCH FDI RX link in two phases (pattern 1:
 * bit lock, pattern 2: symbol lock), polling FDI_RX_IIR for the lock
 * bits after each phase.  Failures are logged but not fatal.
 */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 reg, temp, tries;

	/* FDI needs bits from pipe & plane first */
	assert_pipe_enabled(dev_priv, pipe);
	assert_plane_enabled(dev_priv, plane);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	DELAY(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(7 << 19);	/* lane count field, bits 21:19 */
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	DELAY(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	if (HAS_PCH_IBX(dev)) {
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
			   FDI_RX_PHASE_SYNC_POINTER_EN);
	}

	/* Poll up to 5 times for bit lock; write the sticky bit back to
	 * acknowledge it. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	DELAY(150);

	/* Poll up to 5 times for symbol lock, acknowledging as above. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
2101 
/* Voltage-swing / pre-emphasis combinations tried in order during
 * SNB-B (and IVB) FDI link training; each is programmed into the
 * FDI_LINK_TRAIN_VOL_EMP field of FDI_TX_CTL before re-polling. */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
2108 
/* The FDI link training functions for SNB/Cougarpoint.
 *
 * Like the ILK version but steps through the snb_b_fdi_train_param[]
 * voltage/pre-emphasis table while polling for each lock bit, and uses
 * the CPT-specific train pattern fields when a CougarPoint PCH is
 * present.
 */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	DELAY(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(7 << 19);	/* lane count field, bits 21:19 */
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	DELAY(150);

	if (HAS_PCH_CPT(dev))
		cpt_phase_pointer_enable(dev, pipe);

	/* Try each voltage/emphasis setting until bit lock is reported. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		DELAY(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_BIT_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	DELAY(150);

	/* Same voltage sweep, now waiting for symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		DELAY(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
2232 
/* Manual link training for Ivy Bridge A0 parts.
 *
 * IVB normally auto-trains; A0 silicon needs the same manual pattern
 * 1/2 sequence as SNB (IVB-specific TX pattern bits, CPT RX pattern
 * bits, plus composite sync), sweeping the SNB-B voltage table.
 */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	DELAY(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(7 << 19);	/* lane count field, bits 21:19 */
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
	temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	temp |= FDI_COMPOSITE_SYNC;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_AUTO;
	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
	temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	temp |= FDI_COMPOSITE_SYNC;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	DELAY(150);

	/* Sweep voltage/pre-emphasis settings until bit lock appears. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		DELAY(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		/* Second read catches a bit lock that lands just after the
		 * first read of IIR. */
		if (temp & FDI_RX_BIT_LOCK ||
		    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE_IVB;
	temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
	temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	DELAY(150);

	/* Same sweep, now waiting for symbol lock. */
	for (i = 0; i < 4; i++ ) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		DELAY(500);

		reg = FDI_RX_IIR(pipe);
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
2343 
/*
 * Enable the FDI RX and TX PLLs for @crtc and switch the RX side from
 * the raw reference clock to PCDclk.  Must run before link training.
 */
static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* Write the TU size bits so error detection works */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* clear lane count (21:19) and BPC (18:16) fields */
	temp &= ~((0x7 << 19) | (0x7 << 16));
	temp |= (intel_crtc->fdi_lanes - 1) << 19;
	/* copy BPC from PIPECONF into FDI RX (shift moves it to 18:16) */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	DELAY(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	DELAY(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		DELAY(100);
	}
}
2384 
2385 static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
2386 {
2387 	struct drm_i915_private *dev_priv = dev->dev_private;
2388 	u32 flags = I915_READ(SOUTH_CHICKEN1);
2389 
2390 	flags &= ~(FDI_PHASE_SYNC_EN(pipe));
2391 	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
2392 	flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
2393 	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
2394 	POSTING_READ(SOUTH_CHICKEN1);
2395 }
2396 
2397 static void ironlake_fdi_disable(struct drm_crtc *crtc)
2398 {
2399 	struct drm_device *dev = crtc->dev;
2400 	struct drm_i915_private *dev_priv = dev->dev_private;
2401 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2402 	int pipe = intel_crtc->pipe;
2403 	u32 reg, temp;
2404 
2405 	/* disable CPU FDI tx and PCH FDI rx */
2406 	reg = FDI_TX_CTL(pipe);
2407 	temp = I915_READ(reg);
2408 	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2409 	POSTING_READ(reg);
2410 
2411 	reg = FDI_RX_CTL(pipe);
2412 	temp = I915_READ(reg);
2413 	temp &= ~(0x7 << 16);
2414 	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2415 	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2416 
2417 	POSTING_READ(reg);
2418 	DELAY(100);
2419 
2420 	/* Ironlake workaround, disable clock pointer after downing FDI */
2421 	if (HAS_PCH_IBX(dev)) {
2422 		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2423 		I915_WRITE(FDI_RX_CHICKEN(pipe),
2424 			   I915_READ(FDI_RX_CHICKEN(pipe) &
2425 				     ~FDI_RX_PHASE_SYNC_POINTER_EN));
2426 	} else if (HAS_PCH_CPT(dev)) {
2427 		cpt_phase_pointer_disable(dev, pipe);
2428 	}
2429 
2430 	/* still set train pattern 1 */
2431 	reg = FDI_TX_CTL(pipe);
2432 	temp = I915_READ(reg);
2433 	temp &= ~FDI_LINK_TRAIN_NONE;
2434 	temp |= FDI_LINK_TRAIN_PATTERN_1;
2435 	I915_WRITE(reg, temp);
2436 
2437 	reg = FDI_RX_CTL(pipe);
2438 	temp = I915_READ(reg);
2439 	if (HAS_PCH_CPT(dev)) {
2440 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2441 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2442 	} else {
2443 		temp &= ~FDI_LINK_TRAIN_NONE;
2444 		temp |= FDI_LINK_TRAIN_PATTERN_1;
2445 	}
2446 	/* BPC in FDI rx is consistent with that in PIPECONF */
2447 	temp &= ~(0x07 << 16);
2448 	temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2449 	I915_WRITE(reg, temp);
2450 
2451 	POSTING_READ(reg);
2452 	DELAY(100);
2453 }
2454 
2455 /*
2456  * When we disable a pipe, we need to clear any pending scanline wait events
2457  * to avoid hanging the ring, which we assume we are waiting on.
2458  */
2459 static void intel_clear_scanline_wait(struct drm_device *dev)
2460 {
2461 	struct drm_i915_private *dev_priv = dev->dev_private;
2462 	struct intel_ring_buffer *ring;
2463 	u32 tmp;
2464 
2465 	if (IS_GEN2(dev))
2466 		/* Can't break the hang on i8xx */
2467 		return;
2468 
2469 	ring = LP_RING(dev_priv);
2470 	tmp = I915_READ_CTL(ring);
2471 	if (tmp & RING_WAIT)
2472 		I915_WRITE_CTL(ring, tmp);
2473 }
2474 
2475 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2476 {
2477 	struct drm_i915_gem_object *obj;
2478 	struct drm_i915_private *dev_priv;
2479 	struct drm_device *dev;
2480 
2481 	if (crtc->fb == NULL)
2482 		return;
2483 
2484 	obj = to_intel_framebuffer(crtc->fb)->obj;
2485 	dev = crtc->dev;
2486 	dev_priv = dev->dev_private;
2487 	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
2488 	while (atomic_read(&obj->pending_flip) != 0)
2489 		lksleep(&obj->pending_flip, &dev->event_lock, 0, "915wfl", 0);
2490 	lockmgr(&dev->event_lock, LK_RELEASE);
2491 }
2492 
2493 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2494 {
2495 	struct drm_device *dev = crtc->dev;
2496 	struct drm_mode_config *mode_config = &dev->mode_config;
2497 	struct intel_encoder *encoder;
2498 
2499 	/*
2500 	 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2501 	 * must be driven by its own crtc; no sharing is possible.
2502 	 */
2503 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2504 		if (encoder->base.crtc != crtc)
2505 			continue;
2506 
2507 		switch (encoder->type) {
2508 		case INTEL_OUTPUT_EDP:
2509 			if (!intel_encoder_is_pch_edp(&encoder->base))
2510 				return false;
2511 			continue;
2512 		}
2513 	}
2514 
2515 	return true;
2516 }
2517 
2518 /*
2519  * Enable PCH resources required for PCH ports:
2520  *   - PCH PLLs
2521  *   - FDI training & RX/TX
2522  *   - update transcoder timings
2523  *   - DP transcoding bits
2524  *   - transcoder
2525  */
2526 static void ironlake_pch_enable(struct drm_crtc *crtc)
2527 {
2528 	struct drm_device *dev = crtc->dev;
2529 	struct drm_i915_private *dev_priv = dev->dev_private;
2530 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2531 	int pipe = intel_crtc->pipe;
2532 	u32 reg, temp, transc_sel;
2533 
2534 	/* For PCH output, training FDI link */
2535 	dev_priv->display.fdi_link_train(crtc);
2536 
2537 	intel_enable_pch_pll(dev_priv, pipe);
2538 
2539 	if (HAS_PCH_CPT(dev)) {
2540 		transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL :
2541 			TRANSC_DPLLB_SEL;
2542 
2543 		/* Be sure PCH DPLL SEL is set */
2544 		temp = I915_READ(PCH_DPLL_SEL);
2545 		if (pipe == 0) {
2546 			temp &= ~(TRANSA_DPLLB_SEL);
2547 			temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL);
2548 		} else if (pipe == 1) {
2549 			temp &= ~(TRANSB_DPLLB_SEL);
2550 			temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
2551 		} else if (pipe == 2) {
2552 			temp &= ~(TRANSC_DPLLB_SEL);
2553 			temp |= (TRANSC_DPLL_ENABLE | transc_sel);
2554 		}
2555 		I915_WRITE(PCH_DPLL_SEL, temp);
2556 	}
2557 
2558 	/* set transcoder timing, panel must allow it */
2559 	assert_panel_unlocked(dev_priv, pipe);
2560 	I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
2561 	I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
2562 	I915_WRITE(TRANS_HSYNC(pipe),  I915_READ(HSYNC(pipe)));
2563 
2564 	I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
2565 	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
2566 	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
2567 	I915_WRITE(TRANS_VSYNCSHIFT(pipe),  I915_READ(VSYNCSHIFT(pipe)));
2568 
2569 	intel_fdi_normal_train(crtc);
2570 
2571 	/* For PCH DP, enable TRANS_DP_CTL */
2572 	if (HAS_PCH_CPT(dev) &&
2573 	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
2574 	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2575 		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
2576 		reg = TRANS_DP_CTL(pipe);
2577 		temp = I915_READ(reg);
2578 		temp &= ~(TRANS_DP_PORT_SEL_MASK |
2579 			  TRANS_DP_SYNC_MASK |
2580 			  TRANS_DP_BPC_MASK);
2581 		temp |= (TRANS_DP_OUTPUT_ENABLE |
2582 			 TRANS_DP_ENH_FRAMING);
2583 		temp |= bpc << 9; /* same format but at 11:9 */
2584 
2585 		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
2586 			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
2587 		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
2588 			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
2589 
2590 		switch (intel_trans_dp_port_sel(crtc)) {
2591 		case PCH_DP_B:
2592 			temp |= TRANS_DP_PORT_SEL_B;
2593 			break;
2594 		case PCH_DP_C:
2595 			temp |= TRANS_DP_PORT_SEL_C;
2596 			break;
2597 		case PCH_DP_D:
2598 			temp |= TRANS_DP_PORT_SEL_D;
2599 			break;
2600 		default:
2601 			DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
2602 			temp |= TRANS_DP_PORT_SEL_B;
2603 			break;
2604 		}
2605 
2606 		I915_WRITE(reg, temp);
2607 	}
2608 
2609 	intel_enable_transcoder(dev_priv, pipe);
2610 }
2611 
2612 void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
2613 {
2614 	struct drm_i915_private *dev_priv = dev->dev_private;
2615 	int dslreg = PIPEDSL(pipe);
2616 	u32 temp;
2617 
2618 	temp = I915_READ(dslreg);
2619 	udelay(500);
2620 	if (wait_for(I915_READ(dslreg) != temp, 5)) {
2621 		if (wait_for(I915_READ(dslreg) != temp, 5))
2622 			DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
2623 	}
2624 }
2625 
/*
 * Full power-up sequence for an ILK+ crtc: FDI PLLs, optional panel
 * fitter, pipe, plane, then the PCH side if a PCH port is driven.
 * No-op if the crtc is already active.
 */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 temp;
	bool is_pch_port;

	if (intel_crtc->active)
		return;

	intel_crtc->active = true;
	intel_update_watermarks(dev);

	/* Make sure the LVDS port is powered before touching the pipe. */
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		temp = I915_READ(PCH_LVDS);
		if ((temp & LVDS_PORT_EN) == 0)
			I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
	}

	is_pch_port = intel_crtc_driving_pch(crtc);

	if (is_pch_port)
		ironlake_fdi_pll_enable(crtc);
	else
		ironlake_fdi_disable(crtc);

	/* Enable panel fitting for LVDS */
	if (dev_priv->pch_pf_size &&
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
		I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
	}

	intel_enable_pipe(dev_priv, pipe, is_pch_port);
	intel_enable_plane(dev_priv, plane, pipe);

	/* PCH transcoder/DP setup only once the pipe is up. */
	if (is_pch_port)
		ironlake_pch_enable(crtc);

	intel_crtc_load_lut(crtc);

	DRM_LOCK(dev);
	intel_update_fbc(dev);
	DRM_UNLOCK(dev);

	intel_crtc_update_cursor(crtc, true);
}
2681 
/*
 * Full power-down sequence for an ILK+ crtc, the reverse of
 * ironlake_crtc_enable(): plane, pipe, panel fitter, FDI, PCH ports,
 * transcoder, DPLL selection, and finally the FDI PLLs.
 * No-op if the crtc is already inactive.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	u32 reg, temp;

	if (!intel_crtc->active)
		return;

	intel_crtc_wait_for_pending_flips(crtc);
	drm_vblank_off(dev, pipe);
	intel_crtc_update_cursor(crtc, false);

	intel_disable_plane(dev_priv, plane, pipe);

	if (dev_priv->cfb_plane == plane)
		intel_disable_fbc(dev);

	intel_disable_pipe(dev_priv, pipe);

	/* Disable PF */
	I915_WRITE(PF_CTL(pipe), 0);
	I915_WRITE(PF_WIN_SZ(pipe), 0);

	ironlake_fdi_disable(crtc);

	/* This is a horrible layering violation; we should be doing this in
	 * the connector/encoder ->prepare instead, but we don't always have
	 * enough information there about the config to know whether it will
	 * actually be necessary or just cause undesired flicker.
	 */
	intel_disable_pch_ports(dev_priv, pipe);

	intel_disable_transcoder(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* disable TRANS_DP_CTL */
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
		temp |= TRANS_DP_PORT_SEL_NONE;
		I915_WRITE(reg, temp);

		/* disable DPLL_SEL */
		temp = I915_READ(PCH_DPLL_SEL);
		switch (pipe) {
		case 0:
			temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
			break;
		case 1:
			temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
			break;
		case 2:
			/* C shares PLL A or B */
			temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
			break;
		default:
			KASSERT(1, ("Wrong pipe %d", pipe)); /* wtf */
		}
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* disable PCH DPLL */
	if (!intel_crtc->no_pll)
		intel_disable_pch_pll(dev_priv, pipe);

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	DELAY(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	DELAY(100);

	intel_crtc->active = false;
	intel_update_watermarks(dev);

	DRM_LOCK(dev);
	intel_update_fbc(dev);
	intel_clear_scanline_wait(dev);
	DRM_UNLOCK(dev);
}
2780 
2781 static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
2782 {
2783 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2784 	int pipe = intel_crtc->pipe;
2785 	int plane = intel_crtc->plane;
2786 
2787 	/* XXX: When our outputs are all unaware of DPMS modes other than off
2788 	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
2789 	 */
2790 	switch (mode) {
2791 	case DRM_MODE_DPMS_ON:
2792 	case DRM_MODE_DPMS_STANDBY:
2793 	case DRM_MODE_DPMS_SUSPEND:
2794 		DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
2795 		ironlake_crtc_enable(crtc);
2796 		break;
2797 
2798 	case DRM_MODE_DPMS_OFF:
2799 		DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
2800 		ironlake_crtc_disable(crtc);
2801 		break;
2802 	}
2803 }
2804 
2805 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
2806 {
2807 	if (!enable && intel_crtc->overlay) {
2808 		struct drm_device *dev = intel_crtc->base.dev;
2809 		struct drm_i915_private *dev_priv = dev->dev_private;
2810 
2811 		DRM_LOCK(dev);
2812 		dev_priv->mm.interruptible = false;
2813 		(void) intel_overlay_switch_off(intel_crtc->overlay);
2814 		dev_priv->mm.interruptible = true;
2815 		DRM_UNLOCK(dev);
2816 	}
2817 
2818 	/* Let userspace switch the overlay on again. In most cases userspace
2819 	 * has to recompute where to put it anyway.
2820 	 */
2821 }
2822 
/*
 * Power-up sequence for pre-ILK (i9xx) crtcs: PLL, pipe, plane, then
 * LUT, FBC, overlay and cursor.  No-op if already active.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	if (intel_crtc->active)
		return;

	intel_crtc->active = true;
	intel_update_watermarks(dev);

	intel_enable_pll(dev_priv, pipe);
	intel_enable_pipe(dev_priv, pipe, false);
	intel_enable_plane(dev_priv, plane, pipe);

	intel_crtc_load_lut(crtc);
	intel_update_fbc(dev);

	/* Give the overlay scaler a chance to enable if it's on this pipe */
	intel_crtc_dpms_overlay(intel_crtc, true);
	intel_crtc_update_cursor(crtc, true);
}
2848 
/*
 * Power-down sequence for pre-ILK (i9xx) crtcs, reverse order of
 * i9xx_crtc_enable(): overlay/cursor, FBC, plane, pipe, PLL.
 * No-op if already inactive.
 */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;

	if (!intel_crtc->active)
		return;

	/* Give the overlay scaler a chance to disable if it's on this pipe */
	intel_crtc_wait_for_pending_flips(crtc);
	drm_vblank_off(dev, pipe);
	intel_crtc_dpms_overlay(intel_crtc, false);
	intel_crtc_update_cursor(crtc, false);

	if (dev_priv->cfb_plane == plane)
		intel_disable_fbc(dev);

	intel_disable_plane(dev_priv, plane, pipe);
	intel_disable_pipe(dev_priv, pipe);
	intel_disable_pll(dev_priv, pipe);

	intel_crtc->active = false;
	intel_update_fbc(dev);
	intel_update_watermarks(dev);
	intel_clear_scanline_wait(dev);
}
2878 
2879 static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
2880 {
2881 	/* XXX: When our outputs are all unaware of DPMS modes other than off
2882 	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
2883 	 */
2884 	switch (mode) {
2885 	case DRM_MODE_DPMS_ON:
2886 	case DRM_MODE_DPMS_STANDBY:
2887 	case DRM_MODE_DPMS_SUSPEND:
2888 		i9xx_crtc_enable(crtc);
2889 		break;
2890 	case DRM_MODE_DPMS_OFF:
2891 		i9xx_crtc_disable(crtc);
2892 		break;
2893 	}
2894 }
2895 
2896 /**
2897  * Sets the power management mode of the pipe and plane.
2898  */
2899 static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
2900 {
2901 	struct drm_device *dev = crtc->dev;
2902 	struct drm_i915_private *dev_priv = dev->dev_private;
2903 #if 0
2904 	struct drm_i915_master_private *master_priv;
2905 #endif
2906 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2907 	int pipe = intel_crtc->pipe;
2908 	bool enabled;
2909 
2910 	if (intel_crtc->dpms_mode == mode)
2911 		return;
2912 
2913 	intel_crtc->dpms_mode = mode;
2914 
2915 	dev_priv->display.dpms(crtc, mode);
2916 
2917 #if 0
2918 	if (!dev->primary->master)
2919 		return;
2920 
2921 	master_priv = dev->primary->master->driver_priv;
2922 	if (!master_priv->sarea_priv)
2923 		return;
2924 #else
2925 	if (!dev_priv->sarea_priv)
2926 		return;
2927 #endif
2928 
2929 	enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
2930 
2931 	switch (pipe) {
2932 	case 0:
2933 #if 0
2934 		master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
2935 		master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
2936 #else
2937 		dev_priv->sarea_priv->planeA_w = enabled ? crtc->mode.hdisplay : 0;
2938 		dev_priv->sarea_priv->planeA_h = enabled ? crtc->mode.vdisplay : 0;
2939 #endif
2940 		break;
2941 	case 1:
2942 #if 0
2943 		master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
2944 		master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
2945 #else
2946 		dev_priv->sarea_priv->planeB_w = enabled ? crtc->mode.hdisplay : 0;
2947 		dev_priv->sarea_priv->planeB_h = enabled ? crtc->mode.vdisplay : 0;
2948 #endif
2949 		break;
2950 	default:
2951 		DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
2952 		break;
2953 	}
2954 }
2955 
/*
 * Fully disable @crtc via its helper dpms hook and release the pinned
 * framebuffer object, flushing any GPU work on the fb first.
 */
static void intel_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	struct drm_device *dev = crtc->dev;

	/* Flush any pending WAITs before we disable the pipe. Note that
	 * we need to drop the struct_mutex in order to acquire it again
	 * during the lowlevel dpms routines around a couple of the
	 * operations. It does not look trivial nor desirable to move
	 * that locking higher. So instead we leave a window for the
	 * submission of further commands on the fb before we can actually
	 * disable it. This race with userspace exists anyway, and we can
	 * only rely on the pipe being disabled by userspace after it
	 * receives the hotplug notification and has flushed any pending
	 * batches.
	 */
	if (crtc->fb) {
		DRM_LOCK(dev);
		intel_finish_fb(crtc->fb);
		DRM_UNLOCK(dev);
	}

	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
 	assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
	assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);

	/* Drop the pin reference taken when the fb was set. */
	if (crtc->fb) {
		DRM_LOCK(dev);
		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
		DRM_UNLOCK(dev);
	}
}
2988 
/* Prepare for a mode set.
 *
 * Note we could be a lot smarter here.  We need to figure out which outputs
 * will be enabled, which disabled (in short, how the config will changes)
 * and perform the minimum necessary steps to accomplish that, e.g. updating
 * watermarks, FBC configuration, making sure PLLs are programmed correctly,
 * panel fitting is in the proper state, etc.
 */
/* drm_crtc_helper ->prepare hook: simply power the crtc down. */
static void i9xx_crtc_prepare(struct drm_crtc *crtc)
{
	i9xx_crtc_disable(crtc);
}
3001 
/* drm_crtc_helper ->commit hook: power the crtc back up after modeset. */
static void i9xx_crtc_commit(struct drm_crtc *crtc)
{
	i9xx_crtc_enable(crtc);
}
3006 
/* drm_crtc_helper ->prepare hook (ILK+): power the crtc down. */
static void ironlake_crtc_prepare(struct drm_crtc *crtc)
{
	ironlake_crtc_disable(crtc);
}
3011 
/* drm_crtc_helper ->commit hook (ILK+): power the crtc back up. */
static void ironlake_crtc_commit(struct drm_crtc *crtc)
{
	ironlake_crtc_enable(crtc);
}
3016 
/* Generic encoder ->prepare: turn the encoder off before a modeset. */
void intel_encoder_prepare(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	/* lvds has its own version of prepare see intel_lvds_prepare */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
3023 
/* Generic encoder ->commit: turn the encoder on after a modeset and,
 * on CougarPoint, verify the pipe actually started running. */
void intel_encoder_commit(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	struct drm_device *dev = encoder->dev;
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
	struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc);

	/* lvds has its own version of commit see intel_lvds_commit */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);

	if (HAS_PCH_CPT(dev))
		intel_cpt_verify_modeset(dev, intel_crtc->pipe);
}
3037 
/* Tear down a drm encoder and free the enclosing intel_encoder. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	drm_free(intel_encoder, DRM_MEM_KMS);
}
3045 
/*
 * CRTC mode_fixup hook: validate the requested mode and fill in the crtc_*
 * timing fields of @adjusted_mode.
 *
 * Returns false when the mode cannot be supported: on PCH platforms the
 * pixel clock is rejected if it would oversubscribe the fixed-rate FDI link.
 */
static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
				  const struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;

	if (HAS_PCH_SPLIT(dev)) {
		/* FDI link clock is fixed at 2.7G */
		if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
			return false;
	}

	/* All interlaced capable intel hw wants timings in frames. Note though
	 * that intel_lvds_mode_fixup does some funny tricks with the crtc
	 * timings, so we need to be careful not to clobber these.*/
	if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
		drm_mode_set_crtcinfo(adjusted_mode, 0);

	return true;
}
3066 
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	/* i945-class hardware: fixed display clock, reported in kHz. */
	const int cdclk_khz = 400000;

	return cdclk_khz;
}
3071 
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	/* i915-class hardware: fixed display clock, reported in kHz. */
	const int cdclk_khz = 333000;

	return cdclk_khz;
}
3076 
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	/* Fallback for remaining i9xx variants: fixed display clock in kHz. */
	const int cdclk_khz = 200000;

	return cdclk_khz;
}
3081 
3082 static int i915gm_get_display_clock_speed(struct drm_device *dev)
3083 {
3084 	u16 gcfgc = 0;
3085 
3086 	gcfgc = pci_read_config(dev->dev, GCFGC, 2);
3087 
3088 	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
3089 		return 133000;
3090 	else {
3091 		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
3092 		case GC_DISPLAY_CLOCK_333_MHZ:
3093 			return 333000;
3094 		default:
3095 		case GC_DISPLAY_CLOCK_190_200_MHZ:
3096 			return 190000;
3097 		}
3098 	}
3099 }
3100 
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	/* i865: fixed display clock, reported in kHz. */
	const int cdclk_khz = 266000;

	return cdclk_khz;
}
3105 
static int i855_get_display_clock_speed(struct drm_device *dev)
{
	u16 hpllcc = 0;
	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 *
	 * NOTE(review): hpllcc is never read back from the hardware in this
	 * port, so the switch below always sees 0 and the function returns a
	 * constant -- presumably GC_CLOCK_133_200 is the zero value, making
	 * the result 200000.  Confirm against the register definitions
	 * before relying on the other clock bands.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133000;
	}

	/* Shouldn't happen */
	return 0;
}
3125 
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	/* i830: fixed display clock, reported in kHz. */
	const int cdclk_khz = 133000;

	return cdclk_khz;
}
3130 
/*
 * FDI link M/N ratio values, computed by ironlake_compute_m_n() from the
 * pixel clock, link clock, lane count and bits per pixel.
 */
struct fdi_m_n {
	u32        tu;		/* transfer unit size (set to 64 by default) */
	u32        gmch_m;	/* data M: bits_per_pixel * pixel_clock */
	u32        gmch_n;	/* data N: link_clock * nlanes * 8 */
	u32        link_m;	/* link M: pixel clock */
	u32        link_n;	/* link N: link clock */
};
3138 
3139 static void
3140 fdi_reduce_ratio(u32 *num, u32 *den)
3141 {
3142 	while (*num > 0xffffff || *den > 0xffffff) {
3143 		*num >>= 1;
3144 		*den >>= 1;
3145 	}
3146 }
3147 
3148 static void
3149 ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
3150 		     int link_clock, struct fdi_m_n *m_n)
3151 {
3152 	m_n->tu = 64; /* default size */
3153 
3154 	/* BUG_ON(pixel_clock > INT_MAX / 36); */
3155 	m_n->gmch_m = bits_per_pixel * pixel_clock;
3156 	m_n->gmch_n = link_clock * nlanes * 8;
3157 	fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
3158 
3159 	m_n->link_m = pixel_clock;
3160 	m_n->link_n = link_clock;
3161 	fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
3162 }
3163 
3164 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
3165 {
3166 	if (i915_panel_use_ssc >= 0)
3167 		return i915_panel_use_ssc != 0;
3168 	return dev_priv->lvds_use_ssc
3169 		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
3170 }
3171 
3172 /**
3173  * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
3174  * @crtc: CRTC structure
3175  * @mode: requested mode
3176  *
3177  * A pipe may be connected to one or more outputs.  Based on the depth of the
3178  * attached framebuffer, choose a good color depth to use on the pipe.
3179  *
3180  * If possible, match the pipe depth to the fb depth.  In some cases, this
3181  * isn't ideal, because the connected output supports a lesser or restricted
3182  * set of depths.  Resolve that here:
3183  *    LVDS typically supports only 6bpc, so clamp down in that case
3184  *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
3185  *    Displays may support a restricted set as well, check EDID and clamp as
3186  *      appropriate.
3187  *    DP may want to dither down to 6bpc to fit larger modes
3188  *
3189  * RETURNS:
3190  * Dithering requirement (i.e. false if display bpc and pipe bpc match,
3191  * true if they don't match).
3192  */
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
					 unsigned int *pipe_bpp,
					 struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	unsigned int display_bpc = UINT_MAX, bpc;

	/* Walk the encoders & connectors on this crtc, get min bpc */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		if (encoder->crtc != crtc)
			continue;

		if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
			unsigned int lvds_bpc;

			/* A3 power state distinguishes 24-bit (8bpc) from
			 * 18-bit (6bpc) LVDS panels. */
			if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
			    LVDS_A3_POWER_UP)
				lvds_bpc = 8;
			else
				lvds_bpc = 6;

			if (lvds_bpc < display_bpc) {
				DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
				display_bpc = lvds_bpc;
			}
			continue;
		}

		if (intel_encoder->type == INTEL_OUTPUT_EDP) {
			/* Use VBT settings if we have an eDP panel */
			unsigned int edp_bpc = dev_priv->edp.bpp / 3;

			if (edp_bpc < display_bpc) {
				DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
				display_bpc = edp_bpc;
			}
			continue;
		}

		/* Not one of the known troublemakers, check the EDID */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    head) {
			if (connector->encoder != encoder)
				continue;

			/* Don't use an invalid EDID bpc value */
			if (connector->display_info.bpc &&
			    connector->display_info.bpc < display_bpc) {
				DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
				display_bpc = connector->display_info.bpc;
			}
		}

		/*
		 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
		 * through, clamp it down.  (Note: >12bpc will be caught below.)
		 *
		 * NOTE(review): as written, any value <= 8 (including an
		 * earlier clamp to 6) is raised back to 8 here, and exactly
		 * 12 also falls into the 8bpc branch -- confirm intent
		 * before changing.
		 */
		if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
			if (display_bpc > 8 && display_bpc < 12) {
				DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
				display_bpc = 12;
			} else {
				DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
				display_bpc = 8;
			}
		}
	}

	/* DP can ask for 6bpc dithering to fit large modes into the link. */
	if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
		DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
		display_bpc = 6;
	}

	/*
	 * We could just drive the pipe at the highest bpc all the time and
	 * enable dithering as needed, but that costs bandwidth.  So choose
	 * the minimum value that expresses the full color range of the fb but
	 * also stays within the max display bpc discovered above.
	 */

	/* Map framebuffer depth to the minimum pipe bpc that preserves it. */
	switch (crtc->fb->depth) {
	case 8:
		bpc = 8; /* since we go through a colormap */
		break;
	case 15:
	case 16:
		bpc = 6; /* min is 18bpp */
		break;
	case 24:
		bpc = 8;
		break;
	case 30:
		bpc = 10;
		break;
	case 48:
		bpc = 12;
		break;
	default:
		DRM_DEBUG("unsupported depth, assuming 24 bits\n");
		bpc = min((unsigned int)8, display_bpc);
		break;
	}

	display_bpc = min(display_bpc, bpc);

	DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
			 bpc, display_bpc);

	/* Pipe bpp is three components of display_bpc bits each. */
	*pipe_bpp = display_bpc * 3;

	/* Dithering is needed whenever the pipe runs shallower than the fb. */
	return display_bpc != bpc;
}
3310 
3311 static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
3312 {
3313 	struct drm_device *dev = crtc->dev;
3314 	struct drm_i915_private *dev_priv = dev->dev_private;
3315 	int refclk;
3316 
3317 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
3318 	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
3319 		refclk = dev_priv->lvds_ssc_freq * 1000;
3320 		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
3321 			      refclk / 1000);
3322 	} else if (!IS_GEN2(dev)) {
3323 		refclk = 96000;
3324 	} else {
3325 		refclk = 48000;
3326 	}
3327 
3328 	return refclk;
3329 }
3330 
3331 static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
3332 				      intel_clock_t *clock)
3333 {
3334 	/* SDVO TV has fixed PLL values depend on its clock range,
3335 	   this mirrors vbios setting. */
3336 	if (adjusted_mode->clock >= 100000
3337 	    && adjusted_mode->clock < 140500) {
3338 		clock->p1 = 2;
3339 		clock->p2 = 10;
3340 		clock->n = 3;
3341 		clock->m1 = 16;
3342 		clock->m2 = 8;
3343 	} else if (adjusted_mode->clock >= 140500
3344 		   && adjusted_mode->clock <= 200000) {
3345 		clock->p1 = 1;
3346 		clock->p2 = 10;
3347 		clock->n = 6;
3348 		clock->m1 = 12;
3349 		clock->m2 = 8;
3350 	}
3351 }
3352 
/*
 * Program the FP0/FP1 PLL divisor registers for the crtc's pipe from the
 * computed clock values.  FP1 receives the reduced (downclock) divisors
 * only for LVDS with powersave enabled and a reduced clock available;
 * otherwise it mirrors FP0.  Also records in intel_crtc->lowfreq_avail
 * whether the low-frequency clock can be used later.
 */
static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
				     intel_clock_t *clock,
				     intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 fp, fp2 = 0;

	/* Pineview encodes n as a power-of-two field; other parts take it
	 * directly. */
	if (IS_PINEVIEW(dev)) {
		fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
		if (reduced_clock)
			fp2 = (1 << reduced_clock->n) << 16 |
				reduced_clock->m1 << 8 | reduced_clock->m2;
	} else {
		fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
		if (reduced_clock)
			fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
				reduced_clock->m2;
	}

	I915_WRITE(FP0(pipe), fp);

	intel_crtc->lowfreq_avail = false;
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    reduced_clock && i915_powersave) {
		I915_WRITE(FP1(pipe), fp2);
		intel_crtc->lowfreq_avail = true;
	} else {
		I915_WRITE(FP1(pipe), fp);
	}
}
3386 
/*
 * Perform a full mode set on gen2-gen4 (non-PCH) hardware: pick PLL
 * divisors for the target clock, build and program the DPLL, LVDS port,
 * pipe timing, PIPECONF and plane registers, then enable the pipe and
 * plane and point the plane at the new framebuffer.
 *
 * Returns 0 on success, -EINVAL if no PLL divisors satisfy the requested
 * clock, or the error from intel_pipe_set_base().
 */
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
			      struct drm_display_mode *mode,
			      struct drm_display_mode *adjusted_mode,
			      int x, int y,
			      struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int plane = intel_crtc->plane;
	int refclk, num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	u32 dpll, dspcntr, pipeconf, vsyncshift;
	bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	int ret;
	u32 temp;
	u32 lvds_sync = 0;

	/* Classify the output types attached to this crtc; the flags steer
	 * the PLL and port programming below. */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
		if (encoder->base.crtc != crtc)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			if (encoder->needs_tv_clock)
				is_tv = true;
			break;
		case INTEL_OUTPUT_DVO:
			is_dvo = true;
			break;
		case INTEL_OUTPUT_TVOUT:
			is_tv = true;
			break;
		case INTEL_OUTPUT_ANALOG:
			is_crt = true;
			break;
		case INTEL_OUTPUT_DISPLAYPORT:
			is_dp = true;
			break;
		}

		num_connectors++;
	}

	refclk = i9xx_get_refclk(crtc, num_connectors);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or false.  The returned values represent the clock equation:
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc, refclk);
	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
			     &clock);
	if (!ok) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	/* Ensure that the cursor is valid for the new mode before changing... */
	intel_crtc_update_cursor(crtc, true);

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		/*
		 * Ensure we match the reduced clock's P to the target clock.
		 * If the clocks don't match, we can't switch the display clock
		 * by using the FP0/FP1. In such case we will disable the LVDS
		 * downclock feature.
		*/
		has_reduced_clock = limit->find_pll(limit, crtc,
						    dev_priv->lvds_downclock,
						    refclk,
						    &clock,
						    &reduced_clock);
	}

	if (is_sdvo && is_tv)
		i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);

	i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
				 &reduced_clock : NULL);

	/* Build the DPLL control value for this configuration. */
	dpll = DPLL_VGA_MODE_DIS;

	if (!IS_GEN2(dev)) {
		if (is_lvds)
			dpll |= DPLLB_MODE_LVDS;
		else
			dpll |= DPLLB_MODE_DAC_SERIAL;
		if (is_sdvo) {
			int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
			if (pixel_multiplier > 1) {
				if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
					dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
			}
			dpll |= DPLL_DVO_HIGH_SPEED;
		}
		if (is_dp)
			dpll |= DPLL_DVO_HIGH_SPEED;

		/* compute bitmask from p1 value */
		if (IS_PINEVIEW(dev))
			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
		else {
			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
			if (IS_G4X(dev) && has_reduced_clock)
				dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
		}
		switch (clock.p2) {
		case 5:
			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
			break;
		case 7:
			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
			break;
		case 10:
			dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
			break;
		case 14:
			dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
			break;
		}
		if (INTEL_INFO(dev)->gen >= 4)
			dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
	} else {
		/* Gen2 uses a different P1/P2 encoding. */
		if (is_lvds) {
			dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		} else {
			if (clock.p1 == 2)
				dpll |= PLL_P1_DIVIDE_BY_TWO;
			else
				dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
			if (clock.p2 == 4)
				dpll |= PLL_P2_DIVIDE_BY_4;
		}
	}

	/* Select the PLL reference input. */
	if (is_sdvo && is_tv)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (is_tv)
		/* XXX: just matching BIOS for now */
		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
		dpll |= 3;
	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	/* setup pipeconf */
	pipeconf = I915_READ(PIPECONF(pipe));

	/* Set up the display plane register */
	dspcntr = DISPPLANE_GAMMA_ENABLE;

	if (pipe == 0)
		dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
	else
		dspcntr |= DISPPLANE_SEL_PIPE_B;

	if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
		/* Enable pixel doubling when the dot clock is > 90% of the (display)
		 * core speed.
		 *
		 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
		 * pipe == 0 check?
		 */
		if (mode->clock >
		    dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
			pipeconf |= PIPECONF_DOUBLE_WIDE;
		else
			pipeconf &= ~PIPECONF_DOUBLE_WIDE;
	}

	/* default to 8bpc */
	pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
	if (is_dp) {
		if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
			pipeconf |= PIPECONF_BPP_6 |
				    PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;
		}
	}

	dpll |= DPLL_VCO_ENABLE;

	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
	drm_mode_debug_printmodeline(mode);

	/* Write the DPLL with the VCO still disabled, then let it settle. */
	I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);

	POSTING_READ(DPLL(pipe));
	DELAY(150);

	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
	 * This is an exception to the general rule that mode_set doesn't turn
	 * things on.
	 */
	if (is_lvds) {
		temp = I915_READ(LVDS);
		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
		if (pipe == 1) {
			temp |= LVDS_PIPEB_SELECT;
		} else {
			temp &= ~LVDS_PIPEB_SELECT;
		}
		/* set the corresponding LVDS_BORDER bit */
		temp |= dev_priv->lvds_border_bits;
		/* Set the B0-B3 data pairs corresponding to whether we're going to
		 * set the DPLLs for dual-channel mode or not.
		 */
		if (clock.p2 == 7)
			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
		else
			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);

		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
		 * appropriately here, but we need to look more thoroughly into how
		 * panels behave in the two modes.
		 */
		/* set the dithering flag on LVDS as needed */
		if (INTEL_INFO(dev)->gen >= 4) {
			if (dev_priv->lvds_dither)
				temp |= LVDS_ENABLE_DITHER;
			else
				temp &= ~LVDS_ENABLE_DITHER;
		}
		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
			lvds_sync |= LVDS_HSYNC_POLARITY;
		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
			lvds_sync |= LVDS_VSYNC_POLARITY;
		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
		    != lvds_sync) {
			char flags[2] = "-+";
			DRM_INFO("Changing LVDS panel from "
				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
				 flags[!(temp & LVDS_HSYNC_POLARITY)],
				 flags[!(temp & LVDS_VSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
			temp |= lvds_sync;
		}
		I915_WRITE(LVDS, temp);
	}

	if (is_dp) {
		intel_dp_set_m_n(crtc, mode, adjusted_mode);
	}

	/* Enable the VCO now that everything feeding it is configured. */
	I915_WRITE(DPLL(pipe), dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(DPLL(pipe));
	DELAY(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		temp = 0;
		if (is_sdvo) {
			temp = intel_mode_get_pixel_multiplier(adjusted_mode);
			if (temp > 1)
				temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
			else
				temp = 0;
		}
		I915_WRITE(DPLL_MD(pipe), temp);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(DPLL(pipe), dpll);
	}

	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
			pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
		}
	}

	pipeconf &= ~PIPECONF_INTERLACE_MASK;
	if (!IS_GEN2(dev) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		/* the chip adds 2 halflines automatically */
		adjusted_mode->crtc_vtotal -= 1;
		adjusted_mode->crtc_vblank_end -= 1;
		vsyncshift = adjusted_mode->crtc_hsync_start
			     - adjusted_mode->crtc_htotal/2;
	} else {
		pipeconf |= PIPECONF_PROGRESSIVE;
		vsyncshift = 0;
	}

	if (!IS_GEN3(dev))
		I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);

	/* Program the pipe timing registers (all fields are value - 1). */
	I915_WRITE(HTOTAL(pipe),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(pipe),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(pipe),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(pipe),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((adjusted_mode->crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(pipe),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(pipe),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* pipesrc and dspsize control the size that is scaled from,
	 * which should always be the user's requested size.
	 */
	I915_WRITE(DSPSIZE(plane),
		   ((mode->vdisplay - 1) << 16) |
		   (mode->hdisplay - 1));
	I915_WRITE(DSPPOS(plane), 0);
	I915_WRITE(PIPESRC(pipe),
		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));

	I915_WRITE(PIPECONF(pipe), pipeconf);
	POSTING_READ(PIPECONF(pipe));
	intel_enable_pipe(dev_priv, pipe, false);

	intel_wait_for_vblank(dev, pipe);

	I915_WRITE(DSPCNTR(plane), dspcntr);
	POSTING_READ(DSPCNTR(plane));
	intel_enable_plane(dev_priv, plane, pipe);

	/* Point the plane at the new framebuffer. */
	ret = intel_pipe_set_base(crtc, x, y, old_fb);

	intel_update_watermarks(dev);

	return ret;
}
3744 
3745 /*
3746  * Initialize reference clocks when the driver loads
3747  */
/*
 * Program PCH_DREF_CONTROL based on which panel outputs exist: select the
 * nonspread source (CK505 on IBX when the VBT says so, internal otherwise),
 * enable the SSC source and SSC1 when a panel wants spread spectrum, and
 * route the CPU source output for CPU-attached eDP.  Each register write is
 * followed by a posting read and a 200us settle delay, per the sequencing
 * the hardware requires.
 */
void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;
	u32 temp;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_pch_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;

	/* We need to take the global config into account */
	list_for_each_entry(encoder, &mode_config->encoder_list,
			    base.head) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (intel_encoder_is_pch_edp(&encoder->base))
				has_pch_edp = true;
			else
				has_cpu_edp = true;
			break;
		}
	}

	/* Only IBX can have an external CK505 clock chip; on it, SSC is only
	 * usable through the CK505. */
	if (HAS_PCH_IBX(dev)) {
		has_ck505 = dev_priv->display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
		      has_panel, has_lvds, has_pch_edp, has_cpu_edp,
		      has_ck505);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	temp = I915_READ(PCH_DREF_CONTROL);
	/* Always enable nonspread source */
	temp &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		temp |= DREF_NONSPREAD_CK505_ENABLE;
	else
		temp |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		temp &= ~DREF_SSC_SOURCE_MASK;
		temp |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			temp |= DREF_SSC1_ENABLE;
		} else
			temp &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		DELAY(200);

		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			}
			else
				temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		DELAY(200);
	} else {
		DRM_DEBUG_KMS("Disabling SSC entirely\n");

		temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		DELAY(200);

		/* Turn off the SSC source */
		temp &= ~DREF_SSC_SOURCE_MASK;
		temp |= DREF_SSC_SOURCE_DISABLE;

		/* Turn off SSC1 */
		temp &= ~ DREF_SSC1_ENABLE;

		I915_WRITE(PCH_DREF_CONTROL, temp);
		POSTING_READ(PCH_DREF_CONTROL);
		DELAY(200);
	}
}
3861 
3862 static int ironlake_get_refclk(struct drm_crtc *crtc)
3863 {
3864 	struct drm_device *dev = crtc->dev;
3865 	struct drm_i915_private *dev_priv = dev->dev_private;
3866 	struct intel_encoder *encoder;
3867 	struct drm_mode_config *mode_config = &dev->mode_config;
3868 	struct intel_encoder *edp_encoder = NULL;
3869 	int num_connectors = 0;
3870 	bool is_lvds = false;
3871 
3872 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
3873 		if (encoder->base.crtc != crtc)
3874 			continue;
3875 
3876 		switch (encoder->type) {
3877 		case INTEL_OUTPUT_LVDS:
3878 			is_lvds = true;
3879 			break;
3880 		case INTEL_OUTPUT_EDP:
3881 			edp_encoder = encoder;
3882 			break;
3883 		}
3884 		num_connectors++;
3885 	}
3886 
3887 	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
3888 		DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
3889 			      dev_priv->lvds_ssc_freq);
3890 		return dev_priv->lvds_ssc_freq * 1000;
3891 	}
3892 
3893 	return 120000;
3894 }
3895 
3896 static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
3897 				  struct drm_display_mode *mode,
3898 				  struct drm_display_mode *adjusted_mode,
3899 				  int x, int y,
3900 				  struct drm_framebuffer *old_fb)
3901 {
3902 	struct drm_device *dev = crtc->dev;
3903 	struct drm_i915_private *dev_priv = dev->dev_private;
3904 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3905 	int pipe = intel_crtc->pipe;
3906 	int plane = intel_crtc->plane;
3907 	int refclk, num_connectors = 0;
3908 	intel_clock_t clock, reduced_clock;
3909 	u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
3910 	bool ok, has_reduced_clock = false, is_sdvo = false;
3911 	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
3912 	struct intel_encoder *has_edp_encoder = NULL;
3913 	struct drm_mode_config *mode_config = &dev->mode_config;
3914 	struct intel_encoder *encoder;
3915 	const intel_limit_t *limit;
3916 	int ret;
3917 	struct fdi_m_n m_n = {0};
3918 	u32 temp;
3919 	u32 lvds_sync = 0;
3920 	int target_clock, pixel_multiplier, lane, link_bw, factor;
3921 	unsigned int pipe_bpp;
3922 	bool dither;
3923 
3924 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
3925 		if (encoder->base.crtc != crtc)
3926 			continue;
3927 
3928 		switch (encoder->type) {
3929 		case INTEL_OUTPUT_LVDS:
3930 			is_lvds = true;
3931 			break;
3932 		case INTEL_OUTPUT_SDVO:
3933 		case INTEL_OUTPUT_HDMI:
3934 			is_sdvo = true;
3935 			if (encoder->needs_tv_clock)
3936 				is_tv = true;
3937 			break;
3938 		case INTEL_OUTPUT_TVOUT:
3939 			is_tv = true;
3940 			break;
3941 		case INTEL_OUTPUT_ANALOG:
3942 			is_crt = true;
3943 			break;
3944 		case INTEL_OUTPUT_DISPLAYPORT:
3945 			is_dp = true;
3946 			break;
3947 		case INTEL_OUTPUT_EDP:
3948 			has_edp_encoder = encoder;
3949 			break;
3950 		}
3951 
3952 		num_connectors++;
3953 	}
3954 
3955 	refclk = ironlake_get_refclk(crtc);
3956 
3957 	/*
3958 	 * Returns a set of divisors for the desired target clock with the given
3959 	 * refclk, or false.  The returned values represent the clock equation:
3960 	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
3961 	 */
3962 	limit = intel_limit(crtc, refclk);
3963 	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
3964 			     &clock);
3965 	if (!ok) {
3966 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
3967 		return -EINVAL;
3968 	}
3969 
3970 	/* Ensure that the cursor is valid for the new mode before changing... */
3971 	intel_crtc_update_cursor(crtc, true);
3972 
3973 	if (is_lvds && dev_priv->lvds_downclock_avail) {
3974 		/*
3975 		 * Ensure we match the reduced clock's P to the target clock.
3976 		 * If the clocks don't match, we can't switch the display clock
3977 		 * by using the FP0/FP1. In such case we will disable the LVDS
3978 		 * downclock feature.
3979 		*/
3980 		has_reduced_clock = limit->find_pll(limit, crtc,
3981 						    dev_priv->lvds_downclock,
3982 						    refclk,
3983 						    &clock,
3984 						    &reduced_clock);
3985 	}
3986 	/* SDVO TV has fixed PLL values depend on its clock range,
3987 	   this mirrors vbios setting. */
3988 	if (is_sdvo && is_tv) {
3989 		if (adjusted_mode->clock >= 100000
3990 		    && adjusted_mode->clock < 140500) {
3991 			clock.p1 = 2;
3992 			clock.p2 = 10;
3993 			clock.n = 3;
3994 			clock.m1 = 16;
3995 			clock.m2 = 8;
3996 		} else if (adjusted_mode->clock >= 140500
3997 			   && adjusted_mode->clock <= 200000) {
3998 			clock.p1 = 1;
3999 			clock.p2 = 10;
4000 			clock.n = 6;
4001 			clock.m1 = 12;
4002 			clock.m2 = 8;
4003 		}
4004 	}
4005 
4006 	/* FDI link */
4007 	pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4008 	lane = 0;
4009 	/* CPU eDP doesn't require FDI link, so just set DP M/N
4010 	   according to current link config */
4011 	if (has_edp_encoder &&
4012 	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
4013 		target_clock = mode->clock;
4014 		intel_edp_link_config(has_edp_encoder,
4015 				      &lane, &link_bw);
4016 	} else {
4017 		/* [e]DP over FDI requires target mode clock
4018 		   instead of link clock */
4019 		if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
4020 			target_clock = mode->clock;
4021 		else
4022 			target_clock = adjusted_mode->clock;
4023 
4024 		/* FDI is a binary signal running at ~2.7GHz, encoding
4025 		 * each output octet as 10 bits. The actual frequency
4026 		 * is stored as a divider into a 100MHz clock, and the
4027 		 * mode pixel clock is stored in units of 1KHz.
4028 		 * Hence the bw of each lane in terms of the mode signal
4029 		 * is:
4030 		 */
4031 		link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
4032 	}
4033 
4034 	/* determine panel color depth */
4035 	temp = I915_READ(PIPECONF(pipe));
4036 	temp &= ~PIPE_BPC_MASK;
4037 	dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
4038 	switch (pipe_bpp) {
4039 	case 18:
4040 		temp |= PIPE_6BPC;
4041 		break;
4042 	case 24:
4043 		temp |= PIPE_8BPC;
4044 		break;
4045 	case 30:
4046 		temp |= PIPE_10BPC;
4047 		break;
4048 	case 36:
4049 		temp |= PIPE_12BPC;
4050 		break;
4051 	default:
4052 		kprintf("intel_choose_pipe_bpp returned invalid value %d\n",
4053 			pipe_bpp);
4054 		temp |= PIPE_8BPC;
4055 		pipe_bpp = 24;
4056 		break;
4057 	}
4058 
4059 	intel_crtc->bpp = pipe_bpp;
4060 	I915_WRITE(PIPECONF(pipe), temp);
4061 
4062 	if (!lane) {
4063 		/*
4064 		 * Account for spread spectrum to avoid
4065 		 * oversubscribing the link. Max center spread
4066 		 * is 2.5%; use 5% for safety's sake.
4067 		 */
4068 		u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
4069 		lane = bps / (link_bw * 8) + 1;
4070 	}
4071 
4072 	intel_crtc->fdi_lanes = lane;
4073 
4074 	if (pixel_multiplier > 1)
4075 		link_bw *= pixel_multiplier;
4076 	ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
4077 			     &m_n);
4078 
4079 	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
4080 	if (has_reduced_clock)
4081 		fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
4082 			reduced_clock.m2;
4083 
4084 	/* Enable autotuning of the PLL clock (if permissible) */
4085 	factor = 21;
4086 	if (is_lvds) {
4087 		if ((intel_panel_use_ssc(dev_priv) &&
4088 		     dev_priv->lvds_ssc_freq == 100) ||
4089 		    (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
4090 			factor = 25;
4091 	} else if (is_sdvo && is_tv)
4092 		factor = 20;
4093 
4094 	if (clock.m < factor * clock.n)
4095 		fp |= FP_CB_TUNE;
4096 
4097 	dpll = 0;
4098 
4099 	if (is_lvds)
4100 		dpll |= DPLLB_MODE_LVDS;
4101 	else
4102 		dpll |= DPLLB_MODE_DAC_SERIAL;
4103 	if (is_sdvo) {
4104 		int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4105 		if (pixel_multiplier > 1) {
4106 			dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
4107 		}
4108 		dpll |= DPLL_DVO_HIGH_SPEED;
4109 	}
4110 	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base))
4111 		dpll |= DPLL_DVO_HIGH_SPEED;
4112 
4113 	/* compute bitmask from p1 value */
4114 	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4115 	/* also FPA1 */
4116 	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
4117 
4118 	switch (clock.p2) {
4119 	case 5:
4120 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
4121 		break;
4122 	case 7:
4123 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
4124 		break;
4125 	case 10:
4126 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
4127 		break;
4128 	case 14:
4129 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
4130 		break;
4131 	}
4132 
4133 	if (is_sdvo && is_tv)
4134 		dpll |= PLL_REF_INPUT_TVCLKINBC;
4135 	else if (is_tv)
4136 		/* XXX: just matching BIOS for now */
4137 		/*	dpll |= PLL_REF_INPUT_TVCLKINBC; */
4138 		dpll |= 3;
4139 	else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4140 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4141 	else
4142 		dpll |= PLL_REF_INPUT_DREFCLK;
4143 
4144 	/* setup pipeconf */
4145 	pipeconf = I915_READ(PIPECONF(pipe));
4146 
4147 	/* Set up the display plane register */
4148 	dspcntr = DISPPLANE_GAMMA_ENABLE;
4149 
4150 	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
4151 	drm_mode_debug_printmodeline(mode);
4152 
4153 	/* PCH eDP needs FDI, but CPU eDP does not */
4154 	if (!intel_crtc->no_pll) {
4155 		if (!has_edp_encoder ||
4156 		    intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
4157 			I915_WRITE(_PCH_FP0(pipe), fp);
4158 			I915_WRITE(_PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
4159 
4160 			POSTING_READ(_PCH_DPLL(pipe));
4161 			DELAY(150);
4162 		}
4163 	} else {
4164 		if (dpll == (I915_READ(_PCH_DPLL(0)) & 0x7fffffff) &&
4165 		    fp == I915_READ(_PCH_FP0(0))) {
4166 			intel_crtc->use_pll_a = true;
4167 			DRM_DEBUG_KMS("using pipe a dpll\n");
4168 		} else if (dpll == (I915_READ(_PCH_DPLL(1)) & 0x7fffffff) &&
4169 			   fp == I915_READ(_PCH_FP0(1))) {
4170 			intel_crtc->use_pll_a = false;
4171 			DRM_DEBUG_KMS("using pipe b dpll\n");
4172 		} else {
4173 			DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n");
4174 			return -EINVAL;
4175 		}
4176 	}
4177 
4178 	/* The LVDS pin pair needs to be on before the DPLLs are enabled.
4179 	 * This is an exception to the general rule that mode_set doesn't turn
4180 	 * things on.
4181 	 */
4182 	if (is_lvds) {
4183 		temp = I915_READ(PCH_LVDS);
4184 		temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
4185 		if (HAS_PCH_CPT(dev)) {
4186 			temp &= ~PORT_TRANS_SEL_MASK;
4187 			temp |= PORT_TRANS_SEL_CPT(pipe);
4188 		} else {
4189 			if (pipe == 1)
4190 				temp |= LVDS_PIPEB_SELECT;
4191 			else
4192 				temp &= ~LVDS_PIPEB_SELECT;
4193 		}
4194 
		/* set the corresponding LVDS_BORDER bit */
4196 		temp |= dev_priv->lvds_border_bits;
4197 		/* Set the B0-B3 data pairs corresponding to whether we're going to
4198 		 * set the DPLLs for dual-channel mode or not.
4199 		 */
4200 		if (clock.p2 == 7)
4201 			temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
4202 		else
4203 			temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
4204 
4205 		/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
4206 		 * appropriately here, but we need to look more thoroughly into how
4207 		 * panels behave in the two modes.
4208 		 */
4209 		if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
4210 			lvds_sync |= LVDS_HSYNC_POLARITY;
4211 		if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
4212 			lvds_sync |= LVDS_VSYNC_POLARITY;
4213 		if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
4214 		    != lvds_sync) {
4215 			char flags[2] = "-+";
4216 			DRM_INFO("Changing LVDS panel from "
4217 				 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
4218 				 flags[!(temp & LVDS_HSYNC_POLARITY)],
4219 				 flags[!(temp & LVDS_VSYNC_POLARITY)],
4220 				 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
4221 				 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
4222 			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
4223 			temp |= lvds_sync;
4224 		}
4225 		I915_WRITE(PCH_LVDS, temp);
4226 	}
4227 
4228 	pipeconf &= ~PIPECONF_DITHER_EN;
4229 	pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
4230 	if ((is_lvds && dev_priv->lvds_dither) || dither) {
4231 		pipeconf |= PIPECONF_DITHER_EN;
4232 		pipeconf |= PIPECONF_DITHER_TYPE_SP;
4233 	}
4234 	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
4235 		intel_dp_set_m_n(crtc, mode, adjusted_mode);
4236 	} else {
4237 		/* For non-DP output, clear any trans DP clock recovery setting.*/
4238 		I915_WRITE(TRANSDATA_M1(pipe), 0);
4239 		I915_WRITE(TRANSDATA_N1(pipe), 0);
4240 		I915_WRITE(TRANSDPLINK_M1(pipe), 0);
4241 		I915_WRITE(TRANSDPLINK_N1(pipe), 0);
4242 	}
4243 
4244 	if (!intel_crtc->no_pll &&
4245 	    (!has_edp_encoder ||
4246 	     intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
4247 		I915_WRITE(_PCH_DPLL(pipe), dpll);
4248 
4249 		/* Wait for the clocks to stabilize. */
4250 		POSTING_READ(_PCH_DPLL(pipe));
4251 		DELAY(150);
4252 
4253 		/* The pixel multiplier can only be updated once the
4254 		 * DPLL is enabled and the clocks are stable.
4255 		 *
4256 		 * So write it again.
4257 		 */
4258 		I915_WRITE(_PCH_DPLL(pipe), dpll);
4259 	}
4260 
4261 	intel_crtc->lowfreq_avail = false;
4262 	if (!intel_crtc->no_pll) {
4263 		if (is_lvds && has_reduced_clock && i915_powersave) {
4264 			I915_WRITE(_PCH_FP1(pipe), fp2);
4265 			intel_crtc->lowfreq_avail = true;
4266 			if (HAS_PIPE_CXSR(dev)) {
4267 				DRM_DEBUG_KMS("enabling CxSR downclocking\n");
4268 				pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
4269 			}
4270 		} else {
4271 			I915_WRITE(_PCH_FP1(pipe), fp);
4272 			if (HAS_PIPE_CXSR(dev)) {
4273 				DRM_DEBUG_KMS("disabling CxSR downclocking\n");
4274 				pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
4275 			}
4276 		}
4277 	}
4278 
4279 	pipeconf &= ~PIPECONF_INTERLACE_MASK;
4280 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4281 		pipeconf |= PIPECONF_INTERLACED_ILK;
4282 		/* the chip adds 2 halflines automatically */
4283 		adjusted_mode->crtc_vtotal -= 1;
4284 		adjusted_mode->crtc_vblank_end -= 1;
4285 		I915_WRITE(VSYNCSHIFT(pipe),
4286 			   adjusted_mode->crtc_hsync_start
4287 			   - adjusted_mode->crtc_htotal/2);
4288 	} else {
4289 		pipeconf |= PIPECONF_PROGRESSIVE;
4290 		I915_WRITE(VSYNCSHIFT(pipe), 0);
4291 	}
4292 
4293 	I915_WRITE(HTOTAL(pipe),
4294 		   (adjusted_mode->crtc_hdisplay - 1) |
4295 		   ((adjusted_mode->crtc_htotal - 1) << 16));
4296 	I915_WRITE(HBLANK(pipe),
4297 		   (adjusted_mode->crtc_hblank_start - 1) |
4298 		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
4299 	I915_WRITE(HSYNC(pipe),
4300 		   (adjusted_mode->crtc_hsync_start - 1) |
4301 		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
4302 
4303 	I915_WRITE(VTOTAL(pipe),
4304 		   (adjusted_mode->crtc_vdisplay - 1) |
4305 		   ((adjusted_mode->crtc_vtotal - 1) << 16));
4306 	I915_WRITE(VBLANK(pipe),
4307 		   (adjusted_mode->crtc_vblank_start - 1) |
4308 		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
4309 	I915_WRITE(VSYNC(pipe),
4310 		   (adjusted_mode->crtc_vsync_start - 1) |
4311 		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
4312 
4313 	/* pipesrc controls the size that is scaled from, which should
4314 	 * always be the user's requested size.
4315 	 */
4316 	I915_WRITE(PIPESRC(pipe),
4317 		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
4318 
4319 	I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
4320 	I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
4321 	I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
4322 	I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
4323 
4324 	if (has_edp_encoder &&
4325 	    !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
4326 		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
4327 	}
4328 
4329 	I915_WRITE(PIPECONF(pipe), pipeconf);
4330 	POSTING_READ(PIPECONF(pipe));
4331 
4332 	intel_wait_for_vblank(dev, pipe);
4333 
4334 	I915_WRITE(DSPCNTR(plane), dspcntr);
4335 	POSTING_READ(DSPCNTR(plane));
4336 
4337 	ret = intel_pipe_set_base(crtc, x, y, old_fb);
4338 
4339 	intel_update_watermarks(dev);
4340 
4341 	return ret;
4342 }
4343 
4344 static int intel_crtc_mode_set(struct drm_crtc *crtc,
4345 			       struct drm_display_mode *mode,
4346 			       struct drm_display_mode *adjusted_mode,
4347 			       int x, int y,
4348 			       struct drm_framebuffer *old_fb)
4349 {
4350 	struct drm_device *dev = crtc->dev;
4351 	struct drm_i915_private *dev_priv = dev->dev_private;
4352 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4353 	int pipe = intel_crtc->pipe;
4354 	int ret;
4355 
4356 	drm_vblank_pre_modeset(dev, pipe);
4357 
4358 	ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
4359 					      x, y, old_fb);
4360 	drm_vblank_post_modeset(dev, pipe);
4361 
4362 	if (ret)
4363 		intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
4364 	else
4365 		intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
4366 
4367 	return ret;
4368 }
4369 
/*
 * Check whether the ELD (EDID-Like Data, audio capability block) currently
 * programmed into the hardware matches connector->eld, so a rewrite can be
 * skipped.
 *
 * reg_eldv/bits_eldv: register and mask holding the "ELD valid" flag.
 * reg_elda/bits_elda: register and address field for the ELD access pointer.
 * reg_edid: data port that streams the ELD buffer one dword per read.
 *
 * Returns true when hardware state already matches the connector's ELD.
 */
static bool intel_eld_uptodate(struct drm_connector *connector,
			       int reg_eldv, uint32_t bits_eldv,
			       int reg_elda, uint32_t bits_elda,
			       int reg_edid)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t i;

	i = I915_READ(reg_eldv);
	i &= bits_eldv;

	/* No ELD on the connector: up to date only if hw has none either. */
	if (!eld[0])
		return !i;

	/* Connector has an ELD but hardware flags none: stale. */
	if (!i)
		return false;

	/* Reset the ELD read address to 0 before comparing the buffer. */
	i = I915_READ(reg_elda);
	i &= ~bits_elda;
	I915_WRITE(reg_elda, i);

	/* eld[2] is the ELD payload length; compare dword by dword.
	 * Note: 'i' is reused here as the loop index. */
	for (i = 0; i < eld[2]; i++)
		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
			return false;

	return true;
}
4398 
/*
 * Program the connector's ELD into the G4x audio hardware so the audio
 * driver can discover the monitor's audio capabilities.
 */
static void g4x_write_eld(struct drm_connector *connector,
			  struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t len;
	uint32_t i;

	/* Pick the ELD-valid bit matching the audio device variant. */
	i = I915_READ(G4X_AUD_VID_DID);

	if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
		eldv = G4X_ELDV_DEVCL_DEVBLC;
	else
		eldv = G4X_ELDV_DEVCTG;

	/* Skip the rewrite if the hardware already holds this ELD. */
	if (intel_eld_uptodate(connector,
			       G4X_AUD_CNTL_ST, eldv,
			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
			       G4X_HDMIW_HDMIEDID))
		return;

	/* Invalidate the current ELD and reset the write address to 0. */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i &= ~(eldv | G4X_ELD_ADDR);
	len = (i >> 9) & 0x1f;		/* ELD buffer size */
	I915_WRITE(G4X_AUD_CNTL_ST, i);

	/* Nothing to program if the connector carries no ELD. */
	if (!eld[0])
		return;

	/* Clamp to the smaller of hw buffer size and ELD payload length. */
	if (eld[2] < (uint8_t)len)
		len = eld[2];
	DRM_DEBUG_KMS("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));

	/* Mark the newly written ELD valid. */
	i = I915_READ(G4X_AUD_CNTL_ST);
	i |= eldv;
	I915_WRITE(G4X_AUD_CNTL_ST, i);
}
4439 
/*
 * Program the connector's ELD into the PCH (Ibex Peak / Cougar Point)
 * audio hardware for the pipe driving @crtc.
 */
static void ironlake_write_eld(struct drm_connector *connector,
				     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	uint8_t *eld = connector->eld;
	uint32_t eldv;
	uint32_t i;
	int len;
	int hdmiw_hdmiedid;
	int aud_config;
	int aud_cntl_st;
	int aud_cntrl_st2;

	/* IBX and CPT PCHs expose the same audio block at different
	 * register offsets; select the pipe-A base for the right one. */
	if (HAS_PCH_IBX(connector->dev)) {
		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
		aud_config = IBX_AUD_CONFIG_A;
		aud_cntl_st = IBX_AUD_CNTL_ST_A;
		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
	} else {
		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
		aud_config = CPT_AUD_CONFIG_A;
		aud_cntl_st = CPT_AUD_CNTL_ST_A;
		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
	}

	/* Per-pipe register instances are spaced 0x100 apart. */
	i = to_intel_crtc(crtc)->pipe;
	hdmiw_hdmiedid += i * 0x100;
	aud_cntl_st += i * 0x100;
	aud_config += i * 0x100;

	DRM_DEBUG_KMS("ELD on pipe %c\n", pipe_name(i));

	i = I915_READ(aud_cntl_st);
	i = (i >> 29) & 0x3;		/* DIP_Port_Select, 0x1 = PortB */
	if (!i) {
		DRM_DEBUG_KMS("Audio directed to unknown port\n");
		/* operate blindly on all ports */
		eldv = IBX_ELD_VALIDB;
		eldv |= IBX_ELD_VALIDB << 4;
		eldv |= IBX_ELD_VALIDB << 8;
	} else {
		DRM_DEBUG_KMS("ELD on port %c\n", 'A' + i);
		/* Valid bits for ports B/C/D are 4 bits apart. */
		eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
	}

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
		eld[5] |= (1 << 2);	/* Conn_Type, 0x1 = DisplayPort */
		I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
	} else
		I915_WRITE(aud_config, 0);

	/* Skip the rewrite if hardware already holds this ELD. */
	if (intel_eld_uptodate(connector,
			       aud_cntrl_st2, eldv,
			       aud_cntl_st, IBX_ELD_ADDRESS,
			       hdmiw_hdmiedid))
		return;

	/* Invalidate the stale ELD before rewriting it. */
	i = I915_READ(aud_cntrl_st2);
	i &= ~eldv;
	I915_WRITE(aud_cntrl_st2, i);

	if (!eld[0])
		return;

	/* Reset the ELD write address to the start of the buffer. */
	i = I915_READ(aud_cntl_st);
	i &= ~IBX_ELD_ADDRESS;
	I915_WRITE(aud_cntl_st, i);

	/* 84 bytes of hw ELD buffer */
	len = 21;
	if (eld[2] < (uint8_t)len)
		len = eld[2];
	DRM_DEBUG_KMS("ELD size %d\n", len);
	for (i = 0; i < len; i++)
		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));

	/* Mark the new ELD valid so the audio driver picks it up. */
	i = I915_READ(aud_cntrl_st2);
	i |= eldv;
	I915_WRITE(aud_cntrl_st2, i);
}
4521 
/*
 * Write the audio ELD for the monitor on @encoder's crtc into the display
 * hardware, if this platform provides a write_eld hook.
 */
void intel_write_eld(struct drm_encoder *encoder,
		     struct drm_display_mode *mode)
{
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_connector *connector;
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Find the (single) audio-capable connector fed by this encoder. */
	connector = drm_select_eld(encoder, mode);
	if (!connector)
		return;

	DRM_DEBUG_KMS("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
			 connector->base.id,
			 drm_get_connector_name(connector),
			 connector->encoder->base.id,
			 drm_get_encoder_name(connector->encoder));

	/* eld[6] is the A/V sync delay field; drm_av_sync_delay() returns
	 * milliseconds and the field presumably counts 2 ms units — hence
	 * the divide by 2 (TODO confirm against the ELD spec). */
	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;

	if (dev_priv->display.write_eld)
		dev_priv->display.write_eld(connector, crtc);
}
4545 
4546 /** Loads the palette/gamma unit for the CRTC with the prepared values */
4547 void intel_crtc_load_lut(struct drm_crtc *crtc)
4548 {
4549 	struct drm_device *dev = crtc->dev;
4550 	struct drm_i915_private *dev_priv = dev->dev_private;
4551 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4552 	int palreg = PALETTE(intel_crtc->pipe);
4553 	int i;
4554 
4555 	/* The clocks have to be on to load the palette. */
4556 	if (!crtc->enabled || !intel_crtc->active)
4557 		return;
4558 
4559 	/* use legacy palette for Ironlake */
4560 	if (HAS_PCH_SPLIT(dev))
4561 		palreg = LGC_PALETTE(intel_crtc->pipe);
4562 
4563 	for (i = 0; i < 256; i++) {
4564 		I915_WRITE(palreg + 4 * i,
4565 			   (intel_crtc->lut_r[i] << 16) |
4566 			   (intel_crtc->lut_g[i] << 8) |
4567 			   intel_crtc->lut_b[i]);
4568 	}
4569 }
4570 
/*
 * Show or hide the hardware cursor on 845G/865G. A base address of 0
 * means "hide"; a non-zero base both positions and enables the cursor.
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool visible = base != 0;
	u32 cntl;

	/* Visibility unchanged: nothing to reprogram. */
	if (intel_crtc->cursor_visible == visible)
		return;

	cntl = I915_READ(_CURACNTR);
	if (visible) {
		/* On these chipsets we can only modify the base whilst
		 * the cursor is disabled.
		 */
		I915_WRITE(_CURABASE, base);

		cntl &= ~(CURSOR_FORMAT_MASK);
		/* XXX width must be 64, stride 256 => 0x00 << 28 */
		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB;
	} else
		cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
	I915_WRITE(_CURACNTR, cntl);

	intel_crtc->cursor_visible = visible;
}
4600 
/*
 * Show, hide, or move the hardware cursor on i9xx-class pipes.
 * Unlike 845/865, the base register is latched on vblank, so it is
 * always rewritten; the control register only changes when visibility
 * flips.
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	if (intel_crtc->cursor_visible != visible) {
		uint32_t cntl = I915_READ(CURCNTR(pipe));
		if (base) {
			/* 64x64 ARGB cursor, routed to this pipe. */
			cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
			cntl |= pipe << 28; /* Connect to correct pipe */
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		I915_WRITE(CURCNTR(pipe), cntl);

		intel_crtc->cursor_visible = visible;
	}
	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
}
4626 
/*
 * Ivybridge variant of the cursor update: same flow as i9xx, but via the
 * IVB cursor registers, and without a pipe-select field in the control
 * register.
 */
static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	bool visible = base != 0;

	if (intel_crtc->cursor_visible != visible) {
		uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
		if (base) {
			/* 64x64 ARGB cursor with gamma correction. */
			cntl &= ~CURSOR_MODE;
			cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
		} else {
			cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
			cntl |= CURSOR_MODE_DISABLE;
		}
		I915_WRITE(CURCNTR_IVB(pipe), cntl);

		intel_crtc->cursor_visible = visible;
	}
	/* and commit changes on next vblank */
	I915_WRITE(CURBASE_IVB(pipe), base);
}
4651 
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
/*
 * Recompute the cursor's position register and (de)activate the cursor
 * plane. @on requests visibility; the cursor is still hidden when the
 * crtc is disabled, has no fb, or when the cursor lies fully outside
 * the framebuffer (base forced to 0 == invisible).
 */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int x = intel_crtc->cursor_x;
	int y = intel_crtc->cursor_y;
	u32 base, pos;
	bool visible;

	pos = 0;

	/* Hide the cursor if it has scrolled past the right/bottom edge. */
	if (on && crtc->enabled && crtc->fb) {
		base = intel_crtc->cursor_addr;
		if (x > (int) crtc->fb->width)
			base = 0;

		if (y > (int) crtc->fb->height)
			base = 0;
	} else
		base = 0;

	/* Position registers use sign-magnitude encoding; negative
	 * coordinates set the sign bit and store the absolute value.
	 * A cursor fully off the top/left edge is hidden as well. */
	if (x < 0) {
		if (x + intel_crtc->cursor_width < 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		if (y + intel_crtc->cursor_height < 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	/* Skip the register writes if it stays invisible. */
	visible = base != 0;
	if (!visible && !intel_crtc->cursor_visible)
		return;

	/* Dispatch to the generation-specific cursor programming. */
	if (IS_IVYBRIDGE(dev)) {
		I915_WRITE(CURPOS_IVB(pipe), pos);
		ivb_update_cursor(crtc, base);
	} else {
		I915_WRITE(CURPOS(pipe), pos);
		if (IS_845G(dev) || IS_I865G(dev))
			i845_update_cursor(crtc, base);
		else
			i9xx_update_cursor(crtc, base);
	}
}
4710 
4711 static int intel_crtc_cursor_set(struct drm_crtc *crtc,
4712 				 struct drm_file *file,
4713 				 uint32_t handle,
4714 				 uint32_t width, uint32_t height)
4715 {
4716 	struct drm_device *dev = crtc->dev;
4717 	struct drm_i915_private *dev_priv = dev->dev_private;
4718 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4719 	struct drm_i915_gem_object *obj;
4720 	uint32_t addr;
4721 	int ret;
4722 
4723 	DRM_DEBUG_KMS("\n");
4724 
4725 	/* if we want to turn off the cursor ignore width and height */
4726 	if (!handle) {
4727 		DRM_DEBUG_KMS("cursor off\n");
4728 		addr = 0;
4729 		obj = NULL;
4730 		DRM_LOCK(dev);
4731 		goto finish;
4732 	}
4733 
4734 	/* Currently we only support 64x64 cursors */
4735 	if (width != 64 || height != 64) {
4736 		DRM_ERROR("we currently only support 64x64 cursors\n");
4737 		return -EINVAL;
4738 	}
4739 
4740 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
4741 	if (&obj->base == NULL)
4742 		return -ENOENT;
4743 
4744 	if (obj->base.size < width * height * 4) {
4745 		DRM_ERROR("buffer is to small\n");
4746 		ret = -ENOMEM;
4747 		goto fail;
4748 	}
4749 
4750 	/* we only need to pin inside GTT if cursor is non-phy */
4751 	DRM_LOCK(dev);
4752 	if (!dev_priv->info->cursor_needs_physical) {
4753 		if (obj->tiling_mode) {
4754 			DRM_ERROR("cursor cannot be tiled\n");
4755 			ret = -EINVAL;
4756 			goto fail_locked;
4757 		}
4758 
4759 		ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
4760 		if (ret) {
4761 			DRM_ERROR("failed to move cursor bo into the GTT\n");
4762 			goto fail_locked;
4763 		}
4764 
4765 		ret = i915_gem_object_put_fence(obj);
4766 		if (ret) {
4767 			DRM_ERROR("failed to release fence for cursor\n");
4768 			goto fail_unpin;
4769 		}
4770 
4771 		addr = obj->gtt_offset;
4772 	} else {
4773 		int align = IS_I830(dev) ? 16 * 1024 : 256;
4774 		ret = i915_gem_attach_phys_object(dev, obj,
4775 						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
4776 						  align);
4777 		if (ret) {
4778 			DRM_ERROR("failed to attach phys object\n");
4779 			goto fail_locked;
4780 		}
4781 		addr = obj->phys_obj->handle->busaddr;
4782 	}
4783 
4784 	if (IS_GEN2(dev))
4785 		I915_WRITE(CURSIZE, (height << 12) | width);
4786 
4787  finish:
4788 	if (intel_crtc->cursor_bo) {
4789 		if (dev_priv->info->cursor_needs_physical) {
4790 			if (intel_crtc->cursor_bo != obj)
4791 				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
4792 		} else
4793 			i915_gem_object_unpin(intel_crtc->cursor_bo);
4794 		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
4795 	}
4796 
4797 	DRM_UNLOCK(dev);
4798 
4799 	intel_crtc->cursor_addr = addr;
4800 	intel_crtc->cursor_bo = obj;
4801 	intel_crtc->cursor_width = width;
4802 	intel_crtc->cursor_height = height;
4803 
4804 	intel_crtc_update_cursor(crtc, true);
4805 
4806 	return 0;
4807 fail_unpin:
4808 	i915_gem_object_unpin(obj);
4809 fail_locked:
4810 	DRM_UNLOCK(dev);
4811 fail:
4812 	drm_gem_object_unreference_unlocked(&obj->base);
4813 	return ret;
4814 }
4815 
4816 static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
4817 {
4818 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4819 
4820 	intel_crtc->cursor_x = x;
4821 	intel_crtc->cursor_y = y;
4822 
4823 	intel_crtc_update_cursor(crtc, true);
4824 
4825 	return 0;
4826 }
4827 
4828 /** Sets the color ramps on behalf of RandR */
4829 void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
4830 				 u16 blue, int regno)
4831 {
4832 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4833 
4834 	intel_crtc->lut_r[regno] = red >> 8;
4835 	intel_crtc->lut_g[regno] = green >> 8;
4836 	intel_crtc->lut_b[regno] = blue >> 8;
4837 }
4838 
4839 void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
4840 			     u16 *blue, int regno)
4841 {
4842 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4843 
4844 	*red = intel_crtc->lut_r[regno] << 8;
4845 	*green = intel_crtc->lut_g[regno] << 8;
4846 	*blue = intel_crtc->lut_b[regno] << 8;
4847 }
4848 
4849 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
4850 				 u16 *blue, uint32_t start, uint32_t size)
4851 {
4852 	int end = (start + size > 256) ? 256 : start + size, i;
4853 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4854 
4855 	for (i = start; i < end; i++) {
4856 		intel_crtc->lut_r[i] = red[i] >> 8;
4857 		intel_crtc->lut_g[i] = green[i] >> 8;
4858 		intel_crtc->lut_b[i] = blue[i] >> 8;
4859 	}
4860 
4861 	intel_crtc_load_lut(crtc);
4862 }
4863 
4864 /**
4865  * Get a pipe with a simple mode set on it for doing load-based monitor
4866  * detection.
4867  *
4868  * It will be up to the load-detect code to adjust the pipe as appropriate for
4869  * its requirements.  The pipe will be connected to no other encoders.
4870  *
4871  * Currently this code will only succeed if there is a pipe with no encoders
4872  * configured for it.  In the future, it could choose to temporarily disable
4873  * some outputs to free up a pipe for its use.
4874  *
4875  * \return crtc, or NULL if no pipes are available.
4876  */
4877 
/* VESA 640x480x72Hz mode to set on the pipe for load-based monitor
 * detection (see intel_get_load_detect_pipe). */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
4883 
4884 static struct drm_framebuffer *
4885 intel_framebuffer_create(struct drm_device *dev,
4886 			 struct drm_mode_fb_cmd2 *mode_cmd,
4887 			 struct drm_i915_gem_object *obj)
4888 {
4889 	struct intel_framebuffer *intel_fb;
4890 	int ret;
4891 
4892 	intel_fb = kmalloc(sizeof(*intel_fb), DRM_MEM_KMS, M_WAITOK | M_ZERO);
4893 	if (!intel_fb) {
4894 		drm_gem_object_unreference_unlocked(&obj->base);
4895 		return ERR_PTR(-ENOMEM);
4896 	}
4897 
4898 	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
4899 	if (ret) {
4900 		drm_gem_object_unreference_unlocked(&obj->base);
4901 		kfree(intel_fb, DRM_MEM_KMS);
4902 		return ERR_PTR(ret);
4903 	}
4904 
4905 	return &intel_fb->base;
4906 }
4907 
4908 static u32
4909 intel_framebuffer_pitch_for_width(int width, int bpp)
4910 {
4911 	u32 pitch = howmany(width * bpp, 8);
4912 	return roundup2(pitch, 64);
4913 }
4914 
4915 static u32
4916 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
4917 {
4918 	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
4919 	return roundup2(pitch * mode->vdisplay, PAGE_SIZE);
4920 }
4921 
4922 static struct drm_framebuffer *
4923 intel_framebuffer_create_for_mode(struct drm_device *dev,
4924 				  struct drm_display_mode *mode,
4925 				  int depth, int bpp)
4926 {
4927 	struct drm_i915_gem_object *obj;
4928 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
4929 
4930 	obj = i915_gem_alloc_object(dev,
4931 				    intel_framebuffer_size_for_mode(mode, bpp));
4932 	if (obj == NULL)
4933 		return ERR_PTR(-ENOMEM);
4934 
4935 	mode_cmd.width = mode->hdisplay;
4936 	mode_cmd.height = mode->vdisplay;
4937 	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
4938 								bpp);
4939 	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
4940 
4941 	return intel_framebuffer_create(dev, &mode_cmd, obj);
4942 }
4943 
4944 static int
4945 mode_fits_in_fbdev(struct drm_device *dev,
4946     struct drm_display_mode *mode, struct drm_framebuffer **res)
4947 {
4948 	struct drm_i915_private *dev_priv = dev->dev_private;
4949 	struct drm_i915_gem_object *obj;
4950 	struct drm_framebuffer *fb;
4951 
4952 	if (dev_priv->fbdev == NULL) {
4953 		*res = NULL;
4954 		return (0);
4955 	}
4956 
4957 	obj = dev_priv->fbdev->ifb.obj;
4958 	if (obj == NULL) {
4959 		*res = NULL;
4960 		return (0);
4961 	}
4962 
4963 	fb = &dev_priv->fbdev->ifb.base;
4964 	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
4965 	    fb->bits_per_pixel)) {
4966 		*res = NULL;
4967 		return (0);
4968 	}
4969 
4970 	if (obj->base.size < mode->vdisplay * fb->pitches[0]) {
4971 		*res = NULL;
4972 		return (0);
4973 	}
4974 
4975 	*res = fb;
4976 	return (0);
4977 }
4978 
bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
				struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old)
{
	struct intel_crtc *intel_crtc;
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *old_fb;
	int i = -1, r;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	/*
	 * Algorithm gets a little messy:
	 *
	 *   - if the connector already has an assigned crtc, use it (but make
	 *     sure it's on first)
	 *
	 *   - try to find the first unused crtc that can drive this connector,
	 *     and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		/* Record prior state so the caller can restore it later;
		 * load_detect_temp == false means nothing was borrowed. */
		intel_crtc = to_intel_crtc(crtc);
		old->dpms_mode = intel_crtc->dpms_mode;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
			struct drm_encoder_helper_funcs *encoder_funcs;
			struct drm_crtc_helper_funcs *crtc_funcs;

			crtc_funcs = crtc->helper_private;
			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);

			encoder_funcs = encoder->helper_private;
			encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
		}

		return true;
	}

	/* Find an unused one (if possible) */
	list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
		i++;
		/* Skip crtcs this encoder cannot drive. */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (!possible_crtc->enabled) {
			crtc = possible_crtc;
			break;
		}
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		return false;
	}

	/* Temporarily wire the borrowed crtc to this encoder/connector. */
	encoder->crtc = crtc;
	connector->encoder = encoder;

	intel_crtc = to_intel_crtc(crtc);
	old->dpms_mode = intel_crtc->dpms_mode;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	/* Fall back to the canned VESA 640x480 mode if none was given. */
	if (!mode)
		mode = &load_detect_mode;

	old_fb = crtc->fb;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	r = mode_fits_in_fbdev(dev, mode, &crtc->fb);
	if (crtc->fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		old->release_fb = crtc->fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	/* NOTE(review): on this path crtc->fb is left holding the ERR_PTR
	 * and old_fb is not restored — looks like a latent bug; confirm
	 * against newer upstream before changing. */
	if (IS_ERR(crtc->fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		return false;
	}

	if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		crtc->fb = old_fb;
		return false;
	}

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);

	return true;
}
5093 
/*
 * Undo intel_get_load_detect_pipe(): if a temporary pipe/framebuffer was
 * borrowed for load detection, detach and tear it down; otherwise restore
 * the DPMS state the encoder/CRTC had before detection started.
 */
void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
				    struct drm_connector *connector,
				    struct intel_load_detect_pipe *old)
{
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_device *dev = encoder->dev;
	struct drm_crtc *crtc = encoder->crtc;
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, drm_get_connector_name(connector),
		      encoder->base.id, drm_get_encoder_name(encoder));

	if (old->load_detect_temp) {
		/* Detach the connector and let the helper shut down the
		 * now-unused crtc/encoder pair.
		 */
		connector->encoder = NULL;
		drm_helper_disable_unused_functions(dev);

		/* Destroy the framebuffer created purely for detection. */
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);

		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON) {
		encoder_funcs->dpms(encoder, old->dpms_mode);
		crtc_funcs->dpms(crtc, old->dpms_mode);
	}
}
5124 
/* Returns the clock of the currently programmed mode of the given pipe,
 * reconstructed from the live DPLL/FP divider registers.  Returns 0 if
 * the DPLL mode cannot be decoded.
 */
static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 dpll = I915_READ(DPLL(pipe));
	u32 fp;
	intel_clock_t clock;

	/* Read whichever FP divider register the PLL currently selects. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = I915_READ(FP0(pipe));
	else
		fp = I915_READ(FP1(pipe));

	/* Decode M1/M2/N; Pineview encodes N as a one-hot field (hence
	 * ffs()) and uses a different M2 mask.
	 */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 is stored as a bitmask of the selected divisor. */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the PLL operating mode. */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return 0;
		}

		/* XXX: Handle the 100Mhz refclk */
		intel_clock(dev, 96000, &clock);
	} else {
		/* Gen2: pipe B may drive LVDS, which has its own divider
		 * encoding and reference clock selection.
		 */
		bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);
			clock.p2 = 14;

			if ((dpll & PLL_REF_INPUT_MASK) ==
			    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
				/* XXX: might not be 66MHz */
				intel_clock(dev, 66000, &clock);
			} else
				intel_clock(dev, 48000, &clock);
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;

			intel_clock(dev, 48000, &clock);
		}
	}

	/* XXX: It would be nice to validate the clocks, but we can't reuse
	 * i830PllIsValid() because it relies on the xf86_config connector
	 * configuration being accurate, which it isn't necessarily.
	 */

	return clock.dot;
}
5211 
/** Returns the currently programmed mode of the given pipe.
 *
 * Reconstructs a drm_display_mode from the live pipe timing registers.
 * The caller owns the returned (freshly allocated) mode.
 */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct drm_display_mode *mode;
	/* Snapshot the pipe timing registers. */
	int htot = I915_READ(HTOTAL(pipe));
	int hsync = I915_READ(HSYNC(pipe));
	int vtot = I915_READ(VTOTAL(pipe));
	int vsync = I915_READ(VSYNC(pipe));

	/* M_WAITOK: sleeps until memory is available, so no NULL check. */
	mode = kmalloc(sizeof(*mode), DRM_MEM_KMS, M_WAITOK | M_ZERO);

	mode->clock = intel_crtc_clock_get(dev, crtc);
	/* Each register packs two values in its low/high halfwords, each
	 * stored as the mode value minus one.
	 */
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);
	drm_mode_set_crtcinfo(mode, 0);

	return mode;
}
5242 
/*
 * Restore an LVDS panel to its full dotclock by clearing the FPA1
 * rate-select bit in the pipe's DPLL.  No-op on PCH-split hardware or
 * when LVDS downclocking was never made available.
 */
static void intel_increase_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int dpll_reg = DPLL(pipe);
	int dpll;

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	dpll = I915_READ(dpll_reg);
	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
		DRM_DEBUG_DRIVER("upclocking LVDS\n");

		/* Sanity-check that panel register protection is off. */
		assert_panel_unlocked(dev_priv, pipe);

		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		/* Give the change a full frame to take effect. */
		intel_wait_for_vblank(dev, pipe);

		/* Read back to verify the bit actually cleared. */
		dpll = I915_READ(dpll_reg);
		if (dpll & DISPLAY_RATE_SELECT_FPA1)
			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
	}
}
5273 
/*
 * Drop an idle LVDS panel to its reduced dotclock by setting the FPA1
 * rate-select bit.  Counterpart of intel_increase_pllclock(); no-op on
 * PCH-split hardware or when downclocking is unavailable.
 */
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (HAS_PCH_SPLIT(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	/*
	 * Since this is called by a timer, we should never get here in
	 * the manual case.
	 */
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
		int pipe = intel_crtc->pipe;
		int dpll_reg = DPLL(pipe);
		u32 dpll;

		DRM_DEBUG_DRIVER("downclocking LVDS\n");

		/* Sanity-check that panel register protection is off. */
		assert_panel_unlocked(dev_priv, pipe);

		dpll = I915_READ(dpll_reg);
		dpll |= DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		/* Wait a frame, then read back to confirm the switch. */
		intel_wait_for_vblank(dev, pipe);
		dpll = I915_READ(dpll_reg);
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
	}
}
5308 
/* GPU-activity hook: refresh the power accounting via
 * i915_update_gfx_val().
 */
void intel_mark_busy(struct drm_device *dev)
{
	i915_update_gfx_val(dev->dev_private);
}
5313 
5314 void intel_mark_idle(struct drm_device *dev)
5315 {
5316 	struct drm_crtc *crtc;
5317 
5318 	if (!i915_powersave)
5319 		return;
5320 
5321 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5322 		if (!crtc->fb)
5323 			continue;
5324 
5325 		intel_decrease_pllclock(crtc);
5326 	}
5327 }
5328 
/*
 * drm_crtc_funcs.destroy callback: retire any pending page-flip work,
 * clean up the DRM CRTC and free the intel_crtc allocation.
 */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;

	/* Claim the pending unpin work under the event lock so the flip
	 * completion path cannot hand it off concurrently.
	 */
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	lockmgr(&dev->event_lock, LK_RELEASE);

	if (work) {
		/* Cancel/flush the deferred unpin before freeing it. */
		cancel_work_sync(&work->work);
		kfree(work, DRM_MEM_KMS);
	}

	drm_crtc_cleanup(crtc);

	drm_free(intel_crtc, DRM_MEM_KMS);
}
5349 
/*
 * Deferred flip-completion work: unpins the old framebuffer object,
 * drops the references taken when the flip was queued and re-evaluates
 * FBC, all under the device lock.  Frees the work item when done.
 */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
				container_of(__work, struct intel_unpin_work, work);
	struct drm_device *dev;

	dev = work->dev;
	DRM_LOCK(dev);
	intel_unpin_fb_obj(work->old_fb_obj);
	drm_gem_object_unreference(&work->pending_flip_obj->base);
	drm_gem_object_unreference(&work->old_fb_obj->base);

	/* The scanout buffer changed; recheck FBC eligibility. */
	intel_update_fbc(work->dev);
	DRM_UNLOCK(dev);
	drm_free(work, DRM_MEM_KMS);
}
5366 
/*
 * Common flip-completion handler, called from the interrupt path:
 * retires the pending flip on @crtc, delivers the userspace event,
 * unblocks renderers waiting on the old buffer's pending_flip mask and
 * queues the deferred unpin work.
 */
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	struct drm_i915_gem_object *obj;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	work = intel_crtc->unpin_work;
	/* Nothing to do if no flip is outstanding, or if it has not been
	 * armed by intel_prepare_page_flip() yet.
	 */
	if (work == NULL || !atomic_read(&work->pending)) {
		lockmgr(&dev->event_lock, LK_RELEASE);
		return;
	}

	intel_crtc->unpin_work = NULL;

	/* Deliver the completion event, if userspace asked for one. */
	if (work->event)
		drm_send_vblank_event(dev, intel_crtc->pipe, work->event);

	drm_vblank_put(dev, intel_crtc->pipe);

	lockmgr(&dev->event_lock, LK_RELEASE);

	obj = work->old_fb_obj;

	/* The old buffer is no longer scanned out by this plane; wake any
	 * thread throttled on the flip.
	 */
	atomic_clear_mask(1 << intel_crtc->plane,
			  &obj->pending_flip.counter);
	wakeup(&obj->pending_flip);

	queue_work(dev_priv->wq, &work->work);
}
5403 
5404 void intel_finish_page_flip(struct drm_device *dev, int pipe)
5405 {
5406 	drm_i915_private_t *dev_priv = dev->dev_private;
5407 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
5408 
5409 	do_intel_finish_page_flip(dev, crtc);
5410 }
5411 
5412 void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
5413 {
5414 	drm_i915_private_t *dev_priv = dev->dev_private;
5415 	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
5416 
5417 	do_intel_finish_page_flip(dev, crtc);
5418 }
5419 
/*
 * Arm the pending flip on @plane's CRTC so the subsequent completion
 * interrupt will retire it.
 *
 * NOTE(review): work->pending is zero-initialized (M_ZERO) in
 * intel_crtc_page_flip() and atomic_inc_not_zero() does not increment
 * from zero — confirm another path raises it before this runs.
 */
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);

	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	if (intel_crtc->unpin_work)
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
	lockmgr(&dev->event_lock, LK_RELEASE);
}
5431 
5432 static int intel_gen2_queue_flip(struct drm_device *dev,
5433 				 struct drm_crtc *crtc,
5434 				 struct drm_framebuffer *fb,
5435 				 struct drm_i915_gem_object *obj)
5436 {
5437 	struct drm_i915_private *dev_priv = dev->dev_private;
5438 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5439 	unsigned long offset;
5440 	u32 flip_mask;
5441 	int ret;
5442 
5443 	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
5444 	if (ret)
5445 		goto out;
5446 
5447 	/* Offset into the new buffer for cases of shared fbs between CRTCs */
5448 	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
5449 
5450 	ret = BEGIN_LP_RING(6);
5451 	if (ret)
5452 		goto out;
5453 
5454 	/* Can't queue multiple flips, so wait for the previous
5455 	 * one to finish before executing the next.
5456 	 */
5457 	if (intel_crtc->plane)
5458 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
5459 	else
5460 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
5461 	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
5462 	OUT_RING(MI_NOOP);
5463 	OUT_RING(MI_DISPLAY_FLIP |
5464 		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5465 	OUT_RING(fb->pitches[0]);
5466 	OUT_RING(obj->gtt_offset + offset);
5467 	OUT_RING(0); /* aux display base address, unused */
5468 	ADVANCE_LP_RING();
5469 out:
5470 	return ret;
5471 }
5472 
5473 static int intel_gen3_queue_flip(struct drm_device *dev,
5474 				 struct drm_crtc *crtc,
5475 				 struct drm_framebuffer *fb,
5476 				 struct drm_i915_gem_object *obj)
5477 {
5478 	struct drm_i915_private *dev_priv = dev->dev_private;
5479 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5480 	unsigned long offset;
5481 	u32 flip_mask;
5482 	int ret;
5483 
5484 	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
5485 	if (ret)
5486 		goto out;
5487 
5488 	/* Offset into the new buffer for cases of shared fbs between CRTCs */
5489 	offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
5490 
5491 	ret = BEGIN_LP_RING(6);
5492 	if (ret)
5493 		goto out;
5494 
5495 	if (intel_crtc->plane)
5496 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
5497 	else
5498 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
5499 	OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
5500 	OUT_RING(MI_NOOP);
5501 	OUT_RING(MI_DISPLAY_FLIP_I915 |
5502 		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5503 	OUT_RING(fb->pitches[0]);
5504 	OUT_RING(obj->gtt_offset + offset);
5505 	OUT_RING(MI_NOOP);
5506 
5507 	ADVANCE_LP_RING();
5508 out:
5509 	return ret;
5510 }
5511 
5512 static int intel_gen4_queue_flip(struct drm_device *dev,
5513 				 struct drm_crtc *crtc,
5514 				 struct drm_framebuffer *fb,
5515 				 struct drm_i915_gem_object *obj)
5516 {
5517 	struct drm_i915_private *dev_priv = dev->dev_private;
5518 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5519 	uint32_t pf, pipesrc;
5520 	int ret;
5521 
5522 	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
5523 	if (ret)
5524 		goto out;
5525 
5526 	ret = BEGIN_LP_RING(4);
5527 	if (ret)
5528 		goto out;
5529 
5530 	/* i965+ uses the linear or tiled offsets from the
5531 	 * Display Registers (which do not change across a page-flip)
5532 	 * so we need only reprogram the base address.
5533 	 */
5534 	OUT_RING(MI_DISPLAY_FLIP |
5535 		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5536 	OUT_RING(fb->pitches[0]);
5537 	OUT_RING(obj->gtt_offset | obj->tiling_mode);
5538 
5539 	/* XXX Enabling the panel-fitter across page-flip is so far
5540 	 * untested on non-native modes, so ignore it for now.
5541 	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
5542 	 */
5543 	pf = 0;
5544 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
5545 	OUT_RING(pf | pipesrc);
5546 	ADVANCE_LP_RING();
5547 out:
5548 	return ret;
5549 }
5550 
5551 static int intel_gen6_queue_flip(struct drm_device *dev,
5552 				 struct drm_crtc *crtc,
5553 				 struct drm_framebuffer *fb,
5554 				 struct drm_i915_gem_object *obj)
5555 {
5556 	struct drm_i915_private *dev_priv = dev->dev_private;
5557 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5558 	uint32_t pf, pipesrc;
5559 	int ret;
5560 
5561 	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
5562 	if (ret)
5563 		goto out;
5564 
5565 	ret = BEGIN_LP_RING(4);
5566 	if (ret)
5567 		goto out;
5568 
5569 	OUT_RING(MI_DISPLAY_FLIP |
5570 		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5571 	OUT_RING(fb->pitches[0] | obj->tiling_mode);
5572 	OUT_RING(obj->gtt_offset);
5573 
5574 	/* Contrary to the suggestions in the documentation,
5575 	 * "Enable Panel Fitter" does not seem to be required when page
5576 	 * flipping with a non-native mode, and worse causes a normal
5577 	 * modeset to fail.
5578 	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
5579 	 */
5580 	pf = 0;
5581 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
5582 	OUT_RING(pf | pipesrc);
5583 	ADVANCE_LP_RING();
5584 out:
5585 	return ret;
5586 }
5587 
5588 /*
5589  * On gen7 we currently use the blit ring because (in early silicon at least)
5590  * the render ring doesn't give us interrpts for page flip completion, which
5591  * means clients will hang after the first flip is queued.  Fortunately the
5592  * blit ring generates interrupts properly, so use it instead.
5593  */
5594 static int intel_gen7_queue_flip(struct drm_device *dev,
5595 				 struct drm_crtc *crtc,
5596 				 struct drm_framebuffer *fb,
5597 				 struct drm_i915_gem_object *obj)
5598 {
5599 	struct drm_i915_private *dev_priv = dev->dev_private;
5600 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5601 	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
5602 	int ret;
5603 
5604 	ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
5605 	if (ret)
5606 		goto out;
5607 
5608 	ret = intel_ring_begin(ring, 4);
5609 	if (ret)
5610 		goto out;
5611 
5612 	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
5613 	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
5614 	intel_ring_emit(ring, (obj->gtt_offset));
5615 	intel_ring_emit(ring, (MI_NOOP));
5616 	intel_ring_advance(ring);
5617 out:
5618 	return ret;
5619 }
5620 
/* Fallback for chipsets without a page-flip implementation. */
static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj)
{
	return -ENODEV;
}
5628 
/*
 * drm_crtc_funcs.page_flip callback: queue an asynchronous flip of
 * @crtc to @fb.  On success the flip command is in the ring; completion
 * (event delivery, unpinning the old buffer) happens later via
 * do_intel_finish_page_flip().
 */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	int ret;

	/* M_WAITOK | M_ZERO: sleeps rather than failing; all work fields
	 * (including work->pending) start zeroed.
	 */
	work = kmalloc(sizeof *work, DRM_MEM_KMS, M_WAITOK | M_ZERO);

	work->event = event;
	work->dev = crtc->dev;
	intel_fb = to_intel_framebuffer(crtc->fb);
	work->old_fb_obj = intel_fb->obj;
	INIT_WORK(&work->work, intel_unpin_work_fn);

	/* Hold a vblank reference until the flip completes. */
	ret = drm_vblank_get(dev, intel_crtc->pipe);
	if (ret)
		goto free_work;

	/* We borrow the event spin lock for protecting unpin_work */
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	if (intel_crtc->unpin_work) {
		/* Only one flip may be outstanding per CRTC. */
		lockmgr(&dev->event_lock, LK_RELEASE);
		drm_free(work, DRM_MEM_KMS);
		drm_vblank_put(dev, intel_crtc->pipe);

		DRM_DEBUG("flip queue: crtc already busy\n");
		return -EBUSY;
	}
	intel_crtc->unpin_work = work;
	lockmgr(&dev->event_lock, LK_RELEASE);

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	DRM_LOCK(dev);

	/* Reference the objects for the scheduled work. */
	drm_gem_object_reference(&work->old_fb_obj->base);
	drm_gem_object_reference(&obj->base);

	crtc->fb = fb;

	work->pending_flip_obj = obj;

	work->enable_stall_check = true;

	/* Block clients from rendering to the new back buffer until
	 * the flip occurs and the object is no longer visible.
	 */
	atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);

	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
	if (ret)
		goto cleanup_pending;
	intel_disable_fbc(dev);
	DRM_UNLOCK(dev);

	return 0;

cleanup_pending:
	/* Roll back: drop the pending_flip bit, the object references and
	 * the unpin_work claim taken above.
	 */
	atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
	drm_gem_object_unreference(&work->old_fb_obj->base);
	drm_gem_object_unreference(&obj->base);
	DRM_UNLOCK(dev);

	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	intel_crtc->unpin_work = NULL;
	lockmgr(&dev->event_lock, LK_RELEASE);

	drm_vblank_put(dev, intel_crtc->pipe);
free_work:
	drm_free(work, DRM_MEM_KMS);

	return ret;
}
5710 
5711 static void intel_sanitize_modesetting(struct drm_device *dev,
5712 				       int pipe, int plane)
5713 {
5714 	struct drm_i915_private *dev_priv = dev->dev_private;
5715 	u32 reg, val;
5716 
5717 	/* Clear any frame start delays used for debugging left by the BIOS */
5718 	for_each_pipe(pipe) {
5719 		reg = PIPECONF(pipe);
5720 		I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
5721 	}
5722 
5723 	if (HAS_PCH_SPLIT(dev))
5724 		return;
5725 
5726 	/* Who knows what state these registers were left in by the BIOS or
5727 	 * grub?
5728 	 *
5729 	 * If we leave the registers in a conflicting state (e.g. with the
5730 	 * display plane reading from the other pipe than the one we intend
5731 	 * to use) then when we attempt to teardown the active mode, we will
5732 	 * not disable the pipes and planes in the correct order -- leaving
5733 	 * a plane reading from a disabled pipe and possibly leading to
5734 	 * undefined behaviour.
5735 	 */
5736 
5737 	reg = DSPCNTR(plane);
5738 	val = I915_READ(reg);
5739 
5740 	if ((val & DISPLAY_PLANE_ENABLE) == 0)
5741 		return;
5742 	if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
5743 		return;
5744 
5745 	/* This display plane is active and attached to the other CPU pipe. */
5746 	pipe = !pipe;
5747 
5748 	/* Disable the plane and wait for it to stop reading from the pipe. */
5749 	intel_disable_plane(dev_priv, plane, pipe);
5750 	intel_disable_pipe(dev_priv, pipe);
5751 }
5752 
/*
 * drm_crtc_funcs.reset callback: bring the CRTC's software state back
 * to a known baseline and undo conflicting BIOS configuration.
 */
static void intel_crtc_reset(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Reset flags back to the 'unknown' status so that they
	 * will be correctly set on the initial modeset.
	 */
	intel_crtc->dpms_mode = -1;

	/* We need to fix up any BIOS configuration that conflicts with
	 * our expectations.
	 */
	intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
}
5768 
/* CRTC helper vtable.  Deliberately not const: .prepare and .commit are
 * filled in per-platform by intel_crtc_init().
 */
static struct drm_crtc_helper_funcs intel_helper_funcs = {
	.dpms = intel_crtc_dpms,
	.mode_fixup = intel_crtc_mode_fixup,
	.mode_set = intel_crtc_mode_set,
	.mode_set_base = intel_pipe_set_base,
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
	.disable = intel_crtc_disable,
};
5778 
/* Core drm_crtc operations for Intel CRTCs. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.reset = intel_crtc_reset,
	.cursor_set = intel_crtc_cursor_set,
	.cursor_move = intel_crtc_cursor_move,
	.gamma_set = intel_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
};
5788 
/*
 * Allocate and register the intel_crtc for @pipe: gamma ramp,
 * pipe/plane bookkeeping and the per-platform helper callbacks.
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	int i;

	/* Trailing space for the fbdev connector array; M_WAITOK sleeps
	 * instead of returning NULL.
	 */
	intel_crtc = kmalloc(sizeof(struct intel_crtc) +
	    (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
	    DRM_MEM_KMS, M_WAITOK | M_ZERO);

	drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);

	/* Start with an identity gamma ramp. */
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/* Swap pipes & planes for FBC on pre-965 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	KASSERT(pipe < DRM_ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) &&
	    dev_priv->plane_to_crtc_mapping[intel_crtc->plane] == NULL,
	    ("plane_to_crtc is already initialized"));
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	intel_crtc_reset(&intel_crtc->base);
	intel_crtc->active = true; /* force the pipe off on setup_init_config */
	intel_crtc->bpp = 24; /* default for pre-Ironlake */

	/* Patch the shared (non-const) helper vtable with the platform's
	 * prepare/commit hooks before registering it.
	 */
	if (HAS_PCH_SPLIT(dev)) {
		if (pipe == 2 && IS_IVYBRIDGE(dev))
			intel_crtc->no_pll = true;
		intel_helper_funcs.prepare = ironlake_crtc_prepare;
		intel_helper_funcs.commit = ironlake_crtc_commit;
	} else {
		intel_helper_funcs.prepare = i9xx_crtc_prepare;
		intel_helper_funcs.commit = i9xx_crtc_commit;
	}

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_crtc->busy = false;

	callout_init_mp(&intel_crtc->idle_callout);
}
5842 
5843 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
5844 				struct drm_file *file)
5845 {
5846 	drm_i915_private_t *dev_priv = dev->dev_private;
5847 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
5848 	struct drm_mode_object *drmmode_obj;
5849 	struct intel_crtc *crtc;
5850 
5851 	if (!dev_priv) {
5852 		DRM_ERROR("called with no initialization\n");
5853 		return -EINVAL;
5854 	}
5855 
5856 	drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
5857 			DRM_MODE_OBJECT_CRTC);
5858 
5859 	if (!drmmode_obj) {
5860 		DRM_ERROR("no such CRTC id\n");
5861 		return -EINVAL;
5862 	}
5863 
5864 	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
5865 	pipe_from_crtc_id->pipe = crtc->pipe;
5866 
5867 	return 0;
5868 }
5869 
5870 static int intel_encoder_clones(struct drm_device *dev, int type_mask)
5871 {
5872 	struct intel_encoder *encoder;
5873 	int index_mask = 0;
5874 	int entry = 0;
5875 
5876 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
5877 		if (type_mask & encoder->clone_mask)
5878 			index_mask |= (1 << entry);
5879 		entry++;
5880 	}
5881 
5882 	return index_mask;
5883 }
5884 
5885 static bool has_edp_a(struct drm_device *dev)
5886 {
5887 	struct drm_i915_private *dev_priv = dev->dev_private;
5888 
5889 	if (!IS_MOBILE(dev))
5890 		return false;
5891 
5892 	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
5893 		return false;
5894 
5895 	if (IS_GEN5(dev) &&
5896 	    (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
5897 		return false;
5898 
5899 	return true;
5900 }
5901 
/*
 * Probe and register every display output present on this platform:
 * LVDS first, then eDP/CRT, then the per-port SDVO/HDMI/DP multiplexed
 * probes (PCH vs. non-PCH paths differ).  Finally wire up the
 * possible_crtcs/possible_clones masks and disable anything unused.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;
	bool has_lvds;

	has_lvds = intel_lvds_init(dev);
	if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
		/* disable the panel fitter on everything but LVDS */
		I915_WRITE(PFIT_CONTROL, 0);
	}

	if (HAS_PCH_SPLIT(dev)) {
		dpd_is_edp = intel_dpd_is_edp(dev);

		/* eDP ports are registered before CRT so they come first
		 * in the connector list.
		 */
		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A);

		if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D);
	}

	intel_crt_init(dev);

	if (HAS_PCH_SPLIT(dev)) {
		int found;

		DRM_DEBUG_KMS(
"HDMIB %d PCH_DP_B %d HDMIC %d HDMID %d PCH_DP_C %d PCH_DP_D %d LVDS %d\n",
		    (I915_READ(HDMIB) & PORT_DETECTED) != 0,
		    (I915_READ(PCH_DP_B) & DP_DETECTED) != 0,
		    (I915_READ(HDMIC) & PORT_DETECTED) != 0,
		    (I915_READ(HDMID) & PORT_DETECTED) != 0,
		    (I915_READ(PCH_DP_C) & DP_DETECTED) != 0,
		    (I915_READ(PCH_DP_D) & DP_DETECTED) != 0,
		    (I915_READ(PCH_LVDS) & LVDS_DETECTED) != 0);

		if (I915_READ(HDMIB) & PORT_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB);
			if (!found)
				intel_hdmi_init(dev, HDMIB);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B);
		}

		if (I915_READ(HDMIC) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMIC);

		if (I915_READ(HDMID) & PORT_DETECTED)
			intel_hdmi_init(dev, HDMID);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C);

		/* DP_D was already registered above when it is eDP. */
		if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
			intel_dp_init(dev, PCH_DP_D);

	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
		bool found = false;

		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, SDVOB);
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, SDVOB);
			}

			if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_B\n");
				intel_dp_init(dev, DP_B);
			}
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, SDVOC);
		}

		if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {

			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, SDVOC);
			}
			if (SUPPORTS_INTEGRATED_DP(dev)) {
				DRM_DEBUG_KMS("probing DP_C\n");
				intel_dp_init(dev, DP_C);
			}
		}

		if (SUPPORTS_INTEGRATED_DP(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED)) {
			DRM_DEBUG_KMS("probing DP_D\n");
			intel_dp_init(dev, DP_D);
		}
	} else if (IS_GEN2(dev)) {
		/* DVO support is not ported on this platform yet. */
#if 1
		KIB_NOTYET();
#else
		intel_dvo_init(dev);
#endif
	}

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	/* Fill in the crtc/clone compatibility masks now that every
	 * encoder has been registered.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(dev, encoder->clone_mask);
	}

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(dev);

	if (HAS_PCH_SPLIT(dev))
		ironlake_init_pch_refclk(dev);
}
6025 
/*
 * drm_framebuffer_funcs.destroy callback: unregister the fb, drop the
 * reference on its backing GEM object and free the wrapper.
 */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	drm_gem_object_unreference_unlocked(&intel_fb->obj->base);

	drm_free(intel_fb, DRM_MEM_KMS);
}
6035 
6036 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
6037 						struct drm_file *file,
6038 						unsigned int *handle)
6039 {
6040 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
6041 	struct drm_i915_gem_object *obj = intel_fb->obj;
6042 
6043 	return drm_gem_handle_create(file, &obj->base, handle);
6044 }
6045 
/* Framebuffer operations for GEM-backed Intel framebuffers. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
};
6050 
/*
 * Initialize an intel_framebuffer from a userspace fb request after
 * validating the backing object's tiling, the pitch alignment and the
 * pixel format.  Returns 0 or a negative errno.  On success the fb
 * records @obj; no extra object reference is taken here.
 */
int intel_framebuffer_init(struct drm_device *dev,
			   struct intel_framebuffer *intel_fb,
			   struct drm_mode_fb_cmd2 *mode_cmd,
			   struct drm_i915_gem_object *obj)
{
	int ret;

	/* Y tiling cannot be scanned out. */
	if (obj->tiling_mode == I915_TILING_Y)
		return -EINVAL;

	/* Scanout requires a 64-byte aligned stride. */
	if (mode_cmd->pitches[0] & 63)
		return -EINVAL;

	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_RGB332:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		/* RGB formats are common across chipsets */
		break;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_VYUY:
		/* Packed YUV variants are accepted as-is. */
		break;
	default:
		DRM_DEBUG_KMS("unsupported pixel format %u\n",
				mode_cmd->pixel_format);
		return -EINVAL;
	}

	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}

	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
	intel_fb->obj = obj;
	return 0;
}
6095 
6096 static struct drm_framebuffer *
6097 intel_user_framebuffer_create(struct drm_device *dev,
6098 			      struct drm_file *filp,
6099 			      struct drm_mode_fb_cmd2 *mode_cmd)
6100 {
6101 	struct drm_i915_gem_object *obj;
6102 
6103 	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
6104 						mode_cmd->handles[0]));
6105 	if (&obj->base == NULL)
6106 		return ERR_PTR(-ENOENT);
6107 
6108 	return intel_framebuffer_create(dev, mode_cmd, obj);
6109 }
6110 
/* Mode-config callbacks: userspace fb creation and fbdev hotplug repoll. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fb_output_poll_changed,
};
6115 
/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* We always want a DPMS function */
	if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.dpms = ironlake_crtc_dpms;
		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
		dev_priv->display.update_plane = ironlake_update_plane;
	} else {
		dev_priv->display.dpms = i9xx_crtc_dpms;
		dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
		dev_priv->display.update_plane = i9xx_update_plane;
	}

	/* Framebuffer compression hooks, only where the hardware has FBC. */
	if (I915_HAS_FBC(dev)) {
		if (HAS_PCH_SPLIT(dev)) {
			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
			dev_priv->display.enable_fbc = ironlake_enable_fbc;
			dev_priv->display.disable_fbc = ironlake_disable_fbc;
		} else if (IS_GM45(dev)) {
			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
			dev_priv->display.enable_fbc = g4x_enable_fbc;
			dev_priv->display.disable_fbc = g4x_disable_fbc;
		} else if (IS_CRESTLINE(dev)) {
			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
			dev_priv->display.enable_fbc = i8xx_enable_fbc;
			dev_priv->display.disable_fbc = i8xx_disable_fbc;
		}
		/* 855GM needs testing */
	}

	/* Returns the core display clock speed */
	if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_I915G(dev))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_I915GM(dev))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev))
		dev_priv->display.get_display_clock_speed =
			i855_get_display_clock_speed;
	else /* 852, 830 */
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;

	/* For FIFO watermark updates */
	if (HAS_PCH_SPLIT(dev)) {

		/* IVB configs may use multi-threaded forcewake */
		if (IS_IVYBRIDGE(dev)) {
			u32	ecobus;

			/* A small trick here - if the bios hasn't configured MT forcewake,
			 * and if the device is in RC6, then force_wake_mt_get will not wake
			 * the device and the ECOBUS read will return zero. Which will be
			 * (correctly) interpreted by the test below as MT forcewake being
			 * disabled.
			 */
			DRM_LOCK(dev);
			ecobus = I915_READ_NOTRACE(ECOBUS);
			DRM_UNLOCK(dev);

			/* NOTE(review): only logs the result here; nothing in this
			 * function switches forcewake vfuncs on the outcome. */
			if (ecobus & FORCEWAKE_MT_ENABLE) {
				DRM_DEBUG_KMS("Using MT version of forcewake\n");
			}
		}

		/* PCH (south display) clock gating differs by PCH generation. */
		if (HAS_PCH_IBX(dev))
			dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
		else if (HAS_PCH_CPT(dev))
			dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;

		if (IS_GEN5(dev)) {
			if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
				dev_priv->display.update_wm = ironlake_update_wm;
			else {
				DRM_DEBUG_KMS("Failed to get proper latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else if (IS_GEN6(dev)) {
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else if (IS_IVYBRIDGE(dev)) {
			/* FIXME: detect B0+ stepping and use auto training */
			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
			if (SNB_READ_WM0_LATENCY()) {
				dev_priv->display.update_wm = sandybridge_update_wm;
				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
			} else {
				DRM_DEBUG_KMS("Failed to read display plane latency. "
					      "Disable CxSR\n");
				dev_priv->display.update_wm = NULL;
			}
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
			dev_priv->display.write_eld = ironlake_write_eld;
		} else
			dev_priv->display.update_wm = NULL;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			pineview_disable_cxsr(dev);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.write_eld = g4x_write_eld;
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_I865G(dev)) {
		dev_priv->display.update_wm = i830_update_wm;
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		dev_priv->display.get_fifo_size = i830_get_fifo_size;
	} else if (IS_I85X(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i85x_get_fifo_size;
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	} else {
		dev_priv->display.update_wm = i830_update_wm;
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
		if (IS_845G(dev))
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		else
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
	}

	/* Default just returns -ENODEV to indicate unsupported */
	dev_priv->display.queue_flip = intel_default_queue_flip;

	/* Page-flip implementation is selected purely by GPU generation. */
	switch (INTEL_INFO(dev)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;

	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;

	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;

	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	}
}
6309 
6310 /*
6311  * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
6312  * resume, or other times.  This quirk makes sure that's the case for
6313  * affected systems.
6314  */
6315 static void quirk_pipea_force(struct drm_device *dev)
6316 {
6317 	struct drm_i915_private *dev_priv = dev->dev_private;
6318 
6319 	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
6320 	DRM_DEBUG("applying pipe a force quirk\n");
6321 }
6322 
6323 /*
6324  * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
6325  */
6326 static void quirk_ssc_force_disable(struct drm_device *dev)
6327 {
6328 	struct drm_i915_private *dev_priv = dev->dev_private;
6329 	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
6330 }
6331 
/* A PCI-id keyed workaround entry; matched by intel_init_quirks(). */
struct intel_quirk {
	int device;		/* PCI device id of the GPU */
	int subsystem_vendor;	/* board subsystem vendor id, or PCI_ANY_ID */
	int subsystem_device;	/* board subsystem device id, or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);	/* quirk applied on match */
};
6338 
#define	PCI_ANY_ID	(~0u)

/*
 * Per-machine display quirk table, keyed on GPU PCI id plus board
 * subsystem ids (PCI_ANY_ID wildcards a subsystem field).
 */
struct intel_quirk intel_quirks[] = {
	/* HP Mini needs pipe A force quirk (LP: #322104) */
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },

	/* Thinkpad R31 needs pipe A force quirk */
	{ 0x3577, 0x1014, 0x0505, quirk_pipea_force },
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
	{ 0x3577,  0x1014, 0x0513, quirk_pipea_force },
	/* ThinkPad X40 needs pipe A force quirk */

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 855 & before need to leave pipe A & dpll A up */
	{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
	{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
};
6367 
6368 static void intel_init_quirks(struct drm_device *dev)
6369 {
6370 	struct intel_quirk *q;
6371 	device_t d;
6372 	int i;
6373 
6374 	d = dev->dev;
6375 	for (i = 0; i < DRM_ARRAY_SIZE(intel_quirks); i++) {
6376 		q = &intel_quirks[i];
6377 		if (pci_get_device(d) == q->device &&
6378 		    (pci_get_subvendor(d) == q->subsystem_vendor ||
6379 		     q->subsystem_vendor == PCI_ANY_ID) &&
6380 		    (pci_get_subdevice(d) == q->subsystem_device ||
6381 		     q->subsystem_device == PCI_ANY_ID))
6382 			q->hook(dev);
6383 	}
6384 }
6385 
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg;

	/* The VGA control register lives at a different offset on PCH-split
	 * (Ironlake+) parts. */
	if (HAS_PCH_SPLIT(dev))
		vga_reg = CPU_VGACNTRL;
	else
		vga_reg = VGACNTRL;

#if 0
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif
	/* Select sequencer register SR01 and set bit 5 (presumably the
	 * standard VGA "screen off" bit — confirm against VGA docs). */
	outb(VGA_SR_INDEX, 1);
	sr1 = inb(VGA_SR_DATA);
	outb(VGA_SR_DATA, sr1 | 1 << 5);
#if 0
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif
	/* Let the sequencer write settle before touching VGACNTRL. */
	DELAY(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}
6412 
/*
 * One-time KMS initialization: mode-config limits, quirks, per-chipset
 * display vfuncs, CRTC/plane/output creation, clock gating and (where
 * applicable) RPS/DRPS power features.  Ordering below is significant.
 */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i, ret;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.funcs = __DECONST(struct drm_mode_config_funcs *,
	    &intel_mode_funcs);

	/* Quirks must be known before outputs/CRTCs are brought up. */
	intel_init_quirks(dev);

	intel_init_display(dev);

	/* Maximum framebuffer dimensions grow with GPU generation. */
	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}
	dev->mode_config.fb_base = dev->agp->base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");

	/* A plane init failure is logged but not fatal. */
	for (i = 0; i < dev_priv->num_pipe; i++) {
		intel_crtc_init(dev, i);
		ret = intel_plane_init(dev, i);
		if (ret)
			DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
	}

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	intel_init_clock_gating(dev);

	if (IS_IRONLAKE_M(dev)) {
		ironlake_enable_drps(dev);
		intel_init_emon(dev);
	}

	if (IS_GEN6(dev)) {
		gen6_enable_rps(dev_priv);
		gen6_update_ring_freq(dev_priv);
	}

	/* Idle callout used to downclock PLLs when the display is idle. */
	callout_init_mp(&dev_priv->idle_callout);
}
6473 
/* Modeset setup that runs after GEM is initialized. */
void intel_modeset_gem_init(struct drm_device *dev)
{
	/* NOTE(review): presumably RC6 enabling needs GEM up, hence it
	 * happens here rather than in intel_modeset_init() — confirm. */
	if (IS_IRONLAKE_M(dev))
		ironlake_enable_rc6(dev);

	intel_setup_overlay(dev);
}
6481 
6482 void intel_modeset_cleanup(struct drm_device *dev)
6483 {
6484 	struct drm_i915_private *dev_priv = dev->dev_private;
6485 	struct drm_crtc *crtc;
6486 	struct intel_crtc *intel_crtc;
6487 
6488 	drm_kms_helper_poll_fini(dev);
6489 	DRM_LOCK(dev);
6490 
6491 #if 0
6492 	intel_unregister_dsm_handler();
6493 #endif
6494 
6495 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
6496 		/* Skip inactive CRTCs */
6497 		if (!crtc->fb)
6498 			continue;
6499 
6500 		intel_crtc = to_intel_crtc(crtc);
6501 		intel_increase_pllclock(crtc);
6502 	}
6503 
6504 	intel_disable_fbc(dev);
6505 
6506 	if (IS_IRONLAKE_M(dev))
6507 		ironlake_disable_drps(dev);
6508 	if (IS_GEN6(dev))
6509 		gen6_disable_rps(dev);
6510 
6511 	if (IS_IRONLAKE_M(dev))
6512 		ironlake_disable_rc6(dev);
6513 
6514 	DRM_UNLOCK(dev);
6515 
6516 	/* Disable the irq before mode object teardown, for the irq might
6517 	 * enqueue unpin/hotplug work. */
6518 	drm_irq_uninstall(dev);
6519 	cancel_work_sync(&dev_priv->hotplug_work);
6520 	cancel_work_sync(&dev_priv->rps.work);
6521 
6522 	/* flush any delayed tasks or pending work */
6523 	flush_scheduled_work();
6524 
6525 	drm_mode_config_cleanup(dev);
6526 }
6527 
6528 /*
6529  * Return which encoder is currently attached for connector.
6530  */
6531 struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
6532 {
6533 	return &intel_attached_encoder(connector)->base;
6534 }
6535 
6536 void intel_connector_attach_encoder(struct intel_connector *connector,
6537 				    struct intel_encoder *encoder)
6538 {
6539 	connector->encoder = encoder;
6540 	drm_mode_connector_attach_encoder(&connector->base,
6541 					  &encoder->base);
6542 }
6543 
6544 /*
6545  * set vga decode state - true == enable VGA decode
6546  */
6547 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
6548 {
6549 	struct drm_i915_private *dev_priv;
6550 	device_t bridge_dev;
6551 	u16 gmch_ctrl;
6552 
6553 	dev_priv = dev->dev_private;
6554 	bridge_dev = intel_gtt_get_bridge_device();
6555 	gmch_ctrl = pci_read_config(bridge_dev, INTEL_GMCH_CTRL, 2);
6556 	if (state)
6557 		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
6558 	else
6559 		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
6560 	pci_write_config(bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl, 2);
6561 	return (0);
6562 }
6563 
/* Snapshot of display-register state captured on a GPU error for the
 * two legacy pipes; filled by intel_display_capture_error_state() and
 * dumped by intel_display_print_error_state(). */
struct intel_display_error_state {
	struct intel_cursor_error_state {
		u32 control;	/* CURCNTR */
		u32 position;	/* CURPOS */
		u32 base;	/* CURBASE */
		u32 size;	/* NOTE(review): never filled by the capture
				 * routine in this file — confirm intent */
	} cursor[2];

	struct intel_pipe_error_state {
		u32 conf;	/* PIPECONF */
		u32 source;	/* PIPESRC */

		/* Pipe timing registers. */
		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} pipe[2];

	struct intel_plane_error_state {
		u32 control;	/* DSPCNTR */
		u32 stride;	/* DSPSTRIDE */
		u32 size;	/* DSPSIZE */
		u32 pos;	/* DSPPOS */
		u32 addr;	/* DSPADDR */
		u32 surface;	/* DSPSURF, gen4+ only */
		u32 tile_offset;	/* DSPTILEOFF, gen4+ only */
	} plane[2];
};
6594 
/*
 * Capture cursor/plane/pipe register state for the first two pipes.
 * Runs in error context, so the allocation must not sleep (M_NOWAIT);
 * returns NULL if no memory is available.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_display_error_state *error;
	int i;

	error = kmalloc(sizeof(*error), DRM_MEM_KMS, M_NOWAIT);
	if (error == NULL)
		return NULL;

	for (i = 0; i < 2; i++) {
		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));
		/* NOTE(review): error->cursor[i].size is left uninitialized
		 * here although the struct declares it — confirm. */

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		error->plane[i].size = I915_READ(DSPSIZE(i));
		error->plane[i].pos = I915_READ(DSPPOS(i));
		error->plane[i].addr = I915_READ(DSPADDR(i));
		/* Surface base and tile offset registers only exist on gen4+. */
		if (INTEL_INFO(dev)->gen >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].conf = I915_READ(PIPECONF(i));
		error->pipe[i].source = I915_READ(PIPESRC(i));
		error->pipe[i].htotal = I915_READ(HTOTAL(i));
		error->pipe[i].hblank = I915_READ(HBLANK(i));
		error->pipe[i].hsync = I915_READ(HSYNC(i));
		error->pipe[i].vtotal = I915_READ(VTOTAL(i));
		error->pipe[i].vblank = I915_READ(VBLANK(i));
		error->pipe[i].vsync = I915_READ(VSYNC(i));
	}

	return error;
}
6633 
/*
 * Pretty-print a previously captured display error state into an sbuf
 * (used by the error-state sysctl/debug dump).  Mirrors the layout of
 * intel_display_capture_error_state().
 */
void
intel_display_print_error_state(struct sbuf *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	int i;

	for (i = 0; i < 2; i++) {
		sbuf_printf(m, "Pipe [%d]:\n", i);
		sbuf_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
		sbuf_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		sbuf_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
		sbuf_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
		sbuf_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
		sbuf_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
		sbuf_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
		sbuf_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);

		sbuf_printf(m, "Plane [%d]:\n", i);
		sbuf_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		sbuf_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		sbuf_printf(m, "  SIZE: %08x\n", error->plane[i].size);
		sbuf_printf(m, "  POS: %08x\n", error->plane[i].pos);
		sbuf_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		/* Surface/tile-offset registers only captured on gen4+. */
		if (INTEL_INFO(dev)->gen >= 4) {
			sbuf_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			sbuf_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		sbuf_printf(m, "Cursor [%d]:\n", i);
		sbuf_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		sbuf_printf(m, "  POS: %08x\n", error->cursor[i].position);
		sbuf_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}
}
6669