/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_dsi.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <linux/reservation.h>

/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const uint32_t i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

static const uint32_t skl_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *ifb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipemisc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
	struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);

typedef struct {
	int	min, max;
} intel_range_t;

typedef struct {
	int	dot_limit;
	int	p2_slow, p2_fast;
} intel_p2_t;

typedef struct intel_limit intel_limit_t;
struct intel_limit {
	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t	    p2;
};

/* returns HPLL frequency in kHz */
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	mutex_lock(&dev_priv->sb_lock);
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;
	mutex_unlock(&dev_priv->sb_lock);

	return vco_freq[hpll_freq] * 1000;
}

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, reg);
	mutex_unlock(&dev_priv->sb_lock);

	divider = val & CCK_FREQUENCY_VALUES;

	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
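
/*
 * Illustrative example (numbers assumed, not from any spec): with the
 * 800 MHz HPLL reference from the SKU table above (ref_freq = 800000 kHz)
 * and a CCK divider field of 7, vlv_get_cck_clock() returns
 * DIV_ROUND_CLOSEST(2 * 800000, 7 + 1) = 200000 kHz.
 */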

static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
				  const char *name, u32 reg)
{
	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = valleyview_get_vco(dev_priv);

	return vlv_get_cck_clock(dev_priv, name, reg,
				 dev_priv->hpll_freq);
}

static int
intel_pch_rawclk(struct drm_i915_private *dev_priv)
{
	return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
}

static int
intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
{
	return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
				      CCK_DISPLAY_REF_CLOCK_CONTROL);
}

static int
intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
{
	uint32_t clkcfg;

	/* hrawclock is 1/4 the FSB frequency */
	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100000;
	case CLKCFG_FSB_533:
		return 133333;
	case CLKCFG_FSB_667:
		return 166667;
	case CLKCFG_FSB_800:
		return 200000;
	case CLKCFG_FSB_1067:
		return 266667;
	case CLKCFG_FSB_1333:
		return 333333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400000;
	default:
		return 133333;
	}
}

static void intel_update_rawclk(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv))
		dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->rawclk_freq = intel_vlv_hrawclk(dev_priv);
	else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
		dev_priv->rawclk_freq = intel_g4x_hrawclk(dev_priv);
	else
		return; /* no rawclk on other platforms, or no need to know it */

	DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* in kHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
		    const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else if (IS_GEN5(dev_priv))
		return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000;
	else
		return 270000;
}

static const intel_limit_t intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
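
/*
 * Worked example of that encoding: the .n = 1..5 range below holds register
 * values, i.e. actual N dividers of 3..7; likewise .m2 = 5..9 corresponds
 * to actual M2 dividers of 7..11.
 */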
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100 MHz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const intel_limit_t intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};
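
/*
 * Note that .dot above is given in "fast clock" units, i.e. 5x the pixel
 * rate; vlv_find_best_dpll() below multiplies its target by 5 to match
 * before checking a candidate against these limits.
 */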

static const intel_limit_t intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

static bool
needs_modeset(struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}

/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		if (encoder->type == type)
			return true;

	return false;
}

/**
 * Returns whether any output on the specified pipe will have the specified
 * type after a staged modeset is complete, i.e., the same as
 * intel_pipe_has_type() but looking at encoder->new_crtc instead of
 * encoder->crtc.
 */
static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
				      int type)
{
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct intel_encoder *encoder;
	int i, num_connectors = 0;

	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		num_connectors++;

		encoder = to_intel_encoder(connector_state->best_encoder);
		if (encoder->type == type)
			return true;
	}

	WARN_ON(num_connectors == 0);

	return false;
}

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
			clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
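
/*
 * CHV (and BXT) carry m2 as a .22 fixed-point value, cf. the "24 << 22"
 * m2 limits above; the (n << 22) divisor in chv_calc_dpll_params() is what
 * cancels that fixed-point scaling out of the VCO rate.
 */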

#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors is valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) &&
	    !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

static int
i9xx_select_p2_div(const intel_limit_t *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
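/*
 * Worked example with assumed divisors (chosen for illustration, not taken
 * from any spec): with the i9xx SDVO limits above, a 96 MHz reference and
 * m1 = 12, m2 = 6, n = 3, p1 = 2, p2 = 5, we get
 * m = 5 * (12 + 2) + (6 + 2) = 78, vco = 96000 * 78 / (3 + 2) = 1497600 kHz
 * and dot = 1497600 / (2 * 5) = 149760 kHz.
 */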
static bool
i9xx_find_best_dpll(const intel_limit_t *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	intel_clock_t clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	intel_clock_t clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	intel_clock_t clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n for precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1, m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/*
 * Check whether the calculated PLL configuration is more optimal than the
 * best configuration and error found so far. Returns true if it is, and
 * reports the calculated error in ppm through @error_ppm.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const intel_clock_t *calculated_clock,
			       const intel_clock_t *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(dev)) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (WARN_ON_ONCE(!target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
				abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}
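
/*
 * Example of the ppm arithmetic above (illustrative numbers): a target of
 * 200000 kHz and a computed dot clock of 199981 kHz give
 * 1000000 * 19 / 200000 = 95 ppm; being under the 100 ppm threshold, such
 * a candidate still wins if it also has a larger P than the current best.
 */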

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
vlv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n for precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1, m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or false.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
chv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	intel_clock_t clock;
	uint64_t m2;
	bool found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Per the hardware documentation, n is always set to 1 and m1 is
	 * always set to 2.  If we ever need to support a 200 MHz refclk,
	 * revisit this, since n may no longer be 1.
	 */
	clock.n = 1;
	clock.m1 = 2;
	target *= 5;	/* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
				clock.p2 >= limit->p2.p2_slow;
				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
					clock.n) << 22, refclk * clock.m1);

			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_PLL_is_valid(dev, limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}
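
/*
 * Note that, unlike the i9xx/g4x searches above, m2 is not iterated here:
 * with n and m1 fixed by the hardware, chv_find_best_dpll() solves
 * m2 = (target * p * n << 22) / (refclk * m1) directly for each p1/p2 pair.
 */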

bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
			intel_clock_t *best_clock)
{
	int refclk = 100000;
	const intel_limit_t *limit = &intel_limits_bxt;

	return chv_find_best_dpll(limit, crtc_state,
				  target_clock, refclk, NULL, best_clock);
}

bool intel_crtc_active(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return intel_crtc->active && crtc->primary->state->fb &&
		intel_crtc->config->base.adjusted_mode.crtc_clock;
}

enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum i915_pipe pipe)
{
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return intel_crtc->config->cpu_transcoder;
}

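/*
 * Sample the pipe's display scanline counter twice, 5 ms apart; if the
 * (masked) value has not moved, scanout has stopped.
 */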
static bool pipe_dsl_stopped(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN2(dev))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 == line2;
}

/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @crtc: crtc whose pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum i915_pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 4) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		/* Wait for the display line to settle */
		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
			WARN(1, "pipe_off wait timed out\n");
	}
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum i915_pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	mutex_unlock(&dev_priv->sb_lock);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum i915_pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum i915_pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum i915_pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (INTEL_INFO(dev_priv)->gen == 5)
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum i915_pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

void assert_panel_unlocked(struct drm_i915_private *dev_priv,
			   enum i915_pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	i915_reg_t pp_reg;
	u32 val;
	enum i915_pipe panel_pipe = PIPE_A;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (HAS_PCH_SPLIT(dev)) {
		u32 port_sel;

		pp_reg = PCH_PP_CONTROL;
		port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;

		if (port_sel == PANEL_PORT_SELECT_LVDS &&
		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
		/* XXX: else fix for eDP */
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = VLV_PIPE_PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		pp_reg = PP_CONTROL;
		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}

static void assert_cursor(struct drm_i915_private *dev_priv,
			  enum i915_pipe pipe, bool state)
{
	struct drm_device *dev = dev_priv->dev;
	bool cur_state;

	if (IS_845G(dev) || IS_I865G(dev))
		cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
	else
		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	I915_STATE_WARN(cur_state != state,
	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum i915_pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;

	/* if we need the pipe quirk it must always be on */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}

static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DSPCNTR(plane));
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "plane %c assertion failure (expected %s, current %s)\n",
			plane_name(plane), onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)

static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum i915_pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int i;

	/* Primary planes are fixed to pipes on gen4+ */
	if (INTEL_INFO(dev)->gen >= 4) {
		u32 val = I915_READ(DSPCNTR(pipe));
		I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
		     "plane %c assertion failure, should be disabled but is not\n",
		     plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for_each_pipe(dev_priv, i) {
		u32 val = I915_READ(DSPCNTR(i));
		enum i915_pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}

static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
				    enum i915_pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int sprite;

	if (INTEL_INFO(dev)->gen >= 9) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(PLANE_CTL(pipe, sprite));
			I915_STATE_WARN(val & PLANE_CTL_ENABLE,
			     "plane %d assertion failure, should be off on pipe %c but is still active\n",
			     sprite, pipe_name(pipe));
		}
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(SPCNTR(pipe, sprite));
			I915_STATE_WARN(val & SP_ENABLE,
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
			     sprite_name(pipe, sprite), pipe_name(pipe));
		}
	} else if (INTEL_INFO(dev)->gen >= 7) {
		u32 val = I915_READ(SPRCTL(pipe));
		I915_STATE_WARN(val & SPRITE_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	} else if (INTEL_INFO(dev)->gen >= 5) {
		u32 val = I915_READ(DVSCNTR(pipe));
		I915_STATE_WARN(val & DVS_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	}
}

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum i915_pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}

static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum i915_pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
			return false;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
			return false;
	} else {
		if ((val & DP_PIPE_MASK) != (pipe << 30))
			return false;
	}
	return true;
}

static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum i915_pipe pipe, u32 val)
{
	if ((val & SDVO_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
			return false;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
			return false;
	} else {
		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
			return false;
	}
	return true;
}

static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum i915_pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
			return false;
	}
	return true;
}

static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum i915_pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;
	if (HAS_PCH_CPT(dev_priv)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
			return false;
	}
	return true;
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum i915_pipe pipe, i915_reg_t reg,
				   u32 port_sel)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     i915_mmio_reg_offset(reg), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
	     && (val & DP_PIPEB_SELECT),
	     "IBX PCH dp port still using transcoder B\n");
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum i915_pipe pipe, i915_reg_t reg)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     i915_mmio_reg_offset(reg), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
	     && (val & SDVO_PIPE_B_SELECT),
	     "IBX PCH hdmi port still using transcoder B\n");
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum i915_pipe pipe)
{
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	val = I915_READ(PCH_ADPA);
	I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	val = I915_READ(PCH_LVDS);
	I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}

static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum i915_pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}

static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum i915_pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}


static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum i915_pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Re-enable the 10-bit clock to the display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}

static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum i915_pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C);
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}

static int intel_num_dvo_pipes(struct drm_device *dev)
{
	struct intel_crtc *crtc;
	int count = 0;

	for_each_intel_crtc(dev, crtc)
		count += crtc->base.state->active &&
			intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);

	return count;
}

static void i9xx_enable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev) && !IS_I830(dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneously.
		 */
1661 		dpll |= DPLL_DVO_2X_MODE;
1662 		I915_WRITE(DPLL(!crtc->pipe),
1663 			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
1664 	}
1665 
1666 	/*
1667 	 * Apparently we need to have VGA mode enabled prior to changing
1668 	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1669 	 * dividers, even though the register value does change.
1670 	 */
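	/* Writing 0 leaves DPLL_VGA_MODE_DIS cleared, i.e. VGA mode enabled. */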
1671 	I915_WRITE(reg, 0);
1672 
1673 	I915_WRITE(reg, dpll);
1674 
1675 	/* Wait for the clocks to stabilize. */
1676 	POSTING_READ(reg);
1677 	udelay(150);
1678 
1679 	if (INTEL_INFO(dev)->gen >= 4) {
1680 		I915_WRITE(DPLL_MD(crtc->pipe),
1681 			   crtc->config->dpll_hw_state.dpll_md);
1682 	} else {
1683 		/* The pixel multiplier can only be updated once the
1684 		 * DPLL is enabled and the clocks are stable.
1685 		 *
1686 		 * So write it again.
1687 		 */
1688 		I915_WRITE(reg, dpll);
1689 	}
1690 
1691 	/* We do this three times for luck */
1692 	I915_WRITE(reg, dpll);
1693 	POSTING_READ(reg);
1694 	udelay(150); /* wait for warmup */
1695 	I915_WRITE(reg, dpll);
1696 	POSTING_READ(reg);
1697 	udelay(150); /* wait for warmup */
1698 	I915_WRITE(reg, dpll);
1699 	POSTING_READ(reg);
1700 	udelay(150); /* wait for warmup */
1701 }
1702 
1703 /**
1704  * i9xx_disable_pll - disable a PLL
1705  * @crtc: crtc whose PLL is to be disabled
1706  *
1707  * Disable the PLL for @crtc's pipe, making sure the pipe is off first.
1709  *
1710  * Note!  This is for pre-ILK only.
1711  */
1712 static void i9xx_disable_pll(struct intel_crtc *crtc)
1713 {
1714 	struct drm_device *dev = crtc->base.dev;
1715 	struct drm_i915_private *dev_priv = dev->dev_private;
1716 	enum i915_pipe pipe = crtc->pipe;
1717 
1718 	/* Disable DVO 2x clock on both PLLs if necessary */
1719 	if (IS_I830(dev) &&
1720 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
1721 	    !intel_num_dvo_pipes(dev)) {
1722 		I915_WRITE(DPLL(PIPE_B),
1723 			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
1724 		I915_WRITE(DPLL(PIPE_A),
1725 			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
1726 	}
1727 
1728 	/* Don't disable the pipe or its PLL if a quirk requires them to stay on */
1729 	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1730 	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1731 		return;
1732 
1733 	/* Make sure the pipe isn't still relying on us */
1734 	assert_pipe_disabled(dev_priv, pipe);
1735 
1736 	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
1737 	POSTING_READ(DPLL(pipe));
1738 }
1739 
1740 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
1741 {
1742 	u32 val;
1743 
1744 	/* Make sure the pipe isn't still relying on us */
1745 	assert_pipe_disabled(dev_priv, pipe);
1746 
1747 	val = DPLL_INTEGRATED_REF_CLK_VLV |
1748 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1749 	if (pipe != PIPE_A)
1750 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1751 
1752 	I915_WRITE(DPLL(pipe), val);
1753 	POSTING_READ(DPLL(pipe));
1754 }
1755 
1756 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
1757 {
1758 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1759 	u32 val;
1760 
1761 	/* Make sure the pipe isn't still relying on us */
1762 	assert_pipe_disabled(dev_priv, pipe);
1763 
1764 	val = DPLL_SSC_REF_CLK_CHV |
1765 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1766 	if (pipe != PIPE_A)
1767 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1768 
1769 	I915_WRITE(DPLL(pipe), val);
1770 	POSTING_READ(DPLL(pipe));
1771 
1772 	mutex_lock(&dev_priv->sb_lock);
1773 
1774 	/* Disable the 10-bit clock to the display controller */
1775 	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1776 	val &= ~DPIO_DCLKP_EN;
1777 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1778 
1779 	mutex_unlock(&dev_priv->sb_lock);
1780 }
1781 
1782 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1783 			 struct intel_digital_port *dport,
1784 			 unsigned int expected_mask)
1785 {
1786 	u32 port_mask;
1787 	i915_reg_t dpll_reg;
1788 
1789 	switch (dport->port) {
1790 	case PORT_B:
1791 		port_mask = DPLL_PORTB_READY_MASK;
1792 		dpll_reg = DPLL(0);
1793 		break;
1794 	case PORT_C:
1795 		port_mask = DPLL_PORTC_READY_MASK;
1796 		dpll_reg = DPLL(0);
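		/* Port C's ready bits sit 4 bits above port B's in DPLL(0). */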
1797 		expected_mask <<= 4;
1798 		break;
1799 	case PORT_D:
1800 		port_mask = DPLL_PORTD_READY_MASK;
1801 		dpll_reg = DPIO_PHY_STATUS;
1802 		break;
1803 	default:
1804 		BUG();
1805 	}
1806 
1807 	if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
1808 		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
1809 		     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
1810 }
1811 
1812 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1813 					   enum i915_pipe pipe)
1814 {
1815 	struct drm_device *dev = dev_priv->dev;
1816 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1817 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1818 	i915_reg_t reg;
1819 	uint32_t val, pipeconf_val;
1820 
1821 	/* Make sure PCH DPLL is enabled */
1822 	assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
1823 
1824 	/* FDI must be feeding us bits for PCH ports */
1825 	assert_fdi_tx_enabled(dev_priv, pipe);
1826 	assert_fdi_rx_enabled(dev_priv, pipe);
1827 
1828 	if (HAS_PCH_CPT(dev)) {
1829 		/* Workaround: Set the timing override bit before enabling the
1830 		 * pch transcoder. */
1831 		reg = TRANS_CHICKEN2(pipe);
1832 		val = I915_READ(reg);
1833 		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1834 		I915_WRITE(reg, val);
1835 	}
1836 
1837 	reg = PCH_TRANSCONF(pipe);
1838 	val = I915_READ(reg);
1839 	pipeconf_val = I915_READ(PIPECONF(pipe));
1840 
1841 	if (HAS_PCH_IBX(dev_priv)) {
1842 		/*
1843 		 * Make the BPC in the transcoder consistent with
1844 		 * that in the pipeconf register. For HDMI we must use
1845 		 * 8bpc here for both 8bpc and 12bpc.
1846 		 */
1847 		val &= ~PIPECONF_BPC_MASK;
1848 		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
1849 			val |= PIPECONF_8BPC;
1850 		else
1851 			val |= pipeconf_val & PIPECONF_BPC_MASK;
1852 	}
1853 
1854 	val &= ~TRANS_INTERLACE_MASK;
1855 	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1856 		if (HAS_PCH_IBX(dev_priv) &&
1857 		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
1858 			val |= TRANS_LEGACY_INTERLACED_ILK;
1859 		else
1860 			val |= TRANS_INTERLACED;
1861 	else
1862 		val |= TRANS_PROGRESSIVE;
1863 
1864 	I915_WRITE(reg, val | TRANS_ENABLE);
1865 	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1866 		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1867 }
1868 
1869 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1870 				      enum transcoder cpu_transcoder)
1871 {
1872 	u32 val, pipeconf_val;
1873 
1874 	/* FDI must be feeding us bits for PCH ports */
1875 	assert_fdi_tx_enabled(dev_priv, (enum i915_pipe) cpu_transcoder);
1876 	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
1877 
1878 	/* Workaround: set timing override bit. */
1879 	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1880 	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1881 	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1882 
1883 	val = TRANS_ENABLE;
1884 	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1885 
1886 	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1887 	    PIPECONF_INTERLACED_ILK)
1888 		val |= TRANS_INTERLACED;
1889 	else
1890 		val |= TRANS_PROGRESSIVE;
1891 
1892 	I915_WRITE(LPT_TRANSCONF, val);
1893 	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
1894 		DRM_ERROR("Failed to enable PCH transcoder\n");
1895 }
1896 
1897 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1898 					    enum i915_pipe pipe)
1899 {
1900 	struct drm_device *dev = dev_priv->dev;
1901 	i915_reg_t reg;
1902 	uint32_t val;
1903 
1904 	/* FDI relies on the transcoder */
1905 	assert_fdi_tx_disabled(dev_priv, pipe);
1906 	assert_fdi_rx_disabled(dev_priv, pipe);
1907 
1908 	/* Ports must be off as well */
1909 	assert_pch_ports_disabled(dev_priv, pipe);
1910 
1911 	reg = PCH_TRANSCONF(pipe);
1912 	val = I915_READ(reg);
1913 	val &= ~TRANS_ENABLE;
1914 	I915_WRITE(reg, val);
1915 	/* wait for PCH transcoder off, transcoder state */
1916 	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1917 		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1918 
1919 	if (HAS_PCH_CPT(dev)) {
1920 		/* Workaround: Clear the timing override chicken bit again. */
1921 		reg = TRANS_CHICKEN2(pipe);
1922 		val = I915_READ(reg);
1923 		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1924 		I915_WRITE(reg, val);
1925 	}
1926 }
1927 
1928 static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1929 {
1930 	u32 val;
1931 
1932 	val = I915_READ(LPT_TRANSCONF);
1933 	val &= ~TRANS_ENABLE;
1934 	I915_WRITE(LPT_TRANSCONF, val);
1935 	/* wait for PCH transcoder off, transcoder state */
1936 	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
1937 		DRM_ERROR("Failed to disable PCH transcoder\n");
1938 
1939 	/* Workaround: clear timing override bit. */
1940 	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1941 	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1942 	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1943 }
1944 
1945 /**
1946  * intel_enable_pipe - enable a pipe, asserting requirements
1947  * @crtc: crtc responsible for the pipe
1948  *
1949  * Enable @crtc's pipe, making sure that various hardware specific requirements
1950  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1951  */
1952 static void intel_enable_pipe(struct intel_crtc *crtc)
1953 {
1954 	struct drm_device *dev = crtc->base.dev;
1955 	struct drm_i915_private *dev_priv = dev->dev_private;
1956 	enum i915_pipe pipe = crtc->pipe;
1957 	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
1958 	enum i915_pipe pch_transcoder;
1959 	i915_reg_t reg;
1960 	u32 val;
1961 
1962 	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
1963 
1964 	assert_planes_disabled(dev_priv, pipe);
1965 	assert_cursor_disabled(dev_priv, pipe);
1966 	assert_sprites_disabled(dev_priv, pipe);
1967 
1968 	if (HAS_PCH_LPT(dev_priv))
1969 		pch_transcoder = TRANSCODER_A;
1970 	else
1971 		pch_transcoder = pipe;
1972 
1973 	/*
1974 	 * A pipe without a PLL won't actually be able to drive bits from
1975 	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1976 	 * need the check.
1977 	 */
1978 	if (HAS_GMCH_DISPLAY(dev_priv)) {
1979 		if (crtc->config->has_dsi_encoder)
1980 			assert_dsi_pll_enabled(dev_priv);
1981 		else
1982 			assert_pll_enabled(dev_priv, pipe);
1983 	} else {
1984 		if (crtc->config->has_pch_encoder) {
1985 			/* if driving the PCH, we need FDI enabled */
1986 			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
1987 			assert_fdi_tx_pll_enabled(dev_priv,
1988 						  (enum i915_pipe) cpu_transcoder);
1989 		}
1990 		/* FIXME: assert CPU port conditions for SNB+ */
1991 	}
1992 
1993 	reg = PIPECONF(cpu_transcoder);
1994 	val = I915_READ(reg);
1995 	if (val & PIPECONF_ENABLE) {
1996 		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1997 			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
1998 		return;
1999 	}
2000 
2001 	I915_WRITE(reg, val | PIPECONF_ENABLE);
2002 	POSTING_READ(reg);
2003 
2004 	/*
2005 	 * Until the pipe starts, DSL reads as 0, which would cause
2006 	 * an apparent vblank timestamp jump and also mess up the
2007 	 * frame count when it's derived from the timestamps. So let's
2008 	 * wait for the pipe to start properly before we call
2009 	 * drm_crtc_vblank_on().
2010 	 */
2011 	if (dev->max_vblank_count == 0 &&
2012 	    wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
2013 		DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
2014 }
2015 
2016 /**
2017  * intel_disable_pipe - disable a pipe, asserting requirements
2018  * @crtc: crtc whose pipe is to be disabled
2019  *
2020  * Disable the pipe of @crtc, making sure that various hardware
2021  * specific requirements are met, if applicable, e.g. plane
2022  * disabled, panel fitter off, etc.
2023  *
2024  * Will wait until the pipe has shut down before returning.
2025  */
2026 static void intel_disable_pipe(struct intel_crtc *crtc)
2027 {
2028 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
2029 	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
2030 	enum i915_pipe pipe = crtc->pipe;
2031 	i915_reg_t reg;
2032 	u32 val;
2033 
2034 	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
2035 
2036 	/*
2037 	 * Make sure planes won't keep trying to pump pixels to us,
2038 	 * or we might hang the display.
2039 	 */
2040 	assert_planes_disabled(dev_priv, pipe);
2041 	assert_cursor_disabled(dev_priv, pipe);
2042 	assert_sprites_disabled(dev_priv, pipe);
2043 
2044 	reg = PIPECONF(cpu_transcoder);
2045 	val = I915_READ(reg);
2046 	if ((val & PIPECONF_ENABLE) == 0)
2047 		return;
2048 
2049 	/*
2050 	 * Double wide has implications for planes,
2051 	 * so it's best to keep it disabled when not needed.
2052 	 */
2053 	if (crtc->config->double_wide)
2054 		val &= ~PIPECONF_DOUBLE_WIDE;
2055 
2056 	/* Don't disable the pipe or its PLL if a quirk requires them to stay on */
2057 	if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
2058 	    !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
2059 		val &= ~PIPECONF_ENABLE;
2060 
2061 	I915_WRITE(reg, val);
2062 	if ((val & PIPECONF_ENABLE) == 0)
2063 		intel_wait_for_pipe_off(crtc);
2064 }
2065 
2066 static bool need_vtd_wa(struct drm_device *dev)
2067 {
2068 #ifdef CONFIG_INTEL_IOMMU
2069 	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2070 		return true;
2071 #endif
2072 	return false;
2073 }
2074 
2075 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
2076 {
2077 	return IS_GEN2(dev_priv) ? 2048 : 4096;
2078 }
2079 
2080 static unsigned int intel_tile_width_bytes(const struct drm_i915_private *dev_priv,
2081 					   uint64_t fb_modifier, unsigned int cpp)
2082 {
2083 	switch (fb_modifier) {
2084 	case DRM_FORMAT_MOD_NONE:
2085 		return cpp;
2086 	case I915_FORMAT_MOD_X_TILED:
2087 		if (IS_GEN2(dev_priv))
2088 			return 128;
2089 		else
2090 			return 512;
2091 	case I915_FORMAT_MOD_Y_TILED:
2092 		if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
2093 			return 128;
2094 		else
2095 			return 512;
2096 	case I915_FORMAT_MOD_Yf_TILED:
2097 		switch (cpp) {
2098 		case 1:
2099 			return 64;
2100 		case 2:
2101 		case 4:
2102 			return 128;
2103 		case 8:
2104 		case 16:
2105 			return 256;
2106 		default:
2107 			MISSING_CASE(cpp);
2108 			return cpp;
2109 		}
2110 		break;
2111 	default:
2112 		MISSING_CASE(fb_modifier);
2113 		return cpp;
2114 	}
2115 }
2116 
2117 unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
2118 			       uint64_t fb_modifier, unsigned int cpp)
2119 {
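	/* e.g. X-tiled on gen4+: 4096-byte tile / 512-byte rows = 8 rows tall */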
2120 	if (fb_modifier == DRM_FORMAT_MOD_NONE)
2121 		return 1;
2122 	else
2123 		return intel_tile_size(dev_priv) /
2124 			intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
2125 }
2126 
2127 /* Return the tile dimensions in pixel units */
2128 static void intel_tile_dims(const struct drm_i915_private *dev_priv,
2129 			    unsigned int *tile_width,
2130 			    unsigned int *tile_height,
2131 			    uint64_t fb_modifier,
2132 			    unsigned int cpp)
2133 {
2134 	unsigned int tile_width_bytes =
2135 		intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
2136 
2137 	*tile_width = tile_width_bytes / cpp;
2138 	*tile_height = intel_tile_size(dev_priv) / tile_width_bytes;
2139 }
2140 
2141 unsigned int
2142 intel_fb_align_height(struct drm_device *dev, unsigned int height,
2143 		      uint32_t pixel_format, uint64_t fb_modifier)
2144 {
2145 	unsigned int cpp = drm_format_plane_cpp(pixel_format, 0);
2146 	unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp);
2147 
2148 	return ALIGN(height, tile_height);
2149 }
2150 
2151 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2152 {
2153 	unsigned int size = 0;
2154 	int i;
2155 
2156 	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2157 		size += rot_info->plane[i].width * rot_info->plane[i].height;
2158 
2159 	return size;
2160 }
2161 
2162 static void
2163 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2164 			struct drm_framebuffer *fb,
2165 			unsigned int rotation)
2166 {
2167 	if (intel_rotation_90_or_270(rotation)) {
2168 		*view = i915_ggtt_view_rotated;
2169 		view->params.rotated = to_intel_framebuffer(fb)->rot_info;
2170 	} else {
2171 		*view = i915_ggtt_view_normal;
2172 	}
2173 }
2174 
2175 static void
2176 intel_fill_fb_info(struct drm_i915_private *dev_priv,
2177 		   struct drm_framebuffer *fb)
2178 {
2179 	struct intel_rotation_info *info = &to_intel_framebuffer(fb)->rot_info;
2180 	unsigned int tile_size, tile_width, tile_height, cpp;
2181 
2182 	tile_size = intel_tile_size(dev_priv);
2183 
2184 	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
2185 	intel_tile_dims(dev_priv, &tile_width, &tile_height,
2186 			fb->modifier[0], cpp);
2187 
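	/* fb->pitches[] is in bytes while tile_width is in pixels, hence the *cpp. */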
2188 	info->plane[0].width = DIV_ROUND_UP(fb->pitches[0], tile_width * cpp);
2189 	info->plane[0].height = DIV_ROUND_UP(fb->height, tile_height);
2190 
2191 	if (info->pixel_format == DRM_FORMAT_NV12) {
2192 		cpp = drm_format_plane_cpp(fb->pixel_format, 1);
2193 		intel_tile_dims(dev_priv, &tile_width, &tile_height,
2194 				fb->modifier[1], cpp);
2195 
2196 		info->uv_offset = fb->offsets[1];
2197 		info->plane[1].width = DIV_ROUND_UP(fb->pitches[1], tile_width * cpp);
2198 		info->plane[1].height = DIV_ROUND_UP(fb->height / 2, tile_height);
2199 	}
2200 }
2201 
2202 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
2203 {
2204 	if (INTEL_INFO(dev_priv)->gen >= 9)
2205 		return 256 * 1024;
2206 	else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
2207 		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2208 		return 128 * 1024;
2209 	else if (INTEL_INFO(dev_priv)->gen >= 4)
2210 		return 4 * 1024;
2211 	else
2212 		return 0;
2213 }
2214 
2215 static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
2216 					 uint64_t fb_modifier)
2217 {
2218 	switch (fb_modifier) {
2219 	case DRM_FORMAT_MOD_NONE:
2220 		return intel_linear_alignment(dev_priv);
2221 	case I915_FORMAT_MOD_X_TILED:
2222 		if (INTEL_INFO(dev_priv)->gen >= 9)
2223 			return 256 * 1024;
2224 		return 0;
2225 	case I915_FORMAT_MOD_Y_TILED:
2226 	case I915_FORMAT_MOD_Yf_TILED:
2227 		return 1 * 1024 * 1024;
2228 	default:
2229 		MISSING_CASE(fb_modifier);
2230 		return 0;
2231 	}
2232 }
2233 
2234 int
2235 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2236 			   unsigned int rotation)
2237 {
2238 	struct drm_device *dev = fb->dev;
2239 	struct drm_i915_private *dev_priv = dev->dev_private;
2240 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2241 	struct i915_ggtt_view view;
2242 	u32 alignment;
2243 	int ret;
2244 
2245 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2246 
2247 	alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
2248 
2249 	intel_fill_fb_ggtt_view(&view, fb, rotation);
2250 
2251 	/* Note that the w/a also requires 64 PTE of padding following the
2252 	 * bo. We currently fill all unused PTE with the shadow page and so
2253 	 * we should always have valid PTE following the scanout preventing
2254 	 * we should always have valid PTE following the scanout, preventing
2255 	 */
2256 	if (need_vtd_wa(dev) && alignment < 256 * 1024)
2257 		alignment = 256 * 1024;
2258 
2259 	/*
2260 	 * Global GTT PTE registers are special registers which actually forward
2261 	 * writes to a chunk of system memory, which means that there is no risk
2262 	 * that the register values disappear as soon as we call
2263 	 * intel_runtime_pm_put(), so it is correct to wrap only the
2264 	 * pin/unpin/fence and not more.
2265 	 */
2266 	intel_runtime_pm_get(dev_priv);
2267 
2268 	ret = i915_gem_object_pin_to_display_plane(obj, alignment,
2269 						   &view);
2270 	if (ret)
2271 		goto err_pm;
2272 
2273 	/* Install a fence for tiled scan-out. Pre-i965 always needs a
2274 	 * fence, whereas 965+ only requires a fence if using
2275 	 * framebuffer compression.  For simplicity, we always install
2276 	 * a fence as the cost is not that onerous.
2277 	 */
2278 	if (view.type == I915_GGTT_VIEW_NORMAL) {
2279 		ret = i915_gem_object_get_fence(obj);
2280 		if (ret == -EDEADLK) {
2281 			/*
2282 			 * -EDEADLK means there are no free fences
2283 			 * -EDEADLK means there are no free fences
2284 			 * and no pending flips.
2285 			 * This is propagated to atomic, but it uses
2286 			 * -EDEADLK to force a locking recovery, so
2287 			 * change the returned error to -EBUSY.
2288 			 */
2289 			ret = -EBUSY;
2290 			goto err_unpin;
2291 		} else if (ret)
2292 			goto err_unpin;
2293 
2294 		i915_gem_object_pin_fence(obj);
2295 	}
2296 
2297 	intel_runtime_pm_put(dev_priv);
2298 	return 0;
2299 
2300 err_unpin:
2301 	i915_gem_object_unpin_from_display_plane(obj, &view);
2302 err_pm:
2303 	intel_runtime_pm_put(dev_priv);
2304 	return ret;
2305 }
2306 
2307 static void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
2308 {
2309 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2310 	struct i915_ggtt_view view;
2311 
2312 	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
2313 
2314 	intel_fill_fb_ggtt_view(&view, fb, rotation);
2315 
2316 	if (view.type == I915_GGTT_VIEW_NORMAL)
2317 		i915_gem_object_unpin_fence(obj);
2318 
2319 	i915_gem_object_unpin_from_display_plane(obj, &view);
2320 }
2321 
2322 /*
2323  * Adjust the tile offset by moving the difference into
2324  * the x/y offsets.
2325  *
2326  * Input tile dimensions and pitch must already be
2327  * rotated to match x and y, and in pixel units.
2328  */
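/*
 * Worked example (assumed values): with tile_size = 4096, a pitch of
 * 10 tiles and 128x8-pixel tiles, moving old_offset back by 13 tiles
 * (53248 bytes) to new_offset adds 13 / 10 = 1 tile row (8 lines) to
 * *y and 13 % 10 = 3 tiles (384 pixels) to *x.
 */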
2329 static u32 intel_adjust_tile_offset(int *x, int *y,
2330 				    unsigned int tile_width,
2331 				    unsigned int tile_height,
2332 				    unsigned int tile_size,
2333 				    unsigned int pitch_tiles,
2334 				    u32 old_offset,
2335 				    u32 new_offset)
2336 {
2337 	unsigned int tiles;
2338 
2339 	WARN_ON(old_offset & (tile_size - 1));
2340 	WARN_ON(new_offset & (tile_size - 1));
2341 	WARN_ON(new_offset > old_offset);
2342 
2343 	tiles = (old_offset - new_offset) / tile_size;
2344 
2345 	*y += tiles / pitch_tiles * tile_height;
2346 	*x += tiles % pitch_tiles * tile_width;
2347 
2348 	return new_offset;
2349 }
2350 
2351 /*
2352  * Computes the linear offset to the base tile and adjusts
2353  * x, y. Bytes per pixel is assumed to be a power-of-two.
2354  *
2355  * In the 90/270 rotated case, x and y are assumed
2356  * to be already rotated to match the rotated GTT view, and
2357  * pitch is the tile_height aligned framebuffer height.
2358  */
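/*
 * Worked example (assumed values): for an X-tiled fb with 128x8-pixel
 * 4096-byte tiles and a pitch of 8 tiles, x = 200, y = 100 lands in
 * tile (1, 12), so the base tile offset is (12 * 8 + 1) * 4096 and the
 * residual coordinates become x = 200 % 128 = 72, y = 100 % 8 = 4.
 */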
2359 u32 intel_compute_tile_offset(int *x, int *y,
2360 			      const struct drm_framebuffer *fb, int plane,
2361 			      unsigned int pitch,
2362 			      unsigned int rotation)
2363 {
2364 	const struct drm_i915_private *dev_priv = to_i915(fb->dev);
2365 	uint64_t fb_modifier = fb->modifier[plane];
2366 	unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
2367 	u32 offset, offset_aligned, alignment;
2368 
2369 	alignment = intel_surf_alignment(dev_priv, fb_modifier);
2370 	if (alignment)
2371 		alignment--;
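	/*
	 * alignment is now a power-of-two mask (e.g. 256K - 1 = 0x3ffff),
	 * so "offset & ~alignment" below rounds down to the surface
	 * alignment boundary.
	 */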
2372 
2373 	if (fb_modifier != DRM_FORMAT_MOD_NONE) {
2374 		unsigned int tile_size, tile_width, tile_height;
2375 		unsigned int tile_rows, tiles, pitch_tiles;
2376 
2377 		tile_size = intel_tile_size(dev_priv);
2378 		intel_tile_dims(dev_priv, &tile_width, &tile_height,
2379 				fb_modifier, cpp);
2380 
2381 		if (intel_rotation_90_or_270(rotation)) {
2382 			pitch_tiles = pitch / tile_height;
2383 			swap(tile_width, tile_height);
2384 		} else {
2385 			pitch_tiles = pitch / (tile_width * cpp);
2386 		}
2387 
2388 		tile_rows = *y / tile_height;
2389 		*y %= tile_height;
2390 
2391 		tiles = *x / tile_width;
2392 		*x %= tile_width;
2393 
2394 		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2395 		offset_aligned = offset & ~alignment;
2396 
2397 		intel_adjust_tile_offset(x, y, tile_width, tile_height,
2398 					 tile_size, pitch_tiles,
2399 					 offset, offset_aligned);
2400 	} else {
2401 		offset = *y * pitch + *x * cpp;
2402 		offset_aligned = offset & ~alignment;
2403 
2404 		*y = (offset & alignment) / pitch;
2405 		*x = ((offset & alignment) - *y * pitch) / cpp;
2406 	}
2407 
2408 	return offset_aligned;
2409 }
2410 
2411 static int i9xx_format_to_fourcc(int format)
2412 {
2413 	switch (format) {
2414 	case DISPPLANE_8BPP:
2415 		return DRM_FORMAT_C8;
2416 	case DISPPLANE_BGRX555:
2417 		return DRM_FORMAT_XRGB1555;
2418 	case DISPPLANE_BGRX565:
2419 		return DRM_FORMAT_RGB565;
2420 	default:
2421 	case DISPPLANE_BGRX888:
2422 		return DRM_FORMAT_XRGB8888;
2423 	case DISPPLANE_RGBX888:
2424 		return DRM_FORMAT_XBGR8888;
2425 	case DISPPLANE_BGRX101010:
2426 		return DRM_FORMAT_XRGB2101010;
2427 	case DISPPLANE_RGBX101010:
2428 		return DRM_FORMAT_XBGR2101010;
2429 	}
2430 }
2431 
2432 static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2433 {
2434 	switch (format) {
2435 	case PLANE_CTL_FORMAT_RGB_565:
2436 		return DRM_FORMAT_RGB565;
2437 	default:
2438 	case PLANE_CTL_FORMAT_XRGB_8888:
2439 		if (rgb_order) {
2440 			if (alpha)
2441 				return DRM_FORMAT_ABGR8888;
2442 			else
2443 				return DRM_FORMAT_XBGR8888;
2444 		} else {
2445 			if (alpha)
2446 				return DRM_FORMAT_ARGB8888;
2447 			else
2448 				return DRM_FORMAT_XRGB8888;
2449 		}
2450 	case PLANE_CTL_FORMAT_XRGB_2101010:
2451 		if (rgb_order)
2452 			return DRM_FORMAT_XBGR2101010;
2453 		else
2454 			return DRM_FORMAT_XRGB2101010;
2455 	}
2456 }
2457 
2458 static bool
2459 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2460 			      struct intel_initial_plane_config *plane_config)
2461 {
2462 	struct drm_device *dev = crtc->base.dev;
2463 	struct drm_i915_private *dev_priv = to_i915(dev);
2464 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
2465 	struct drm_i915_gem_object *obj = NULL;
2466 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2467 	struct drm_framebuffer *fb = &plane_config->fb->base;
2468 	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
2469 	u32 size_aligned = round_up(plane_config->base + plane_config->size,
2470 				    PAGE_SIZE);
2471 
2472 	size_aligned -= base_aligned;
2473 
2474 	if (plane_config->size == 0)
2475 		return false;
2476 
2477 	/* If the FB is too big, just don't use it since fbdev is not very
2478 	 * important and we should probably use that space with FBC or other
2479 	 * features. */
2480 	if (size_aligned * 2 > ggtt->stolen_usable_size)
2481 		return false;
2482 
2483 	mutex_lock(&dev->struct_mutex);
2484 
2485 	obj = i915_gem_object_create_stolen_for_preallocated(dev,
2486 							     base_aligned,
2487 							     base_aligned,
2488 							     size_aligned);
2489 	if (!obj) {
2490 		mutex_unlock(&dev->struct_mutex);
2491 		return false;
2492 	}
2493 
2494 	obj->tiling_mode = plane_config->tiling;
2495 	if (obj->tiling_mode == I915_TILING_X)
2496 		obj->stride = fb->pitches[0];
2497 
2498 	mode_cmd.pixel_format = fb->pixel_format;
2499 	mode_cmd.width = fb->width;
2500 	mode_cmd.height = fb->height;
2501 	mode_cmd.pitches[0] = fb->pitches[0];
2502 	mode_cmd.modifier[0] = fb->modifier[0];
2503 	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
2504 
2505 	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
2506 				   &mode_cmd, obj)) {
2507 		DRM_DEBUG_KMS("intel fb init failed\n");
2508 		goto out_unref_obj;
2509 	}
2510 
2511 	mutex_unlock(&dev->struct_mutex);
2512 
2513 	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
2514 	return true;
2515 
2516 out_unref_obj:
2517 	drm_gem_object_unreference(&obj->base);
2518 	mutex_unlock(&dev->struct_mutex);
2519 	return false;
2520 }
2521 
2522 /* Update plane->state->fb to match plane->fb after driver-internal updates */
2523 static void
2524 update_state_fb(struct drm_plane *plane)
2525 {
2526 	if (plane->fb == plane->state->fb)
2527 		return;
2528 
2529 	if (plane->state->fb)
2530 		drm_framebuffer_unreference(plane->state->fb);
2531 	plane->state->fb = plane->fb;
2532 	if (plane->state->fb)
2533 		drm_framebuffer_reference(plane->state->fb);
2534 }
2535 
2536 static void
2537 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2538 			     struct intel_initial_plane_config *plane_config)
2539 {
2540 	struct drm_device *dev = intel_crtc->base.dev;
2541 	struct drm_i915_private *dev_priv = dev->dev_private;
2542 	struct drm_crtc *c;
2543 	struct intel_crtc *i;
2544 	struct drm_i915_gem_object *obj;
2545 	struct drm_plane *primary = intel_crtc->base.primary;
2546 	struct drm_plane_state *plane_state = primary->state;
2547 	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
2548 	struct intel_plane *intel_plane = to_intel_plane(primary);
2549 	struct intel_plane_state *intel_state =
2550 		to_intel_plane_state(plane_state);
2551 	struct drm_framebuffer *fb;
2552 
2553 	if (!plane_config->fb)
2554 		return;
2555 
2556 	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
2557 		fb = &plane_config->fb->base;
2558 		goto valid_fb;
2559 	}
2560 
2561 	kfree(plane_config->fb);
2562 
2563 	/*
2564 	 * Failed to alloc the obj, check to see if we should share
2565 	 * an fb with another CRTC instead
2566 	 */
2567 	for_each_crtc(dev, c) {
2568 		i = to_intel_crtc(c);
2569 
2570 		if (c == &intel_crtc->base)
2571 			continue;
2572 
2573 		if (!i->active)
2574 			continue;
2575 
2576 		fb = c->primary->fb;
2577 		if (!fb)
2578 			continue;
2579 
2580 		obj = intel_fb_obj(fb);
2581 		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
2582 			drm_framebuffer_reference(fb);
2583 			goto valid_fb;
2584 		}
2585 	}
2586 
2587 	/*
2588 	 * We've failed to reconstruct the BIOS FB.  Current display state
2589 	 * indicates that the primary plane is visible, but has a NULL FB,
2590 	 * which will lead to problems later if we don't fix it up.  The
2591 	 * simplest solution is to just disable the primary plane now and
2592 	 * pretend the BIOS never had it enabled.
2593 	 */
2594 	to_intel_plane_state(plane_state)->visible = false;
2595 	crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
2596 	intel_pre_disable_primary_noatomic(&intel_crtc->base);
2597 	intel_plane->disable_plane(primary, &intel_crtc->base);
2598 
2599 	return;
2600 
2601 valid_fb:
2602 	plane_state->src_x = 0;
2603 	plane_state->src_y = 0;
2604 	plane_state->src_w = fb->width << 16;
2605 	plane_state->src_h = fb->height << 16;
2606 
2607 	plane_state->crtc_x = 0;
2608 	plane_state->crtc_y = 0;
2609 	plane_state->crtc_w = fb->width;
2610 	plane_state->crtc_h = fb->height;
2611 
2612 	intel_state->src.x1 = plane_state->src_x;
2613 	intel_state->src.y1 = plane_state->src_y;
2614 	intel_state->src.x2 = plane_state->src_x + plane_state->src_w;
2615 	intel_state->src.y2 = plane_state->src_y + plane_state->src_h;
2616 	intel_state->dst.x1 = plane_state->crtc_x;
2617 	intel_state->dst.y1 = plane_state->crtc_y;
2618 	intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
2619 	intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h;
2620 
2621 	obj = intel_fb_obj(fb);
2622 	if (obj->tiling_mode != I915_TILING_NONE)
2623 		dev_priv->preserve_bios_swizzle = true;
2624 
2625 	drm_framebuffer_reference(fb);
2626 	primary->fb = primary->state->fb = fb;
2627 	primary->crtc = primary->state->crtc = &intel_crtc->base;
2628 	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
2629 	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
2630 }
2631 
2632 static void i9xx_update_primary_plane(struct drm_plane *primary,
2633 				      const struct intel_crtc_state *crtc_state,
2634 				      const struct intel_plane_state *plane_state)
2635 {
2636 	struct drm_device *dev = primary->dev;
2637 	struct drm_i915_private *dev_priv = dev->dev_private;
2638 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
2639 	struct drm_framebuffer *fb = plane_state->base.fb;
2640 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2641 	int plane = intel_crtc->plane;
2642 	u32 linear_offset;
2643 	u32 dspcntr;
2644 	i915_reg_t reg = DSPCNTR(plane);
2645 	unsigned int rotation = plane_state->base.rotation;
2646 	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
2647 	int x = plane_state->src.x1 >> 16;
2648 	int y = plane_state->src.y1 >> 16;
2649 
2650 	dspcntr = DISPPLANE_GAMMA_ENABLE;
2651 
2652 	dspcntr |= DISPLAY_PLANE_ENABLE;
2653 
2654 	if (INTEL_INFO(dev)->gen < 4) {
2655 		if (intel_crtc->pipe == PIPE_B)
2656 			dspcntr |= DISPPLANE_SEL_PIPE_B;
2657 
2658 		/* pipesrc and dspsize control the size that is scaled from,
2659 		 * which should always be the user's requested size.
2660 		 */
2661 		I915_WRITE(DSPSIZE(plane),
2662 			   ((crtc_state->pipe_src_h - 1) << 16) |
2663 			   (crtc_state->pipe_src_w - 1));
2664 		I915_WRITE(DSPPOS(plane), 0);
2665 	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
2666 		I915_WRITE(PRIMSIZE(plane),
2667 			   ((crtc_state->pipe_src_h - 1) << 16) |
2668 			   (crtc_state->pipe_src_w - 1));
2669 		I915_WRITE(PRIMPOS(plane), 0);
2670 		I915_WRITE(PRIMCNSTALPHA(plane), 0);
2671 	}
2672 
2673 	switch (fb->pixel_format) {
2674 	case DRM_FORMAT_C8:
2675 		dspcntr |= DISPPLANE_8BPP;
2676 		break;
2677 	case DRM_FORMAT_XRGB1555:
2678 		dspcntr |= DISPPLANE_BGRX555;
2679 		break;
2680 	case DRM_FORMAT_RGB565:
2681 		dspcntr |= DISPPLANE_BGRX565;
2682 		break;
2683 	case DRM_FORMAT_XRGB8888:
2684 		dspcntr |= DISPPLANE_BGRX888;
2685 		break;
2686 	case DRM_FORMAT_XBGR8888:
2687 		dspcntr |= DISPPLANE_RGBX888;
2688 		break;
2689 	case DRM_FORMAT_XRGB2101010:
2690 		dspcntr |= DISPPLANE_BGRX101010;
2691 		break;
2692 	case DRM_FORMAT_XBGR2101010:
2693 		dspcntr |= DISPPLANE_RGBX101010;
2694 		break;
2695 	default:
2696 		BUG();
2697 	}
2698 
2699 	if (INTEL_INFO(dev)->gen >= 4 &&
2700 	    obj->tiling_mode != I915_TILING_NONE)
2701 		dspcntr |= DISPPLANE_TILED;
2702 
2703 	if (IS_G4X(dev))
2704 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2705 
2706 	linear_offset = y * fb->pitches[0] + x * cpp;
2707 
2708 	if (INTEL_INFO(dev)->gen >= 4) {
2709 		intel_crtc->dspaddr_offset =
2710 			intel_compute_tile_offset(&x, &y, fb, 0,
2711 						  fb->pitches[0], rotation);
2712 		linear_offset -= intel_crtc->dspaddr_offset;
2713 	} else {
2714 		intel_crtc->dspaddr_offset = linear_offset;
2715 	}
2716 
2717 	if (rotation == BIT(DRM_ROTATE_180)) {
2718 		dspcntr |= DISPPLANE_ROTATE_180;
2719 
2720 		x += (crtc_state->pipe_src_w - 1);
2721 		y += (crtc_state->pipe_src_h - 1);
2722 
2723 		/* Find the last pixel of the last line of the display
2724 		 * data and add it to linear_offset. */
2725 		linear_offset +=
2726 			(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
2727 			(crtc_state->pipe_src_w - 1) * cpp;
2728 	}
2729 
2730 	intel_crtc->adjusted_x = x;
2731 	intel_crtc->adjusted_y = y;
2732 
2733 	I915_WRITE(reg, dspcntr);
2734 
2735 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2736 	if (INTEL_INFO(dev)->gen >= 4) {
2737 		I915_WRITE(DSPSURF(plane),
2738 			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2739 		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2740 		I915_WRITE(DSPLINOFF(plane), linear_offset);
2741 	} else
2742 		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
2743 	POSTING_READ(reg);
2744 }
2745 
2746 static void i9xx_disable_primary_plane(struct drm_plane *primary,
2747 				       struct drm_crtc *crtc)
2748 {
2749 	struct drm_device *dev = crtc->dev;
2750 	struct drm_i915_private *dev_priv = dev->dev_private;
2751 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2752 	int plane = intel_crtc->plane;
2753 
2754 	I915_WRITE(DSPCNTR(plane), 0);
2755 	if (INTEL_INFO(dev_priv)->gen >= 4)
2756 		I915_WRITE(DSPSURF(plane), 0);
2757 	else
2758 		I915_WRITE(DSPADDR(plane), 0);
2759 	POSTING_READ(DSPCNTR(plane));
2760 }
2761 
2762 static void ironlake_update_primary_plane(struct drm_plane *primary,
2763 					  const struct intel_crtc_state *crtc_state,
2764 					  const struct intel_plane_state *plane_state)
2765 {
2766 	struct drm_device *dev = primary->dev;
2767 	struct drm_i915_private *dev_priv = dev->dev_private;
2768 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
2769 	struct drm_framebuffer *fb = plane_state->base.fb;
2770 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2771 	int plane = intel_crtc->plane;
2772 	u32 linear_offset;
2773 	u32 dspcntr;
2774 	i915_reg_t reg = DSPCNTR(plane);
2775 	unsigned int rotation = plane_state->base.rotation;
2776 	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
2777 	int x = plane_state->src.x1 >> 16;
2778 	int y = plane_state->src.y1 >> 16;
2779 
2780 	dspcntr = DISPPLANE_GAMMA_ENABLE;
2781 	dspcntr |= DISPLAY_PLANE_ENABLE;
2782 
2783 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2784 		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
2785 
2786 	switch (fb->pixel_format) {
2787 	case DRM_FORMAT_C8:
2788 		dspcntr |= DISPPLANE_8BPP;
2789 		break;
2790 	case DRM_FORMAT_RGB565:
2791 		dspcntr |= DISPPLANE_BGRX565;
2792 		break;
2793 	case DRM_FORMAT_XRGB8888:
2794 		dspcntr |= DISPPLANE_BGRX888;
2795 		break;
2796 	case DRM_FORMAT_XBGR8888:
2797 		dspcntr |= DISPPLANE_RGBX888;
2798 		break;
2799 	case DRM_FORMAT_XRGB2101010:
2800 		dspcntr |= DISPPLANE_BGRX101010;
2801 		break;
2802 	case DRM_FORMAT_XBGR2101010:
2803 		dspcntr |= DISPPLANE_RGBX101010;
2804 		break;
2805 	default:
2806 		BUG();
2807 	}
2808 
2809 	if (obj->tiling_mode != I915_TILING_NONE)
2810 		dspcntr |= DISPPLANE_TILED;
2811 
2812 	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
2813 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2814 
2815 	linear_offset = y * fb->pitches[0] + x * cpp;
2816 	intel_crtc->dspaddr_offset =
2817 		intel_compute_tile_offset(&x, &y, fb, 0,
2818 					  fb->pitches[0], rotation);
2819 	linear_offset -= intel_crtc->dspaddr_offset;
2820 	if (rotation == BIT(DRM_ROTATE_180)) {
2821 		dspcntr |= DISPPLANE_ROTATE_180;
2822 
2823 		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
2824 			x += (crtc_state->pipe_src_w - 1);
2825 			y += (crtc_state->pipe_src_h - 1);
2826 
2827 			/* Find the last pixel of the last line of the display
2828 			 * data and add it to linear_offset. */
2829 			linear_offset +=
2830 				(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
2831 				(crtc_state->pipe_src_w - 1) * cpp;
2832 		}
2833 	}
2834 
2835 	intel_crtc->adjusted_x = x;
2836 	intel_crtc->adjusted_y = y;
2837 
2838 	I915_WRITE(reg, dspcntr);
2839 
2840 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2841 	I915_WRITE(DSPSURF(plane),
2842 		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2843 	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2844 		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2845 	} else {
2846 		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2847 		I915_WRITE(DSPLINOFF(plane), linear_offset);
2848 	}
2849 	POSTING_READ(reg);
2850 }
2851 
2852 u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
2853 			      uint64_t fb_modifier, uint32_t pixel_format)
2854 {
2855 	if (fb_modifier == DRM_FORMAT_MOD_NONE) {
2856 		return 64;
2857 	} else {
2858 		int cpp = drm_format_plane_cpp(pixel_format, 0);
2859 
2860 		return intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
2861 	}
2862 }
2863 
2864 u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
2865 			   struct drm_i915_gem_object *obj,
2866 			   unsigned int plane)
2867 {
2868 	struct i915_ggtt_view view;
2869 	struct i915_vma *vma;
2870 	u64 offset;
2871 
2872 	intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
2873 				intel_plane->base.state->rotation);
2874 
2875 	vma = i915_gem_obj_to_ggtt_view(obj, &view);
2876 	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
2877 		view.type))
2878 		return -1;
2879 
2880 	offset = vma->node.start;
2881 
2882 	if (plane == 1) {
2883 		offset += vma->ggtt_view.params.rotated.uv_start_page *
2884 			  PAGE_SIZE;
2885 	}
2886 
2887 	WARN_ON(upper_32_bits(offset));
2888 
2889 	return lower_32_bits(offset);
2890 }
2891 
2892 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
2893 {
2894 	struct drm_device *dev = intel_crtc->base.dev;
2895 	struct drm_i915_private *dev_priv = dev->dev_private;
2896 
2897 	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
2898 	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
2899 	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
2900 }
2901 
2902 /*
2903  * This function detaches (i.e. unbinds) unused scalers in hardware.
2904  */
2905 static void skl_detach_scalers(struct intel_crtc *intel_crtc)
2906 {
2907 	struct intel_crtc_scaler_state *scaler_state;
2908 	int i;
2909 
2910 	scaler_state = &intel_crtc->config->scaler_state;
2911 
2912 	/* loop through and disable scalers that aren't in use */
2913 	for (i = 0; i < intel_crtc->num_scalers; i++) {
2914 		if (!scaler_state->scalers[i].in_use)
2915 			skl_detach_scaler(intel_crtc, i);
2916 	}
2917 }
2918 
2919 u32 skl_plane_ctl_format(uint32_t pixel_format)
2920 {
2921 	switch (pixel_format) {
2922 	case DRM_FORMAT_C8:
2923 		return PLANE_CTL_FORMAT_INDEXED;
2924 	case DRM_FORMAT_RGB565:
2925 		return PLANE_CTL_FORMAT_RGB_565;
2926 	case DRM_FORMAT_XBGR8888:
2927 		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
2928 	case DRM_FORMAT_XRGB8888:
2929 		return PLANE_CTL_FORMAT_XRGB_8888;
2930 	/*
2931 	 * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
2932 	 * to be already pre-multiplied. We need to add a knob (or a different
2933 	 * DRM_FORMAT) for user-space to configure that.
2934 	 */
2935 	case DRM_FORMAT_ABGR8888:
2936 		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
2937 			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
2938 	case DRM_FORMAT_ARGB8888:
2939 		return PLANE_CTL_FORMAT_XRGB_8888 |
2940 			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
2941 	case DRM_FORMAT_XRGB2101010:
2942 		return PLANE_CTL_FORMAT_XRGB_2101010;
2943 	case DRM_FORMAT_XBGR2101010:
2944 		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
2945 	case DRM_FORMAT_YUYV:
2946 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
2947 	case DRM_FORMAT_YVYU:
2948 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
2949 	case DRM_FORMAT_UYVY:
2950 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
2951 	case DRM_FORMAT_VYUY:
2952 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
2953 	default:
2954 		MISSING_CASE(pixel_format);
2955 	}
2956 
2957 	return 0;
2958 }
2959 
2960 u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
2961 {
2962 	switch (fb_modifier) {
2963 	case DRM_FORMAT_MOD_NONE:
2964 		break;
2965 	case I915_FORMAT_MOD_X_TILED:
2966 		return PLANE_CTL_TILED_X;
2967 	case I915_FORMAT_MOD_Y_TILED:
2968 		return PLANE_CTL_TILED_Y;
2969 	case I915_FORMAT_MOD_Yf_TILED:
2970 		return PLANE_CTL_TILED_YF;
2971 	default:
2972 		MISSING_CASE(fb_modifier);
2973 	}
2974 
2975 	return 0;
2976 }
2977 
2978 u32 skl_plane_ctl_rotation(unsigned int rotation)
2979 {
2980 	switch (rotation) {
2981 	case BIT(DRM_ROTATE_0):
2982 		break;
2983 	/*
2984 	 * DRM_ROTATE_ is counter-clockwise (to stay compatible with Xrandr)
2985 	 * while i915 HW rotation is clockwise; that's why 90 and 270 are swapped.
2986 	 */
2987 	case BIT(DRM_ROTATE_90):
2988 		return PLANE_CTL_ROTATE_270;
2989 	case BIT(DRM_ROTATE_180):
2990 		return PLANE_CTL_ROTATE_180;
2991 	case BIT(DRM_ROTATE_270):
2992 		return PLANE_CTL_ROTATE_90;
2993 	default:
2994 		MISSING_CASE(rotation);
2995 	}
2996 
2997 	return 0;
2998 }
2999 
3000 static void skylake_update_primary_plane(struct drm_plane *plane,
3001 					 const struct intel_crtc_state *crtc_state,
3002 					 const struct intel_plane_state *plane_state)
3003 {
3004 	struct drm_device *dev = plane->dev;
3005 	struct drm_i915_private *dev_priv = dev->dev_private;
3006 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
3007 	struct drm_framebuffer *fb = plane_state->base.fb;
3008 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
3009 	int pipe = intel_crtc->pipe;
3010 	u32 plane_ctl, stride_div, stride;
3011 	u32 tile_height, plane_offset, plane_size;
3012 	unsigned int rotation = plane_state->base.rotation;
3013 	int x_offset, y_offset;
3014 	u32 surf_addr;
3015 	int scaler_id = plane_state->scaler_id;
3016 	int src_x = plane_state->src.x1 >> 16;
3017 	int src_y = plane_state->src.y1 >> 16;
3018 	int src_w = drm_rect_width(&plane_state->src) >> 16;
3019 	int src_h = drm_rect_height(&plane_state->src) >> 16;
3020 	int dst_x = plane_state->dst.x1;
3021 	int dst_y = plane_state->dst.y1;
3022 	int dst_w = drm_rect_width(&plane_state->dst);
3023 	int dst_h = drm_rect_height(&plane_state->dst);
3024 
3025 	plane_ctl = PLANE_CTL_ENABLE |
3026 		    PLANE_CTL_PIPE_GAMMA_ENABLE |
3027 		    PLANE_CTL_PIPE_CSC_ENABLE;
3028 
3029 	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
3030 	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
3031 	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
3032 	plane_ctl |= skl_plane_ctl_rotation(rotation);
3033 
3034 	stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
3035 					       fb->pixel_format);
3036 	surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);
3037 
3038 	WARN_ON(drm_rect_width(&plane_state->src) == 0);
3039 
3040 	if (intel_rotation_90_or_270(rotation)) {
3041 		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
3042 
3043 		/* stride: surface height in tiles (rotated GTT view) */
3044 		tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
3045 		stride = DIV_ROUND_UP(fb->height, tile_height);
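		/* In the rotated view, x runs along the original y axis,
		 * measured from the tile-aligned fb height. */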
3046 		x_offset = stride * tile_height - src_y - src_h;
3047 		y_offset = src_x;
3048 		plane_size = (src_w - 1) << 16 | (src_h - 1);
3049 	} else {
3050 		stride = fb->pitches[0] / stride_div;
3051 		x_offset = src_x;
3052 		y_offset = src_y;
3053 		plane_size = (src_h - 1) << 16 | (src_w - 1);
3054 	}
3055 	plane_offset = y_offset << 16 | x_offset;
3056 
3057 	intel_crtc->adjusted_x = x_offset;
3058 	intel_crtc->adjusted_y = y_offset;
3059 
3060 	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
3061 	I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
3062 	I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
3063 	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
3064 
3065 	if (scaler_id >= 0) {
3066 		uint32_t ps_ctrl = 0;
3067 
3068 		WARN_ON(!dst_w || !dst_h);
3069 		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
3070 			crtc_state->scaler_state.scalers[scaler_id].mode;
3071 		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
3072 		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
3073 		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
3074 		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
3075 		I915_WRITE(PLANE_POS(pipe, 0), 0);
3076 	} else {
3077 		I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
3078 	}
3079 
3080 	I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);
3081 
3082 	POSTING_READ(PLANE_SURF(pipe, 0));
3083 }
3084 
3085 static void skylake_disable_primary_plane(struct drm_plane *primary,
3086 					  struct drm_crtc *crtc)
3087 {
3088 	struct drm_device *dev = crtc->dev;
3089 	struct drm_i915_private *dev_priv = dev->dev_private;
3090 	int pipe = to_intel_crtc(crtc)->pipe;
3091 
3092 	I915_WRITE(PLANE_CTL(pipe, 0), 0);
3093 	I915_WRITE(PLANE_SURF(pipe, 0), 0);
3094 	POSTING_READ(PLANE_SURF(pipe, 0));
3095 }
3096 
3097 /* Assume fb object is pinned & idle & fenced and just update base pointers */
3098 static int
3099 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
3100 			   int x, int y, enum mode_set_atomic state)
3101 {
3102 	/* Support for kgdboc is disabled; this needs a major rework. */
3103 	DRM_ERROR("legacy panic handler not supported any more.\n");
3104 
3105 	return -ENODEV;
3106 }
3107 
3108 static void intel_complete_page_flips(struct drm_device *dev)
3109 {
3110 	struct drm_crtc *crtc;
3111 
3112 	for_each_crtc(dev, crtc) {
3113 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3114 		enum plane plane = intel_crtc->plane;
3115 
3116 		intel_prepare_page_flip(dev, plane);
3117 		intel_finish_page_flip_plane(dev, plane);
3118 	}
3119 }
3120 
3121 static void intel_update_primary_planes(struct drm_device *dev)
3122 {
3123 	struct drm_crtc *crtc;
3124 
3125 	for_each_crtc(dev, crtc) {
3126 		struct intel_plane *plane = to_intel_plane(crtc->primary);
3127 		struct intel_plane_state *plane_state;
3128 
3129 		drm_modeset_lock_crtc(crtc, &plane->base);
3130 		plane_state = to_intel_plane_state(plane->base.state);
3131 
3132 		if (plane_state->visible)
3133 			plane->update_plane(&plane->base,
3134 					    to_intel_crtc_state(crtc->state),
3135 					    plane_state);
3136 
3137 		drm_modeset_unlock_crtc(crtc);
3138 	}
3139 }
3140 
3141 void intel_prepare_reset(struct drm_device *dev)
3142 {
3143 	/* no reset support for gen2 */
3144 	if (IS_GEN2(dev))
3145 		return;
3146 
3147 	/* reset doesn't touch the display */
3148 	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
3149 		return;
3150 
3151 	drm_modeset_lock_all(dev);
3152 	/*
3153 	 * Disabling the crtcs gracefully seems nicer. Also the
3154 	 * g33 docs say we should at least disable all the planes.
3155 	 */
3156 	intel_display_suspend(dev);
3157 }
3158 
3159 void intel_finish_reset(struct drm_device *dev)
3160 {
3161 	struct drm_i915_private *dev_priv = to_i915(dev);
3162 
3163 	/*
3164 	 * Flips in the rings will be nuked by the reset,
3165 	 * so complete all pending flips so that user space
3166 	 * will get its events and not get stuck.
3167 	 */
3168 	intel_complete_page_flips(dev);
3169 
3170 	/* no reset support for gen2 */
3171 	if (IS_GEN2(dev))
3172 		return;
3173 
3174 	/* reset doesn't touch the display */
3175 	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
3176 		/*
3177 		 * Flips in the rings have been nuked by the reset,
3178 		 * so update the base address of all primary
3179 		 * planes to the last fb to make sure we're
3180 		 * showing the correct fb after a reset.
3181 		 *
3182 		 * FIXME: Atomic will make this obsolete since we won't schedule
3183 		 * CS-based flips (which might get lost in gpu resets) any more.
3184 		 */
3185 		intel_update_primary_planes(dev);
3186 		return;
3187 	}
3188 
3189 	/*
3190 	 * The display has been reset as well,
3191 	 * so we need a full re-initialization.
3192 	 */
3193 	intel_runtime_pm_disable_interrupts(dev_priv);
3194 	intel_runtime_pm_enable_interrupts(dev_priv);
3195 
3196 	intel_modeset_init_hw(dev);
3197 
3198 	spin_lock_irq(&dev_priv->irq_lock);
3199 	if (dev_priv->display.hpd_irq_setup)
3200 		dev_priv->display.hpd_irq_setup(dev);
3201 	spin_unlock_irq(&dev_priv->irq_lock);
3202 
3203 	intel_display_resume(dev);
3204 
3205 	intel_hpd_init(dev_priv);
3206 
3207 	drm_modeset_unlock_all(dev);
3208 }
3209 
3210 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
3211 {
3212 	struct drm_device *dev = crtc->dev;
3213 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3214 	unsigned reset_counter;
3215 	bool pending;
3216 
3217 	reset_counter = i915_reset_counter(&to_i915(dev)->gpu_error);
3218 	if (intel_crtc->reset_counter != reset_counter)
3219 		return false;
3220 
3221 	spin_lock_irq(&dev->event_lock);
3222 	pending = to_intel_crtc(crtc)->unpin_work != NULL;
3223 	spin_unlock_irq(&dev->event_lock);
3224 
3225 	return pending;
3226 }
3227 
3228 static void intel_update_pipe_config(struct intel_crtc *crtc,
3229 				     struct intel_crtc_state *old_crtc_state)
3230 {
3231 	struct drm_device *dev = crtc->base.dev;
3232 	struct drm_i915_private *dev_priv = dev->dev_private;
3233 	struct intel_crtc_state *pipe_config =
3234 		to_intel_crtc_state(crtc->base.state);
3235 
3236 	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
3237 	crtc->base.mode = crtc->base.state->mode;
3238 
3239 	DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
3240 		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
3241 		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
3242 
3243 	/*
3244 	 * Update pipe size and adjust fitter if needed: the reason for this is
3245 	 * that in compute_mode_changes we check the native mode (not the pfit
3246 	 * mode) to see if we can flip rather than do a full mode set. In the
3247 	 * fastboot case, we'll flip, but if we don't update the pipesrc and
3248 	 * pfit state, we'll end up with a big fb scanned out into the wrong
3249 	 * sized surface.
3250 	 */
3251 
3252 	I915_WRITE(PIPESRC(crtc->pipe),
3253 		   ((pipe_config->pipe_src_w - 1) << 16) |
3254 		   (pipe_config->pipe_src_h - 1));
3255 
3256 	/* on skylake this is done by detaching scalers */
3257 	if (INTEL_INFO(dev)->gen >= 9) {
3258 		skl_detach_scalers(crtc);
3259 
3260 		if (pipe_config->pch_pfit.enabled)
3261 			skylake_pfit_enable(crtc);
3262 	} else if (HAS_PCH_SPLIT(dev)) {
3263 		if (pipe_config->pch_pfit.enabled)
3264 			ironlake_pfit_enable(crtc);
3265 		else if (old_crtc_state->pch_pfit.enabled)
3266 			ironlake_pfit_disable(crtc, true);
3267 	}
3268 }
3269 
3270 static void intel_fdi_normal_train(struct drm_crtc *crtc)
3271 {
3272 	struct drm_device *dev = crtc->dev;
3273 	struct drm_i915_private *dev_priv = dev->dev_private;
3274 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3275 	int pipe = intel_crtc->pipe;
3276 	i915_reg_t reg;
3277 	u32 temp;
3278 
3279 	/* enable normal train */
3280 	reg = FDI_TX_CTL(pipe);
3281 	temp = I915_READ(reg);
3282 	if (IS_IVYBRIDGE(dev)) {
3283 		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3284 		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
3285 	} else {
3286 		temp &= ~FDI_LINK_TRAIN_NONE;
3287 		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
3288 	}
3289 	I915_WRITE(reg, temp);
3290 
3291 	reg = FDI_RX_CTL(pipe);
3292 	temp = I915_READ(reg);
3293 	if (HAS_PCH_CPT(dev)) {
3294 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3295 		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
3296 	} else {
3297 		temp &= ~FDI_LINK_TRAIN_NONE;
3298 		temp |= FDI_LINK_TRAIN_NONE;
3299 	}
3300 	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
3301 
3302 	/* wait one idle pattern time */
3303 	POSTING_READ(reg);
3304 	udelay(1000);
3305 
3306 	/* IVB wants error correction enabled */
3307 	if (IS_IVYBRIDGE(dev))
3308 		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
3309 			   FDI_FE_ERRC_ENABLE);
3310 }
3311 
3312 /* The FDI link training functions for ILK/Ibexpeak. */
3313 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
3314 {
3315 	struct drm_device *dev = crtc->dev;
3316 	struct drm_i915_private *dev_priv = dev->dev_private;
3317 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3318 	int pipe = intel_crtc->pipe;
3319 	i915_reg_t reg;
3320 	u32 temp, tries;
3321 
3322 	/* FDI needs bits from pipe first */
3323 	assert_pipe_enabled(dev_priv, pipe);
3324 
3325 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
3326 	   for train result */
3327 	reg = FDI_RX_IMR(pipe);
3328 	temp = I915_READ(reg);
3329 	temp &= ~FDI_RX_SYMBOL_LOCK;
3330 	temp &= ~FDI_RX_BIT_LOCK;
3331 	I915_WRITE(reg, temp);
3332 	I915_READ(reg);
3333 	udelay(150);
3334 
3335 	/* enable CPU FDI TX and PCH FDI RX */
3336 	reg = FDI_TX_CTL(pipe);
3337 	temp = I915_READ(reg);
3338 	temp &= ~FDI_DP_PORT_WIDTH_MASK;
3339 	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3340 	temp &= ~FDI_LINK_TRAIN_NONE;
3341 	temp |= FDI_LINK_TRAIN_PATTERN_1;
3342 	I915_WRITE(reg, temp | FDI_TX_ENABLE);
3343 
3344 	reg = FDI_RX_CTL(pipe);
3345 	temp = I915_READ(reg);
3346 	temp &= ~FDI_LINK_TRAIN_NONE;
3347 	temp |= FDI_LINK_TRAIN_PATTERN_1;
3348 	I915_WRITE(reg, temp | FDI_RX_ENABLE);
3349 
3350 	POSTING_READ(reg);
3351 	udelay(150);
3352 
3353 	/* Ironlake workaround: enable clock pointer after FDI enable */
3354 	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3355 	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
3356 		   FDI_RX_PHASE_SYNC_POINTER_EN);
3357 
3358 	reg = FDI_RX_IIR(pipe);
3359 	for (tries = 0; tries < 5; tries++) {
3360 		temp = I915_READ(reg);
3361 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3362 
3363 		if ((temp & FDI_RX_BIT_LOCK)) {
3364 			DRM_DEBUG_KMS("FDI train 1 done.\n");
3365 			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3366 			break;
3367 		}
3368 	}
3369 	if (tries == 5)
3370 		DRM_ERROR("FDI train 1 fail!\n");
3371 
3372 	/* Train 2 */
3373 	reg = FDI_TX_CTL(pipe);
3374 	temp = I915_READ(reg);
3375 	temp &= ~FDI_LINK_TRAIN_NONE;
3376 	temp |= FDI_LINK_TRAIN_PATTERN_2;
3377 	I915_WRITE(reg, temp);
3378 
3379 	reg = FDI_RX_CTL(pipe);
3380 	temp = I915_READ(reg);
3381 	temp &= ~FDI_LINK_TRAIN_NONE;
3382 	temp |= FDI_LINK_TRAIN_PATTERN_2;
3383 	I915_WRITE(reg, temp);
3384 
3385 	POSTING_READ(reg);
3386 	udelay(150);
3387 
3388 	reg = FDI_RX_IIR(pipe);
3389 	for (tries = 0; tries < 5; tries++) {
3390 		temp = I915_READ(reg);
3391 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3392 
3393 		if (temp & FDI_RX_SYMBOL_LOCK) {
3394 			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3395 			DRM_DEBUG_KMS("FDI train 2 done.\n");
3396 			break;
3397 		}
3398 	}
3399 	if (tries == 5)
3400 		DRM_ERROR("FDI train 2 fail!\n");
3401 
3402 	DRM_DEBUG_KMS("FDI train done\n");
3403 
3404 }
3405 
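/*
 * These appear to be the four voltage-swing/pre-emphasis combinations
 * that the training loops below step through until the receiver reports
 * lock (cf. the "Try each vswing and preemphasis setting" comment in
 * ivb_manual_fdi_link_train()).
 */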
3406 static const int snb_b_fdi_train_param[] = {
3407 	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
3408 	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
3409 	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
3410 	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
3411 };
3412 
3413 /* The FDI link training functions for SNB/Cougarpoint. */
3414 static void gen6_fdi_link_train(struct drm_crtc *crtc)
3415 {
3416 	struct drm_device *dev = crtc->dev;
3417 	struct drm_i915_private *dev_priv = dev->dev_private;
3418 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3419 	int pipe = intel_crtc->pipe;
3420 	i915_reg_t reg;
3421 	u32 temp, i, retry;
3422 
3423 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3424 	   for train result */
3425 	reg = FDI_RX_IMR(pipe);
3426 	temp = I915_READ(reg);
3427 	temp &= ~FDI_RX_SYMBOL_LOCK;
3428 	temp &= ~FDI_RX_BIT_LOCK;
3429 	I915_WRITE(reg, temp);
3430 
3431 	POSTING_READ(reg);
3432 	udelay(150);
3433 
3434 	/* enable CPU FDI TX and PCH FDI RX */
3435 	reg = FDI_TX_CTL(pipe);
3436 	temp = I915_READ(reg);
3437 	temp &= ~FDI_DP_PORT_WIDTH_MASK;
3438 	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3439 	temp &= ~FDI_LINK_TRAIN_NONE;
3440 	temp |= FDI_LINK_TRAIN_PATTERN_1;
3441 	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3442 	/* SNB-B */
3443 	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3444 	I915_WRITE(reg, temp | FDI_TX_ENABLE);
3445 
3446 	I915_WRITE(FDI_RX_MISC(pipe),
3447 		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3448 
3449 	reg = FDI_RX_CTL(pipe);
3450 	temp = I915_READ(reg);
3451 	if (HAS_PCH_CPT(dev)) {
3452 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3453 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3454 	} else {
3455 		temp &= ~FDI_LINK_TRAIN_NONE;
3456 		temp |= FDI_LINK_TRAIN_PATTERN_1;
3457 	}
3458 	I915_WRITE(reg, temp | FDI_RX_ENABLE);
3459 
3460 	POSTING_READ(reg);
3461 	udelay(150);
3462 
3463 	for (i = 0; i < 4; i++) {
3464 		reg = FDI_TX_CTL(pipe);
3465 		temp = I915_READ(reg);
3466 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3467 		temp |= snb_b_fdi_train_param[i];
3468 		I915_WRITE(reg, temp);
3469 
3470 		POSTING_READ(reg);
3471 		udelay(500);
3472 
3473 		for (retry = 0; retry < 5; retry++) {
3474 			reg = FDI_RX_IIR(pipe);
3475 			temp = I915_READ(reg);
3476 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3477 			if (temp & FDI_RX_BIT_LOCK) {
3478 				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3479 				DRM_DEBUG_KMS("FDI train 1 done.\n");
3480 				break;
3481 			}
3482 			udelay(50);
3483 		}
3484 		if (retry < 5)
3485 			break;
3486 	}
3487 	if (i == 4)
3488 		DRM_ERROR("FDI train 1 fail!\n");
3489 
3490 	/* Train 2 */
3491 	reg = FDI_TX_CTL(pipe);
3492 	temp = I915_READ(reg);
3493 	temp &= ~FDI_LINK_TRAIN_NONE;
3494 	temp |= FDI_LINK_TRAIN_PATTERN_2;
3495 	if (IS_GEN6(dev)) {
3496 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3497 		/* SNB-B */
3498 		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3499 	}
3500 	I915_WRITE(reg, temp);
3501 
3502 	reg = FDI_RX_CTL(pipe);
3503 	temp = I915_READ(reg);
3504 	if (HAS_PCH_CPT(dev)) {
3505 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3506 		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3507 	} else {
3508 		temp &= ~FDI_LINK_TRAIN_NONE;
3509 		temp |= FDI_LINK_TRAIN_PATTERN_2;
3510 	}
3511 	I915_WRITE(reg, temp);
3512 
3513 	POSTING_READ(reg);
3514 	udelay(150);
3515 
3516 	for (i = 0; i < 4; i++) {
3517 		reg = FDI_TX_CTL(pipe);
3518 		temp = I915_READ(reg);
3519 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3520 		temp |= snb_b_fdi_train_param[i];
3521 		I915_WRITE(reg, temp);
3522 
3523 		POSTING_READ(reg);
3524 		udelay(500);
3525 
3526 		for (retry = 0; retry < 5; retry++) {
3527 			reg = FDI_RX_IIR(pipe);
3528 			temp = I915_READ(reg);
3529 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3530 			if (temp & FDI_RX_SYMBOL_LOCK) {
3531 				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3532 				DRM_DEBUG_KMS("FDI train 2 done.\n");
3533 				break;
3534 			}
3535 			udelay(50);
3536 		}
3537 		if (retry < 5)
3538 			break;
3539 	}
3540 	if (i == 4)
3541 		DRM_ERROR("FDI train 2 fail!\n");
3542 
3543 	DRM_DEBUG_KMS("FDI train done.\n");
3544 }
3545 
3546 /* Manual link training for Ivy Bridge A0 parts */
3547 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
3548 {
3549 	struct drm_device *dev = crtc->dev;
3550 	struct drm_i915_private *dev_priv = dev->dev_private;
3551 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3552 	int pipe = intel_crtc->pipe;
3553 	i915_reg_t reg;
3554 	u32 temp, i, j;
3555 
3556 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3557 	   for train result */
3558 	reg = FDI_RX_IMR(pipe);
3559 	temp = I915_READ(reg);
3560 	temp &= ~FDI_RX_SYMBOL_LOCK;
3561 	temp &= ~FDI_RX_BIT_LOCK;
3562 	I915_WRITE(reg, temp);
3563 
3564 	POSTING_READ(reg);
3565 	udelay(150);
3566 
3567 	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
3568 		      I915_READ(FDI_RX_IIR(pipe)));
3569 
3570 	/* Try each vswing and preemphasis setting twice before moving on */
3571 	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
3572 		/* disable first in case we need to retry */
3573 		reg = FDI_TX_CTL(pipe);
3574 		temp = I915_READ(reg);
3575 		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
3576 		temp &= ~FDI_TX_ENABLE;
3577 		I915_WRITE(reg, temp);
3578 
3579 		reg = FDI_RX_CTL(pipe);
3580 		temp = I915_READ(reg);
3581 		temp &= ~FDI_LINK_TRAIN_AUTO;
3582 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3583 		temp &= ~FDI_RX_ENABLE;
3584 		I915_WRITE(reg, temp);
3585 
3586 		/* enable CPU FDI TX and PCH FDI RX */
3587 		reg = FDI_TX_CTL(pipe);
3588 		temp = I915_READ(reg);
3589 		temp &= ~FDI_DP_PORT_WIDTH_MASK;
3590 		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3591 		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
3592 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3593 		temp |= snb_b_fdi_train_param[j/2];
3594 		temp |= FDI_COMPOSITE_SYNC;
3595 		I915_WRITE(reg, temp | FDI_TX_ENABLE);
3596 
3597 		I915_WRITE(FDI_RX_MISC(pipe),
3598 			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3599 
3600 		reg = FDI_RX_CTL(pipe);
3601 		temp = I915_READ(reg);
3602 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3603 		temp |= FDI_COMPOSITE_SYNC;
3604 		I915_WRITE(reg, temp | FDI_RX_ENABLE);
3605 
3606 		POSTING_READ(reg);
3607 		udelay(1); /* should be 0.5us */
3608 
3609 		for (i = 0; i < 4; i++) {
3610 			reg = FDI_RX_IIR(pipe);
3611 			temp = I915_READ(reg);
3612 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3613 
3614 			if (temp & FDI_RX_BIT_LOCK ||
3615 			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
3616 				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3617 				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
3618 					      i);
3619 				break;
3620 			}
3621 			udelay(1); /* should be 0.5us */
3622 		}
3623 		if (i == 4) {
3624 			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
3625 			continue;
3626 		}
3627 
3628 		/* Train 2 */
3629 		reg = FDI_TX_CTL(pipe);
3630 		temp = I915_READ(reg);
3631 		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3632 		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
3633 		I915_WRITE(reg, temp);
3634 
3635 		reg = FDI_RX_CTL(pipe);
3636 		temp = I915_READ(reg);
3637 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3638 		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3639 		I915_WRITE(reg, temp);
3640 
3641 		POSTING_READ(reg);
3642 		udelay(2); /* should be 1.5us */
3643 
3644 		for (i = 0; i < 4; i++) {
3645 			reg = FDI_RX_IIR(pipe);
3646 			temp = I915_READ(reg);
3647 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3648 
3649 			if (temp & FDI_RX_SYMBOL_LOCK ||
3650 			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
3651 				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3652 				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
3653 					      i);
3654 				goto train_done;
3655 			}
3656 			udelay(2); /* should be 1.5us */
3657 		}
3658 		if (i == 4)
3659 			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
3660 	}
3661 
3662 train_done:
3663 	DRM_DEBUG_KMS("FDI train done.\n");
3664 }
3665 
3666 static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
3667 {
3668 	struct drm_device *dev = intel_crtc->base.dev;
3669 	struct drm_i915_private *dev_priv = dev->dev_private;
3670 	int pipe = intel_crtc->pipe;
3671 	i915_reg_t reg;
3672 	u32 temp;
3673 
3674 	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
3675 	reg = FDI_RX_CTL(pipe);
3676 	temp = I915_READ(reg);
3677 	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
3678 	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3679 	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3680 	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
3681 
3682 	POSTING_READ(reg);
3683 	udelay(200);
3684 
3685 	/* Switch from Rawclk to PCDclk */
3686 	temp = I915_READ(reg);
3687 	I915_WRITE(reg, temp | FDI_PCDCLK);
3688 
3689 	POSTING_READ(reg);
3690 	udelay(200);
3691 
3692 	/* Enable CPU FDI TX PLL, always on for Ironlake */
3693 	reg = FDI_TX_CTL(pipe);
3694 	temp = I915_READ(reg);
3695 	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
3696 		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
3697 
3698 		POSTING_READ(reg);
3699 		udelay(100);
3700 	}
3701 }
3702 
3703 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3704 {
3705 	struct drm_device *dev = intel_crtc->base.dev;
3706 	struct drm_i915_private *dev_priv = dev->dev_private;
3707 	int pipe = intel_crtc->pipe;
3708 	i915_reg_t reg;
3709 	u32 temp;
3710 
3711 	/* Switch from PCDclk to Rawclk */
3712 	reg = FDI_RX_CTL(pipe);
3713 	temp = I915_READ(reg);
3714 	I915_WRITE(reg, temp & ~FDI_PCDCLK);
3715 
3716 	/* Disable CPU FDI TX PLL */
3717 	reg = FDI_TX_CTL(pipe);
3718 	temp = I915_READ(reg);
3719 	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3720 
3721 	POSTING_READ(reg);
3722 	udelay(100);
3723 
3724 	reg = FDI_RX_CTL(pipe);
3725 	temp = I915_READ(reg);
3726 	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3727 
3728 	/* Wait for the clocks to turn off. */
3729 	POSTING_READ(reg);
3730 	udelay(100);
3731 }
3732 
3733 static void ironlake_fdi_disable(struct drm_crtc *crtc)
3734 {
3735 	struct drm_device *dev = crtc->dev;
3736 	struct drm_i915_private *dev_priv = dev->dev_private;
3737 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3738 	int pipe = intel_crtc->pipe;
3739 	i915_reg_t reg;
3740 	u32 temp;
3741 
3742 	/* disable CPU FDI tx and PCH FDI rx */
3743 	reg = FDI_TX_CTL(pipe);
3744 	temp = I915_READ(reg);
3745 	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
3746 	POSTING_READ(reg);
3747 
3748 	reg = FDI_RX_CTL(pipe);
3749 	temp = I915_READ(reg);
3750 	temp &= ~(0x7 << 16);
3751 	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3752 	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
3753 
3754 	POSTING_READ(reg);
3755 	udelay(100);
3756 
3757 	/* Ironlake workaround, disable clock pointer after bringing FDI down */
3758 	if (HAS_PCH_IBX(dev))
3759 		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3760 
3761 	/* still set train pattern 1 */
3762 	reg = FDI_TX_CTL(pipe);
3763 	temp = I915_READ(reg);
3764 	temp &= ~FDI_LINK_TRAIN_NONE;
3765 	temp |= FDI_LINK_TRAIN_PATTERN_1;
3766 	I915_WRITE(reg, temp);
3767 
3768 	reg = FDI_RX_CTL(pipe);
3769 	temp = I915_READ(reg);
3770 	if (HAS_PCH_CPT(dev)) {
3771 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3772 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3773 	} else {
3774 		temp &= ~FDI_LINK_TRAIN_NONE;
3775 		temp |= FDI_LINK_TRAIN_PATTERN_1;
3776 	}
3777 	/* BPC in FDI rx is consistent with that in PIPECONF */
3778 	temp &= ~(0x07 << 16);
3779 	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3780 	I915_WRITE(reg, temp);
3781 
3782 	POSTING_READ(reg);
3783 	udelay(100);
3784 }
3785 
3786 bool intel_has_pending_fb_unpin(struct drm_device *dev)
3787 {
3788 	struct intel_crtc *crtc;
3789 
3790 	/* Note that we don't need to be called with mode_config.lock here
3791 	 * as our list of CRTC objects is static for the lifetime of the
3792 	 * device and so cannot disappear as we iterate. Similarly, we can
3793 	 * happily treat the predicates as racy, atomic checks as userspace
3794 	 * cannot claim and pin a new fb without at least acquiring the
3795 	 * struct_mutex and so serialising with us.
3796 	 */
3797 	for_each_intel_crtc(dev, crtc) {
3798 		if (atomic_read(&crtc->unpin_work_count) == 0)
3799 			continue;
3800 
3801 		if (crtc->unpin_work)
3802 			intel_wait_for_vblank(dev, crtc->pipe);
3803 
3804 		return true;
3805 	}
3806 
3807 	return false;
3808 }
3809 
3810 static void page_flip_completed(struct intel_crtc *intel_crtc)
3811 {
3812 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
3813 	struct intel_unpin_work *work = intel_crtc->unpin_work;
3814 
3815 	/* ensure that the unpin work is consistent wrt ->pending. */
3816 	smp_rmb();
3817 	intel_crtc->unpin_work = NULL;
3818 
3819 	if (work->event)
3820 		drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
3821 
3822 	drm_crtc_vblank_put(&intel_crtc->base);
3823 
3824 	wake_up_all(&dev_priv->pending_flip_queue);
3825 	queue_work(dev_priv->wq, &work->work);
3826 
3827 	trace_i915_flip_complete(intel_crtc->plane,
3828 				 work->pending_flip_obj);
3829 }
3830 
3831 static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3832 {
3833 	struct drm_device *dev = crtc->dev;
3834 	struct drm_i915_private *dev_priv = dev->dev_private;
3835 	long ret;
3836 
3837 	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
3838 
3839 	ret = wait_event_interruptible_timeout(
3840 					dev_priv->pending_flip_queue,
3841 					!intel_crtc_has_pending_flip(crtc),
3842 					60*HZ);
3843 
3844 	if (ret < 0)
3845 		return ret;
3846 
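	/*
	 * A zero return means the 60 second wait timed out; assume the flip
	 * completion was lost and force-complete any stuck flip below.
	 */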
3847 	if (ret == 0) {
3848 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3849 
3850 		spin_lock_irq(&dev->event_lock);
3851 		if (intel_crtc->unpin_work) {
3852 			WARN_ONCE(1, "Removing stuck page flip\n");
3853 			page_flip_completed(intel_crtc);
3854 		}
3855 		spin_unlock_irq(&dev->event_lock);
3856 	}
3857 
3858 	return 0;
3859 }
3860 
3861 static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
3862 {
3863 	u32 temp;
3864 
3865 	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
3866 
3867 	mutex_lock(&dev_priv->sb_lock);
3868 
3869 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3870 	temp |= SBI_SSCCTL_DISABLE;
3871 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3872 
3873 	mutex_unlock(&dev_priv->sb_lock);
3874 }
3875 
3876 /* Program iCLKIP clock to the desired frequency */
3877 static void lpt_program_iclkip(struct drm_crtc *crtc)
3878 {
3879 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
3880 	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
3881 	u32 divsel, phaseinc, auxdiv, phasedir = 0;
3882 	u32 temp;
3883 
3884 	lpt_disable_iclkip(dev_priv);
3885 
3886 	/* The iCLK virtual clock root frequency is in MHz,
3887 	 * but the adjusted_mode->crtc_clock is in kHz. To get the
3888 	 * divisors, it is necessary to divide one by the other, so we
3889 	 * convert the virtual clock rate to kHz here for higher
3890 	 * precision.
3891 	 */
3892 	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
3893 		u32 iclk_virtual_root_freq = 172800 * 1000;
3894 		u32 iclk_pi_range = 64;
3895 		u32 desired_divisor;
3896 
3897 		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
3898 						    clock << auxdiv);
3899 		divsel = (desired_divisor / iclk_pi_range) - 2;
3900 		phaseinc = desired_divisor % iclk_pi_range;
3901 
3902 		/*
3903 		 * Near 20MHz is a corner case which is
3904 		 * out of range for the 7-bit divisor
3905 		 */
3906 		if (divsel <= 0x7f)
3907 			break;
3908 	}
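	/*
	 * Illustrative example (not from any spec): for a 108000 kHz
	 * crtc_clock with auxdiv = 0, desired_divisor =
	 * DIV_ROUND_CLOSEST(172800000, 108000) = 1600, giving
	 * divsel = 1600 / 64 - 2 = 23 and phaseinc = 1600 % 64 = 0,
	 * well within the 7-bit divisor range.
	 */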
3909 
3910 	/* This should not happen with any sane values */
3911 	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
3912 		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
3913 	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
3914 		~SBI_SSCDIVINTPHASE_INCVAL_MASK);
3915 
3916 	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
3917 			clock,
3918 			auxdiv,
3919 			divsel,
3920 			phasedir,
3921 			phaseinc);
3922 
3923 	mutex_lock(&dev_priv->sb_lock);
3924 
3925 	/* Program SSCDIVINTPHASE6 */
3926 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3927 	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
3928 	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
3929 	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
3930 	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
3931 	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
3932 	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
3933 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
3934 
3935 	/* Program SSCAUXDIV */
3936 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3937 	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3938 	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3939 	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3940 
3941 	/* Enable modulator and associated divider */
3942 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3943 	temp &= ~SBI_SSCCTL_DISABLE;
3944 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3945 
3946 	mutex_unlock(&dev_priv->sb_lock);
3947 
3948 	/* Wait for initialization time */
3949 	udelay(24);
3950 
3951 	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3952 }
3953 
3954 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
3955 {
3956 	u32 divsel, phaseinc, auxdiv;
3957 	u32 iclk_virtual_root_freq = 172800 * 1000;
3958 	u32 iclk_pi_range = 64;
3959 	u32 desired_divisor;
3960 	u32 temp;
3961 
3962 	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
3963 		return 0;
3964 
3965 	mutex_lock(&dev_priv->sb_lock);
3966 
3967 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3968 	if (temp & SBI_SSCCTL_DISABLE) {
3969 		mutex_unlock(&dev_priv->sb_lock);
3970 		return 0;
3971 	}
3972 
3973 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3974 	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
3975 		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
3976 	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
3977 		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
3978 
3979 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3980 	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
3981 		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
3982 
3983 	mutex_unlock(&dev_priv->sb_lock);
3984 
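	/*
	 * Invert the math done in lpt_program_iclkip(): rebuild the divisor
	 * from divsel/phaseinc and convert back to a clock in kHz.
	 */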
3985 	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
3986 
3987 	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
3988 				 desired_divisor << auxdiv);
3989 }
3990 
3991 static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
3992 						enum i915_pipe pch_transcoder)
3993 {
3994 	struct drm_device *dev = crtc->base.dev;
3995 	struct drm_i915_private *dev_priv = dev->dev_private;
3996 	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
3997 
3998 	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
3999 		   I915_READ(HTOTAL(cpu_transcoder)));
4000 	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
4001 		   I915_READ(HBLANK(cpu_transcoder)));
4002 	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
4003 		   I915_READ(HSYNC(cpu_transcoder)));
4004 
4005 	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
4006 		   I915_READ(VTOTAL(cpu_transcoder)));
4007 	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
4008 		   I915_READ(VBLANK(cpu_transcoder)));
4009 	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
4010 		   I915_READ(VSYNC(cpu_transcoder)));
4011 	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
4012 		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
4013 }
4014 
4015 static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
4016 {
4017 	struct drm_i915_private *dev_priv = dev->dev_private;
4018 	uint32_t temp;
4019 
4020 	temp = I915_READ(SOUTH_CHICKEN1);
4021 	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
4022 		return;
4023 
4024 	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
4025 	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
4026 
4027 	temp &= ~FDI_BC_BIFURCATION_SELECT;
4028 	if (enable)
4029 		temp |= FDI_BC_BIFURCATION_SELECT;
4030 
4031 	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
4032 	I915_WRITE(SOUTH_CHICKEN1, temp);
4033 	POSTING_READ(SOUTH_CHICKEN1);
4034 }
4035 
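/*
 * FDI B and C appear to share lanes on IVB/CPT: with bifurcation enabled
 * each gets two lanes, without it FDI B keeps all four and pipe C cannot
 * use FDI. That matches the policy below: pipe B only avoids the split
 * when it needs more than two lanes, and pipe C always requests it.
 */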
4036 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4037 {
4038 	struct drm_device *dev = intel_crtc->base.dev;
4039 
4040 	switch (intel_crtc->pipe) {
4041 	case PIPE_A:
4042 		break;
4043 	case PIPE_B:
4044 		if (intel_crtc->config->fdi_lanes > 2)
4045 			cpt_set_fdi_bc_bifurcation(dev, false);
4046 		else
4047 			cpt_set_fdi_bc_bifurcation(dev, true);
4048 
4049 		break;
4050 	case PIPE_C:
4051 		cpt_set_fdi_bc_bifurcation(dev, true);
4052 
4053 		break;
4054 	default:
4055 		BUG();
4056 	}
4057 }
4058 
4059 /* Return which DP Port should be selected for Transcoder DP control */
4060 static enum port
4061 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4062 {
4063 	struct drm_device *dev = crtc->dev;
4064 	struct intel_encoder *encoder;
4065 
4066 	for_each_encoder_on_crtc(dev, crtc, encoder) {
4067 		if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4068 		    encoder->type == INTEL_OUTPUT_EDP)
4069 			return enc_to_dig_port(&encoder->base)->port;
4070 	}
4071 
4072 	return -1;
4073 }
4074 
4075 /*
4076  * Enable PCH resources required for PCH ports:
4077  *   - PCH PLLs
4078  *   - FDI training & RX/TX
4079  *   - update transcoder timings
4080  *   - DP transcoding bits
4081  *   - transcoder
4082  */
4083 static void ironlake_pch_enable(struct drm_crtc *crtc)
4084 {
4085 	struct drm_device *dev = crtc->dev;
4086 	struct drm_i915_private *dev_priv = dev->dev_private;
4087 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4088 	int pipe = intel_crtc->pipe;
4089 	u32 temp;
4090 
4091 	assert_pch_transcoder_disabled(dev_priv, pipe);
4092 
4093 	if (IS_IVYBRIDGE(dev))
4094 		ivybridge_update_fdi_bc_bifurcation(intel_crtc);
4095 
4096 	/* Write the TU size bits before fdi link training, so that error
4097 	 * detection works. */
4098 	I915_WRITE(FDI_RX_TUSIZE1(pipe),
4099 		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
4100 
4101 	/* For PCH output, training FDI link */
4102 	dev_priv->display.fdi_link_train(crtc);
4103 
4104 	/* We need to program the right clock selection before writing the pixel
4105 	 * multiplier into the DPLL. */
4106 	if (HAS_PCH_CPT(dev)) {
4107 		u32 sel;
4108 
4109 		temp = I915_READ(PCH_DPLL_SEL);
4110 		temp |= TRANS_DPLL_ENABLE(pipe);
4111 		sel = TRANS_DPLLB_SEL(pipe);
4112 		if (intel_crtc->config->shared_dpll ==
4113 		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
4114 			temp |= sel;
4115 		else
4116 			temp &= ~sel;
4117 		I915_WRITE(PCH_DPLL_SEL, temp);
4118 	}
4119 
4120 	/* XXX: PCH PLLs can be enabled any time before we enable the PCH
4121 	 * transcoder, and we actually should do this to not upset any PCH
4122 	 * transcoder that already uses the clock when we share it.
4123 	 *
4124 	 * Note that enable_shared_dpll tries to do the right thing, but
4125 	 * get_shared_dpll unconditionally resets the pll - we need that to have
4126 	 * the right LVDS enable sequence. */
4127 	intel_enable_shared_dpll(intel_crtc);
4128 
4129 	/* set transcoder timing, panel must allow it */
4130 	assert_panel_unlocked(dev_priv, pipe);
4131 	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
4132 
4133 	intel_fdi_normal_train(crtc);
4134 
4135 	/* For PCH DP, enable TRANS_DP_CTL */
4136 	if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
4137 		const struct drm_display_mode *adjusted_mode =
4138 			&intel_crtc->config->base.adjusted_mode;
4139 		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
4140 		i915_reg_t reg = TRANS_DP_CTL(pipe);
4141 		temp = I915_READ(reg);
4142 		temp &= ~(TRANS_DP_PORT_SEL_MASK |
4143 			  TRANS_DP_SYNC_MASK |
4144 			  TRANS_DP_BPC_MASK);
4145 		temp |= TRANS_DP_OUTPUT_ENABLE;
4146 		temp |= bpc << 9; /* same format but at 11:9 */
4147 
4148 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
4149 			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
4150 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
4151 			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
4152 
4153 		switch (intel_trans_dp_port_sel(crtc)) {
4154 		case PORT_B:
4155 			temp |= TRANS_DP_PORT_SEL_B;
4156 			break;
4157 		case PORT_C:
4158 			temp |= TRANS_DP_PORT_SEL_C;
4159 			break;
4160 		case PORT_D:
4161 			temp |= TRANS_DP_PORT_SEL_D;
4162 			break;
4163 		default:
4164 			BUG();
4165 		}
4166 
4167 		I915_WRITE(reg, temp);
4168 	}
4169 
4170 	ironlake_enable_pch_transcoder(dev_priv, pipe);
4171 }
4172 
4173 static void lpt_pch_enable(struct drm_crtc *crtc)
4174 {
4175 	struct drm_device *dev = crtc->dev;
4176 	struct drm_i915_private *dev_priv = dev->dev_private;
4177 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4178 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
4179 
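	/*
	 * LPT has a single PCH transcoder, which is why everything here is
	 * keyed to transcoder A / pipe A regardless of the CPU pipe.
	 */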
4180 	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
4181 
4182 	lpt_program_iclkip(crtc);
4183 
4184 	/* Set transcoder timing. */
4185 	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
4186 
4187 	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
4188 }
4189 
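/*
 * PIPEDSL is the pipe's current-scanline register, so if repeated reads
 * keep returning the same value the pipe has stopped advancing; this is
 * used as a post-modeset sanity check on CPT.
 */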
4190 static void cpt_verify_modeset(struct drm_device *dev, int pipe)
4191 {
4192 	struct drm_i915_private *dev_priv = dev->dev_private;
4193 	i915_reg_t dslreg = PIPEDSL(pipe);
4194 	u32 temp;
4195 
4196 	temp = I915_READ(dslreg);
4197 	udelay(500);
4198 	if (wait_for(I915_READ(dslreg) != temp, 5)) {
4199 		if (wait_for(I915_READ(dslreg) != temp, 5))
4200 			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
4201 	}
4202 }
4203 
4204 static int
4205 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4206 		  unsigned scaler_user, int *scaler_id, unsigned int rotation,
4207 		  int src_w, int src_h, int dst_w, int dst_h)
4208 {
4209 	struct intel_crtc_scaler_state *scaler_state =
4210 		&crtc_state->scaler_state;
4211 	struct intel_crtc *intel_crtc =
4212 		to_intel_crtc(crtc_state->base.crtc);
4213 	int need_scaling;
4214 
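	/*
	 * With 90/270 degree rotation the source is sampled sideways, hence
	 * the swapped comparison: scaling is needed when src WxH differs
	 * from dst HxW instead of dst WxH.
	 */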
4215 	need_scaling = intel_rotation_90_or_270(rotation) ?
4216 		(src_h != dst_w || src_w != dst_h) :
4217 		(src_w != dst_w || src_h != dst_h);
4218 
4219 	/*
4220 	 * If the plane is being disabled, the scaler is no longer required,
4221 	 * or a detach is forced:
4222 	 *  - free the scaler bound to this plane/crtc
4223 	 *  - to do this, update crtc->scaler_usage
4224 	 *
4225 	 * Here the scaler state in crtc_state is marked free so the scaler can
4226 	 * be assigned to another user. The actual register update that frees it
4227 	 * is done in plane/panel-fit programming, so scaler_id isn't reset here.
4228 	 */
4229 	if (force_detach || !need_scaling) {
4230 		if (*scaler_id >= 0) {
4231 			scaler_state->scaler_users &= ~(1 << scaler_user);
4232 			scaler_state->scalers[*scaler_id].in_use = 0;
4233 
4234 			DRM_DEBUG_KMS("scaler_user index %u.%u: "
4235 				"Staged freeing scaler id %d scaler_users = 0x%x\n",
4236 				intel_crtc->pipe, scaler_user, *scaler_id,
4237 				scaler_state->scaler_users);
4238 			*scaler_id = -1;
4239 		}
4240 		return 0;
4241 	}
4242 
4243 	/* range checks */
4244 	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
4245 		dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
4246 
4247 		src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
4248 		dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
4249 		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
4250 			"size is out of scaler range\n",
4251 			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
4252 		return -EINVAL;
4253 	}
4254 
4255 	/* mark this plane as a scaler user in crtc_state */
4256 	scaler_state->scaler_users |= (1 << scaler_user);
4257 	DRM_DEBUG_KMS("scaler_user index %u.%u: "
4258 		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
4259 		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
4260 		scaler_state->scaler_users);
4261 
4262 	return 0;
4263 }
4264 
4265 /**
4266  * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
4267  *
4268  * @state: crtc's state, including scaler state
4269  *
4270  * Return
4271  *     0 - scaler_usage updated successfully
4272  *    error - requested scaling cannot be supported or other error condition
4273  */
4274 int skl_update_scaler_crtc(struct intel_crtc_state *state)
4275 {
4276 	struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
4277 	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
4278 
4279 	DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
4280 		      intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
4281 
4282 	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
4283 		&state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
4284 		state->pipe_src_w, state->pipe_src_h,
4285 		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
4286 }
4287 
4288 /**
4289  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
4290  *
4291  * @crtc_state: crtc's state, including scaler state
4292  * @plane_state: atomic plane state to update
4293  *
4294  * Return
4295  *     0 - scaler_usage updated successfully
4296  *    error - requested scaling cannot be supported or other error condition
4297  */
4298 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4299 				   struct intel_plane_state *plane_state)
4300 {
4301 
4302 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4303 	struct intel_plane *intel_plane =
4304 		to_intel_plane(plane_state->base.plane);
4305 	struct drm_framebuffer *fb = plane_state->base.fb;
4306 	int ret;
4307 
4308 	bool force_detach = !fb || !plane_state->visible;
4309 
4310 	DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
4311 		      intel_plane->base.base.id, intel_crtc->pipe,
4312 		      drm_plane_index(&intel_plane->base));
4313 
4314 	ret = skl_update_scaler(crtc_state, force_detach,
4315 				drm_plane_index(&intel_plane->base),
4316 				&plane_state->scaler_id,
4317 				plane_state->base.rotation,
4318 				drm_rect_width(&plane_state->src) >> 16,
4319 				drm_rect_height(&plane_state->src) >> 16,
4320 				drm_rect_width(&plane_state->dst),
4321 				drm_rect_height(&plane_state->dst));
4322 
4323 	if (ret || plane_state->scaler_id < 0)
4324 		return ret;
4325 
4326 	/* check colorkey */
4327 	if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
4328 		DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed\n",
4329 			      intel_plane->base.base.id);
4330 		return -EINVAL;
4331 	}
4332 
4333 	/* Check src format */
4334 	switch (fb->pixel_format) {
4335 	case DRM_FORMAT_RGB565:
4336 	case DRM_FORMAT_XBGR8888:
4337 	case DRM_FORMAT_XRGB8888:
4338 	case DRM_FORMAT_ABGR8888:
4339 	case DRM_FORMAT_ARGB8888:
4340 	case DRM_FORMAT_XRGB2101010:
4341 	case DRM_FORMAT_XBGR2101010:
4342 	case DRM_FORMAT_YUYV:
4343 	case DRM_FORMAT_YVYU:
4344 	case DRM_FORMAT_UYVY:
4345 	case DRM_FORMAT_VYUY:
4346 		break;
4347 	default:
4348 		DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
4349 			intel_plane->base.base.id, fb->base.id, fb->pixel_format);
4350 		return -EINVAL;
4351 	}
4352 
4353 	return 0;
4354 }
4355 
4356 static void skylake_scaler_disable(struct intel_crtc *crtc)
4357 {
4358 	int i;
4359 
4360 	for (i = 0; i < crtc->num_scalers; i++)
4361 		skl_detach_scaler(crtc, i);
4362 }
4363 
4364 static void skylake_pfit_enable(struct intel_crtc *crtc)
4365 {
4366 	struct drm_device *dev = crtc->base.dev;
4367 	struct drm_i915_private *dev_priv = dev->dev_private;
4368 	int pipe = crtc->pipe;
4369 	struct intel_crtc_scaler_state *scaler_state =
4370 		&crtc->config->scaler_state;
4371 
4372 	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
4373 
4374 	if (crtc->config->pch_pfit.enabled) {
4375 		int id;
4376 
4377 		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
4378 			DRM_ERROR("Requesting pfit without getting a scaler first\n");
4379 			return;
4380 		}
4381 
4382 		id = scaler_state->scaler_id;
4383 		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
4384 			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
4385 		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
4386 		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
4387 
4388 		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
4389 	}
4390 }
4391 
4392 static void ironlake_pfit_enable(struct intel_crtc *crtc)
4393 {
4394 	struct drm_device *dev = crtc->base.dev;
4395 	struct drm_i915_private *dev_priv = dev->dev_private;
4396 	int pipe = crtc->pipe;
4397 
4398 	if (crtc->config->pch_pfit.enabled) {
4399 		/* Force use of hard-coded filter coefficients
4400 		 * as some pre-programmed values are broken,
4401 		 * e.g. x201.
4402 		 */
4403 		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
4404 			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
4405 						 PF_PIPE_SEL_IVB(pipe));
4406 		else
4407 			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
4408 		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
4409 		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
4410 	}
4411 }
4412 
4413 void hsw_enable_ips(struct intel_crtc *crtc)
4414 {
4415 	struct drm_device *dev = crtc->base.dev;
4416 	struct drm_i915_private *dev_priv = dev->dev_private;
4417 
4418 	if (!crtc->config->ips_enabled)
4419 		return;
4420 
4421 	/*
4422 	 * We can only enable IPS after we enable a plane and wait for a vblank.
4423 	 * This function is called from post_plane_update, which is run after
4424 	 * a vblank wait.
4425 	 */
4426 
4427 	assert_plane_enabled(dev_priv, crtc->plane);
4428 	if (IS_BROADWELL(dev)) {
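		/*
		 * On BDW, IPS is toggled through the pcode mailbox rather
		 * than a direct IPS_CTL write; 0xc0000000 here presumably
		 * encodes the enable request (the disable path writes 0).
		 */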
4429 		mutex_lock(&dev_priv->rps.hw_lock);
4430 		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
4431 		mutex_unlock(&dev_priv->rps.hw_lock);
4432 		/* Quoting Art Runyan: "it's not safe to expect any particular
4433 		 * value in IPS_CTL bit 31 after enabling IPS through the
4434 		 * mailbox." Moreover, the mailbox may return a bogus state,
4435 		 * so we need to just enable it and continue on.
4436 		 */
4437 	} else {
4438 		I915_WRITE(IPS_CTL, IPS_ENABLE);
4439 		/* The bit only becomes 1 in the next vblank, so this wait here
4440 		 * is essentially intel_wait_for_vblank. If we don't have this
4441 		 * and don't wait for vblanks until the end of crtc_enable, then
4442 		 * the HW state readout code will complain that the expected
4443 		 * IPS_CTL value is not the one we read. */
4444 		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
4445 			DRM_ERROR("Timed out waiting for IPS enable\n");
4446 	}
4447 }
4448 
4449 void hsw_disable_ips(struct intel_crtc *crtc)
4450 {
4451 	struct drm_device *dev = crtc->base.dev;
4452 	struct drm_i915_private *dev_priv = dev->dev_private;
4453 
4454 	if (!crtc->config->ips_enabled)
4455 		return;
4456 
4457 	assert_plane_enabled(dev_priv, crtc->plane);
4458 	if (IS_BROADWELL(dev)) {
4459 		mutex_lock(&dev_priv->rps.hw_lock);
4460 		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
4461 		mutex_unlock(&dev_priv->rps.hw_lock);
4462 		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
4463 		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
4464 			DRM_ERROR("Timed out waiting for IPS disable\n");
4465 	} else {
4466 		I915_WRITE(IPS_CTL, 0);
4467 		POSTING_READ(IPS_CTL);
4468 	}
4469 
4470 	/* We need to wait for a vblank before we can disable the plane. */
4471 	intel_wait_for_vblank(dev, crtc->pipe);
4472 }
4473 
4474 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
4475 {
4476 	if (intel_crtc->overlay) {
4477 		struct drm_device *dev = intel_crtc->base.dev;
4478 		struct drm_i915_private *dev_priv = dev->dev_private;
4479 
4480 		mutex_lock(&dev->struct_mutex);
4481 		dev_priv->mm.interruptible = false;
4482 		(void) intel_overlay_switch_off(intel_crtc->overlay);
4483 		dev_priv->mm.interruptible = true;
4484 		mutex_unlock(&dev->struct_mutex);
4485 	}
4486 
4487 	/* Let userspace switch the overlay on again. In most cases userspace
4488 	 * has to recompute where to put it anyway.
4489 	 */
4490 }
4491 
4492 /**
4493  * intel_post_enable_primary - Perform operations after enabling primary plane
4494  * @crtc: the CRTC whose primary plane was just enabled
4495  *
4496  * Performs potentially sleeping operations that must be done after the primary
4497  * plane is enabled, such as updating FBC and IPS.  Note that this may be
4498  * called due to an explicit primary plane update, or due to an implicit
4499  * re-enable that is caused when a sprite plane is updated to no longer
4500  * completely hide the primary plane.
4501  */
4502 static void
4503 intel_post_enable_primary(struct drm_crtc *crtc)
4504 {
4505 	struct drm_device *dev = crtc->dev;
4506 	struct drm_i915_private *dev_priv = dev->dev_private;
4507 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4508 	int pipe = intel_crtc->pipe;
4509 
4510 	/*
4511 	 * FIXME IPS should be fine as long as one plane is
4512 	 * enabled, but in practice it seems to have problems
4513 	 * when going from primary only to sprite only and vice
4514 	 * versa.
4515 	 */
4516 	hsw_enable_ips(intel_crtc);
4517 
4518 	/*
4519 	 * Gen2 reports pipe underruns whenever all planes are disabled.
4520 	 * So don't enable underrun reporting before at least some planes
4521 	 * are enabled.
4522 	 * FIXME: Need to fix the logic to work when we turn off all planes
4523 	 * but leave the pipe running.
4524 	 */
4525 	if (IS_GEN2(dev))
4526 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4527 
4528 	/* Underruns don't always raise interrupts, so check manually. */
4529 	intel_check_cpu_fifo_underruns(dev_priv);
4530 	intel_check_pch_fifo_underruns(dev_priv);
4531 }
4532 
4533 /* FIXME move all this to pre_plane_update() with proper state tracking */
4534 static void
4535 intel_pre_disable_primary(struct drm_crtc *crtc)
4536 {
4537 	struct drm_device *dev = crtc->dev;
4538 	struct drm_i915_private *dev_priv = dev->dev_private;
4539 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4540 	int pipe = intel_crtc->pipe;
4541 
4542 	/*
4543 	 * Gen2 reports pipe underruns whenever all planes are disabled.
4544 	 * So disable underrun reporting before all the planes get disabled.
4545 	 * FIXME: Need to fix the logic to work when we turn off all planes
4546 	 * but leave the pipe running.
4547 	 */
4548 	if (IS_GEN2(dev))
4549 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4550 
4551 	/*
4552 	 * FIXME IPS should be fine as long as one plane is
4553 	 * enabled, but in practice it seems to have problems
4554 	 * when going from primary only to sprite only and vice
4555 	 * versa.
4556 	 */
4557 	hsw_disable_ips(intel_crtc);
4558 }
4559 
4560 /* FIXME get rid of this and use pre_plane_update */
4561 static void
4562 intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
4563 {
4564 	struct drm_device *dev = crtc->dev;
4565 	struct drm_i915_private *dev_priv = dev->dev_private;
4566 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4567 	int pipe = intel_crtc->pipe;
4568 
4569 	intel_pre_disable_primary(crtc);
4570 
4571 	/*
4572 	 * Vblank time updates from the shadow to live plane control register
4573 	 * are blocked if the memory self-refresh mode is active at that
4574 	 * moment. So to make sure the plane gets truly disabled, disable
4575 	 * first the self-refresh mode. The self-refresh enable bit in turn
4576 	 * will be checked/applied by the HW only at the next frame start
4577 	 * event which is after the vblank start event, so we need to have a
4578 	 * wait-for-vblank between disabling the plane and the pipe.
4579 	 */
4580 	if (HAS_GMCH_DISPLAY(dev)) {
4581 		intel_set_memory_cxsr(dev_priv, false);
4582 		dev_priv->wm.vlv.cxsr = false;
4583 		intel_wait_for_vblank(dev, pipe);
4584 	}
4585 }
4586 
4587 static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
4588 {
4589 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
4590 	struct drm_atomic_state *old_state = old_crtc_state->base.state;
4591 	struct intel_crtc_state *pipe_config =
4592 		to_intel_crtc_state(crtc->base.state);
4593 	struct drm_device *dev = crtc->base.dev;
4594 	struct drm_plane *primary = crtc->base.primary;
4595 	struct drm_plane_state *old_pri_state =
4596 		drm_atomic_get_existing_plane_state(old_state, primary);
4597 
4598 	intel_frontbuffer_flip(dev, pipe_config->fb_bits);
4599 
4600 	crtc->wm.cxsr_allowed = true;
4601 
4602 	if (pipe_config->update_wm_post && pipe_config->base.active)
4603 		intel_update_watermarks(&crtc->base);
4604 
4605 	if (old_pri_state) {
4606 		struct intel_plane_state *primary_state =
4607 			to_intel_plane_state(primary->state);
4608 		struct intel_plane_state *old_primary_state =
4609 			to_intel_plane_state(old_pri_state);
4610 
4611 		intel_fbc_post_update(crtc);
4612 
4613 		if (primary_state->visible &&
4614 		    (needs_modeset(&pipe_config->base) ||
4615 		     !old_primary_state->visible))
4616 			intel_post_enable_primary(&crtc->base);
4617 	}
4618 }
4619 
4620 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
4621 {
4622 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
4623 	struct drm_device *dev = crtc->base.dev;
4624 	struct drm_i915_private *dev_priv = dev->dev_private;
4625 	struct intel_crtc_state *pipe_config =
4626 		to_intel_crtc_state(crtc->base.state);
4627 	struct drm_atomic_state *old_state = old_crtc_state->base.state;
4628 	struct drm_plane *primary = crtc->base.primary;
4629 	struct drm_plane_state *old_pri_state =
4630 		drm_atomic_get_existing_plane_state(old_state, primary);
4631 	bool modeset = needs_modeset(&pipe_config->base);
4632 
4633 	if (old_pri_state) {
4634 		struct intel_plane_state *primary_state =
4635 			to_intel_plane_state(primary->state);
4636 		struct intel_plane_state *old_primary_state =
4637 			to_intel_plane_state(old_pri_state);
4638 
4639 		intel_fbc_pre_update(crtc);
4640 
4641 		if (old_primary_state->visible &&
4642 		    (modeset || !primary_state->visible))
4643 			intel_pre_disable_primary(&crtc->base);
4644 	}
4645 
4646 	if (pipe_config->disable_cxsr) {
4647 		crtc->wm.cxsr_allowed = false;
4648 
4649 		/*
4650 		 * Vblank time updates from the shadow to live plane control register
4651 		 * are blocked if the memory self-refresh mode is active at that
4652 		 * moment. So to make sure the plane gets truly disabled, disable
4653 		 * first the self-refresh mode. The self-refresh enable bit in turn
4654 		 * will be checked/applied by the HW only at the next frame start
4655 		 * event which is after the vblank start event, so we need to have a
4656 		 * wait-for-vblank between disabling the plane and the pipe.
4657 		 */
4658 		if (old_crtc_state->base.active) {
4659 			intel_set_memory_cxsr(dev_priv, false);
4660 			dev_priv->wm.vlv.cxsr = false;
4661 			intel_wait_for_vblank(dev, crtc->pipe);
4662 		}
4663 	}
4664 
4665 	/*
4666 	 * IVB workaround: must disable low power watermarks for at least
4667 	 * one frame before enabling scaling.  LP watermarks can be re-enabled
4668 	 * when scaling is disabled.
4669 	 *
4670 	 * WaCxSRDisabledForSpriteScaling:ivb
4671 	 */
4672 	if (pipe_config->disable_lp_wm) {
4673 		ilk_disable_lp_wm(dev);
4674 		intel_wait_for_vblank(dev, crtc->pipe);
4675 	}
4676 
4677 	/*
4678 	 * If we're doing a modeset, we're done.  No need to do any pre-vblank
4679 	 * watermark programming here.
4680 	 */
4681 	if (needs_modeset(&pipe_config->base))
4682 		return;
4683 
4684 	/*
4685 	 * For platforms that support atomic watermarks, program the
4686 	 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
4687 	 * will be the intermediate values that are safe for both pre- and
4688 	 * post- vblank; when vblank happens, the 'active' values will be set
4689 	 * to the final 'target' values and we'll do this again to get the
4690 	 * optimal watermarks.  For gen9+ platforms, the values we program here
4691 	 * will be the final target values which will get automatically latched
4692 	 * at vblank time; no further programming will be necessary.
4693 	 *
4694 	 * If a platform hasn't been transitioned to atomic watermarks yet,
4695 	 * we'll continue to update watermarks the old way, if flags tell
4696 	 * us to.
4697 	 */
4698 	if (dev_priv->display.initial_watermarks != NULL)
4699 		dev_priv->display.initial_watermarks(pipe_config);
4700 	else if (pipe_config->update_wm_pre)
4701 		intel_update_watermarks(&crtc->base);
4702 }
4703 
4704 static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
4705 {
4706 	struct drm_device *dev = crtc->dev;
4707 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4708 	struct drm_plane *p;
4709 	int pipe = intel_crtc->pipe;
4710 
4711 	intel_crtc_dpms_overlay_disable(intel_crtc);
4712 
4713 	drm_for_each_plane_mask(p, dev, plane_mask)
4714 		to_intel_plane(p)->disable_plane(p, crtc);
4715 
4716 	/*
4717 	 * FIXME: Once we grow proper nuclear flip support out of this we need
4718 	 * to compute the mask of flip planes precisely. For the time being
4719 	 * consider this a flip to a NULL plane.
4720 	 */
4721 	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
4722 }
4723 
4724 static void ironlake_crtc_enable(struct drm_crtc *crtc)
4725 {
4726 	struct drm_device *dev = crtc->dev;
4727 	struct drm_i915_private *dev_priv = dev->dev_private;
4728 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4729 	struct intel_encoder *encoder;
4730 	int pipe = intel_crtc->pipe;
4731 	struct intel_crtc_state *pipe_config =
4732 		to_intel_crtc_state(crtc->state);
4733 
4734 	if (WARN_ON(intel_crtc->active))
4735 		return;
4736 
4737 	/*
4738 	 * Sometimes spurious CPU pipe underruns happen during FDI
4739 	 * training, at least with VGA+HDMI cloning. Suppress them.
4740 	 *
4741 	 * On ILK we get an occasional spurious CPU pipe underruns
4742 	 * between eDP port A enable and vdd enable. Also PCH port
4743 	 * enable seems to result in the occasional CPU pipe underrun.
4744 	 *
4745 	 * Spurious PCH underruns also occur during PCH enabling.
4746 	 */
4747 	if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
4748 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4749 	if (intel_crtc->config->has_pch_encoder)
4750 		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
4751 
4752 	if (intel_crtc->config->has_pch_encoder)
4753 		intel_prepare_shared_dpll(intel_crtc);
4754 
4755 	if (intel_crtc->config->has_dp_encoder)
4756 		intel_dp_set_m_n(intel_crtc, M1_N1);
4757 
4758 	intel_set_pipe_timings(intel_crtc);
4759 	intel_set_pipe_src_size(intel_crtc);
4760 
4761 	if (intel_crtc->config->has_pch_encoder) {
4762 		intel_cpu_transcoder_set_m_n(intel_crtc,
4763 				     &intel_crtc->config->fdi_m_n, NULL);
4764 	}
4765 
4766 	ironlake_set_pipeconf(crtc);
4767 
4768 	intel_crtc->active = true;
4769 
4770 	for_each_encoder_on_crtc(dev, crtc, encoder)
4771 		if (encoder->pre_enable)
4772 			encoder->pre_enable(encoder);
4773 
4774 	if (intel_crtc->config->has_pch_encoder) {
4775 		/* Note: FDI PLL enabling _must_ be done before we enable the
4776 		 * cpu pipes, hence this is separate from all the other fdi/pch
4777 		 * enabling. */
4778 		ironlake_fdi_pll_enable(intel_crtc);
4779 	} else {
4780 		assert_fdi_tx_disabled(dev_priv, pipe);
4781 		assert_fdi_rx_disabled(dev_priv, pipe);
4782 	}
4783 
4784 	ironlake_pfit_enable(intel_crtc);
4785 
4786 	/*
4787 	 * On ILK+ LUT must be loaded before the pipe is running but with
4788 	 * clocks enabled
4789 	 */
4790 	intel_color_load_luts(&pipe_config->base);
4791 
4792 	if (dev_priv->display.initial_watermarks != NULL)
4793 		dev_priv->display.initial_watermarks(intel_crtc->config);
4794 	intel_enable_pipe(intel_crtc);
4795 
4796 	if (intel_crtc->config->has_pch_encoder)
4797 		ironlake_pch_enable(crtc);
4798 
4799 	assert_vblank_disabled(crtc);
4800 	drm_crtc_vblank_on(crtc);
4801 
4802 	for_each_encoder_on_crtc(dev, crtc, encoder)
4803 		encoder->enable(encoder);
4804 
4805 	if (HAS_PCH_CPT(dev))
4806 		cpt_verify_modeset(dev, intel_crtc->pipe);
4807 
4808 	/* Must wait for vblank to avoid spurious PCH FIFO underruns */
4809 	if (intel_crtc->config->has_pch_encoder)
4810 		intel_wait_for_vblank(dev, pipe);
4811 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4812 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
4813 }
4814 
4815 /* IPS only exists on ULT machines and is tied to pipe A. */
4816 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4817 {
4818 	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4819 }
4820 
4821 static void haswell_crtc_enable(struct drm_crtc *crtc)
4822 {
4823 	struct drm_device *dev = crtc->dev;
4824 	struct drm_i915_private *dev_priv = dev->dev_private;
4825 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4826 	struct intel_encoder *encoder;
4827 	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
4828 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
4829 	struct intel_crtc_state *pipe_config =
4830 		to_intel_crtc_state(crtc->state);
4831 
4832 	if (WARN_ON(intel_crtc->active))
4833 		return;
4834 
4835 	if (intel_crtc->config->has_pch_encoder)
4836 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4837 						      false);
4838 
4839 	if (intel_crtc->config->shared_dpll)
4840 		intel_enable_shared_dpll(intel_crtc);
4841 
4842 	if (intel_crtc->config->has_dp_encoder)
4843 		intel_dp_set_m_n(intel_crtc, M1_N1);
4844 
4845 	if (!intel_crtc->config->has_dsi_encoder)
4846 		intel_set_pipe_timings(intel_crtc);
4847 
4848 	intel_set_pipe_src_size(intel_crtc);
4849 
4850 	if (cpu_transcoder != TRANSCODER_EDP &&
4851 	    !transcoder_is_dsi(cpu_transcoder)) {
4852 		I915_WRITE(PIPE_MULT(cpu_transcoder),
4853 			   intel_crtc->config->pixel_multiplier - 1);
4854 	}
4855 
4856 	if (intel_crtc->config->has_pch_encoder) {
4857 		intel_cpu_transcoder_set_m_n(intel_crtc,
4858 				     &intel_crtc->config->fdi_m_n, NULL);
4859 	}
4860 
4861 	if (!intel_crtc->config->has_dsi_encoder)
4862 		haswell_set_pipeconf(crtc);
4863 
4864 	haswell_set_pipemisc(crtc);
4865 
4866 	intel_color_set_csc(&pipe_config->base);
4867 
4868 	intel_crtc->active = true;
4869 
4870 	if (intel_crtc->config->has_pch_encoder)
4871 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4872 	else
4873 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4874 
4875 	for_each_encoder_on_crtc(dev, crtc, encoder) {
4876 		if (encoder->pre_enable)
4877 			encoder->pre_enable(encoder);
4878 	}
4879 
4880 	if (intel_crtc->config->has_pch_encoder)
4881 		dev_priv->display.fdi_link_train(crtc);
4882 
4883 	if (!intel_crtc->config->has_dsi_encoder)
4884 		intel_ddi_enable_pipe_clock(intel_crtc);
4885 
4886 	if (INTEL_INFO(dev)->gen >= 9)
4887 		skylake_pfit_enable(intel_crtc);
4888 	else
4889 		ironlake_pfit_enable(intel_crtc);
4890 
4891 	/*
4892 	 * On ILK+ LUT must be loaded before the pipe is running but with
4893 	 * clocks enabled
4894 	 */
4895 	intel_color_load_luts(&pipe_config->base);
4896 
4897 	intel_ddi_set_pipe_settings(crtc);
4898 	if (!intel_crtc->config->has_dsi_encoder)
4899 		intel_ddi_enable_transcoder_func(crtc);
4900 
4901 	if (dev_priv->display.initial_watermarks != NULL)
4902 		dev_priv->display.initial_watermarks(pipe_config);
4903 	else
4904 		intel_update_watermarks(crtc);
4905 
4906 	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
4907 	if (!intel_crtc->config->has_dsi_encoder)
4908 		intel_enable_pipe(intel_crtc);
4909 
4910 	if (intel_crtc->config->has_pch_encoder)
4911 		lpt_pch_enable(crtc);
4912 
4913 	if (intel_crtc->config->dp_encoder_is_mst)
4914 		intel_ddi_set_vc_payload_alloc(crtc, true);
4915 
4916 	assert_vblank_disabled(crtc);
4917 	drm_crtc_vblank_on(crtc);
4918 
4919 	for_each_encoder_on_crtc(dev, crtc, encoder) {
4920 		encoder->enable(encoder);
4921 		intel_opregion_notify_encoder(encoder, true);
4922 	}
4923 
4924 	if (intel_crtc->config->has_pch_encoder) {
4925 		intel_wait_for_vblank(dev, pipe);
4926 		intel_wait_for_vblank(dev, pipe);
4927 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4928 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4929 						      true);
4930 	}
4931 
4932 	/* If we change the relative order between pipe/planes enabling, we need
4933 	 * to change the workaround. */
4934 	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
4935 	if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
4936 		intel_wait_for_vblank(dev, hsw_workaround_pipe);
4937 		intel_wait_for_vblank(dev, hsw_workaround_pipe);
4938 	}
4939 }
4940 
4941 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
4942 {
4943 	struct drm_device *dev = crtc->base.dev;
4944 	struct drm_i915_private *dev_priv = dev->dev_private;
4945 	int pipe = crtc->pipe;
4946 
4947 	/* To avoid upsetting the power well on Haswell, only disable the pfit if
4948 	 * it's in use. The hw state code will make sure we get this right. */
4949 	if (force || crtc->config->pch_pfit.enabled) {
4950 		I915_WRITE(PF_CTL(pipe), 0);
4951 		I915_WRITE(PF_WIN_POS(pipe), 0);
4952 		I915_WRITE(PF_WIN_SZ(pipe), 0);
4953 	}
4954 }
4955 
4956 static void ironlake_crtc_disable(struct drm_crtc *crtc)
4957 {
4958 	struct drm_device *dev = crtc->dev;
4959 	struct drm_i915_private *dev_priv = dev->dev_private;
4960 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4961 	struct intel_encoder *encoder;
4962 	int pipe = intel_crtc->pipe;
4963 
4964 	/*
4965 	 * Sometimes spurious CPU pipe underruns happen when the
4966 	 * pipe is already disabled, but FDI RX/TX is still enabled.
4967 	 * Happens at least with VGA+HDMI cloning. Suppress them.
4968 	 */
4969 	if (intel_crtc->config->has_pch_encoder) {
4970 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4971 		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
4972 	}
4973 
4974 	for_each_encoder_on_crtc(dev, crtc, encoder)
4975 		encoder->disable(encoder);
4976 
4977 	drm_crtc_vblank_off(crtc);
4978 	assert_vblank_disabled(crtc);
4979 
4980 	intel_disable_pipe(intel_crtc);
4981 
4982 	ironlake_pfit_disable(intel_crtc, false);
4983 
4984 	if (intel_crtc->config->has_pch_encoder)
4985 		ironlake_fdi_disable(crtc);
4986 
4987 	for_each_encoder_on_crtc(dev, crtc, encoder)
4988 		if (encoder->post_disable)
4989 			encoder->post_disable(encoder);
4990 
4991 	if (intel_crtc->config->has_pch_encoder) {
4992 		ironlake_disable_pch_transcoder(dev_priv, pipe);
4993 
4994 		if (HAS_PCH_CPT(dev)) {
4995 			i915_reg_t reg;
4996 			u32 temp;
4997 
4998 			/* disable TRANS_DP_CTL */
4999 			reg = TRANS_DP_CTL(pipe);
5000 			temp = I915_READ(reg);
5001 			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
5002 				  TRANS_DP_PORT_SEL_MASK);
5003 			temp |= TRANS_DP_PORT_SEL_NONE;
5004 			I915_WRITE(reg, temp);
5005 
5006 			/* disable DPLL_SEL */
5007 			temp = I915_READ(PCH_DPLL_SEL);
5008 			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
5009 			I915_WRITE(PCH_DPLL_SEL, temp);
5010 		}
5011 
5012 		ironlake_fdi_pll_disable(intel_crtc);
5013 	}
5014 
5015 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5016 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
5017 }
5018 
5019 static void haswell_crtc_disable(struct drm_crtc *crtc)
5020 {
5021 	struct drm_device *dev = crtc->dev;
5022 	struct drm_i915_private *dev_priv = dev->dev_private;
5023 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5024 	struct intel_encoder *encoder;
5025 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
5026 
5027 	if (intel_crtc->config->has_pch_encoder)
5028 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5029 						      false);
5030 
5031 	for_each_encoder_on_crtc(dev, crtc, encoder) {
5032 		intel_opregion_notify_encoder(encoder, false);
5033 		encoder->disable(encoder);
5034 	}
5035 
5036 	drm_crtc_vblank_off(crtc);
5037 	assert_vblank_disabled(crtc);
5038 
5039 	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
5040 	if (!intel_crtc->config->has_dsi_encoder)
5041 		intel_disable_pipe(intel_crtc);
5042 
5043 	if (intel_crtc->config->dp_encoder_is_mst)
5044 		intel_ddi_set_vc_payload_alloc(crtc, false);
5045 
5046 	if (!intel_crtc->config->has_dsi_encoder)
5047 		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
5048 
5049 	if (INTEL_INFO(dev)->gen >= 9)
5050 		skylake_scaler_disable(intel_crtc);
5051 	else
5052 		ironlake_pfit_disable(intel_crtc, false);
5053 
5054 	if (!intel_crtc->config->has_dsi_encoder)
5055 		intel_ddi_disable_pipe_clock(intel_crtc);
5056 
5057 	for_each_encoder_on_crtc(dev, crtc, encoder)
5058 		if (encoder->post_disable)
5059 			encoder->post_disable(encoder);
5060 
5061 	if (intel_crtc->config->has_pch_encoder) {
5062 		lpt_disable_pch_transcoder(dev_priv);
5063 		lpt_disable_iclkip(dev_priv);
5064 		intel_ddi_fdi_disable(crtc);
5065 
5066 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5067 						      true);
5068 	}
5069 }
5070 
5071 static void i9xx_pfit_enable(struct intel_crtc *crtc)
5072 {
5073 	struct drm_device *dev = crtc->base.dev;
5074 	struct drm_i915_private *dev_priv = dev->dev_private;
5075 	struct intel_crtc_state *pipe_config = crtc->config;
5076 
5077 	if (!pipe_config->gmch_pfit.control)
5078 		return;
5079 
5080 	/*
5081 	 * The panel fitter should only be adjusted whilst the pipe is disabled,
5082 	 * according to register description and PRM.
5083 	 */
5084 	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
5085 	assert_pipe_disabled(dev_priv, crtc->pipe);
5086 
5087 	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
5088 	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
5089 
5090 	/* Border color in case we don't scale up to the full screen. Black by
5091 	 * default, change to something else for debugging. */
5092 	I915_WRITE(BCLRPAT(crtc->pipe), 0);
5093 }
5094 
5095 static enum intel_display_power_domain port_to_power_domain(enum port port)
5096 {
5097 	switch (port) {
5098 	case PORT_A:
5099 		return POWER_DOMAIN_PORT_DDI_A_LANES;
5100 	case PORT_B:
5101 		return POWER_DOMAIN_PORT_DDI_B_LANES;
5102 	case PORT_C:
5103 		return POWER_DOMAIN_PORT_DDI_C_LANES;
5104 	case PORT_D:
5105 		return POWER_DOMAIN_PORT_DDI_D_LANES;
5106 	case PORT_E:
5107 		return POWER_DOMAIN_PORT_DDI_E_LANES;
5108 	default:
5109 		MISSING_CASE(port);
5110 		return POWER_DOMAIN_PORT_OTHER;
5111 	}
5112 }
5113 
5114 static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
5115 {
5116 	switch (port) {
5117 	case PORT_A:
5118 		return POWER_DOMAIN_AUX_A;
5119 	case PORT_B:
5120 		return POWER_DOMAIN_AUX_B;
5121 	case PORT_C:
5122 		return POWER_DOMAIN_AUX_C;
5123 	case PORT_D:
5124 		return POWER_DOMAIN_AUX_D;
5125 	case PORT_E:
5126 		/* FIXME: Check VBT for actual wiring of PORT E */
5127 		return POWER_DOMAIN_AUX_D;
5128 	default:
5129 		MISSING_CASE(port);
5130 		return POWER_DOMAIN_AUX_A;
5131 	}
5132 }
5133 
5134 enum intel_display_power_domain
5135 intel_display_port_power_domain(struct intel_encoder *intel_encoder)
5136 {
5137 	struct drm_device *dev = intel_encoder->base.dev;
5138 	struct intel_digital_port *intel_dig_port;
5139 
5140 	switch (intel_encoder->type) {
5141 	case INTEL_OUTPUT_UNKNOWN:
5142 		/* Only DDI platforms should ever use this output type */
5143 		WARN_ON_ONCE(!HAS_DDI(dev));
5144 	case INTEL_OUTPUT_DISPLAYPORT:
5145 	case INTEL_OUTPUT_HDMI:
5146 	case INTEL_OUTPUT_EDP:
5147 		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
5148 		return port_to_power_domain(intel_dig_port->port);
5149 	case INTEL_OUTPUT_DP_MST:
5150 		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
5151 		return port_to_power_domain(intel_dig_port->port);
5152 	case INTEL_OUTPUT_ANALOG:
5153 		return POWER_DOMAIN_PORT_CRT;
5154 	case INTEL_OUTPUT_DSI:
5155 		return POWER_DOMAIN_PORT_DSI;
5156 	default:
5157 		return POWER_DOMAIN_PORT_OTHER;
5158 	}
5159 }
5160 
5161 enum intel_display_power_domain
5162 intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
5163 {
5164 	struct drm_device *dev = intel_encoder->base.dev;
5165 	struct intel_digital_port *intel_dig_port;
5166 
5167 	switch (intel_encoder->type) {
5168 	case INTEL_OUTPUT_UNKNOWN:
5169 	case INTEL_OUTPUT_HDMI:
5170 		/*
5171 		 * Only DDI platforms should ever use these output types.
5172 		 * We can get here after the HDMI detect code has already set
5173 		 * the type of the shared encoder. Since we can't be sure
		 * what the status of the given connectors is, play safe and
5175 		 * run the DP detection too.
5176 		 */
5177 		WARN_ON_ONCE(!HAS_DDI(dev));
5178 	case INTEL_OUTPUT_DISPLAYPORT:
5179 	case INTEL_OUTPUT_EDP:
5180 		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
5181 		return port_to_aux_power_domain(intel_dig_port->port);
5182 	case INTEL_OUTPUT_DP_MST:
5183 		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
5184 		return port_to_aux_power_domain(intel_dig_port->port);
5185 	default:
5186 		MISSING_CASE(intel_encoder->type);
5187 		return POWER_DOMAIN_AUX_A;
5188 	}
5189 }
5190 
5191 static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
5192 					    struct intel_crtc_state *crtc_state)
5193 {
5194 	struct drm_device *dev = crtc->dev;
5195 	struct drm_encoder *encoder;
5196 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5197 	enum i915_pipe pipe = intel_crtc->pipe;
5198 	unsigned long mask;
5199 	enum transcoder transcoder = crtc_state->cpu_transcoder;
5200 
5201 	if (!crtc_state->base.active)
5202 		return 0;
5203 
5204 	mask = BIT(POWER_DOMAIN_PIPE(pipe));
5205 	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
5206 	if (crtc_state->pch_pfit.enabled ||
5207 	    crtc_state->pch_pfit.force_thru)
5208 		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
5209 
5210 	drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
5211 		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
5212 
5213 		mask |= BIT(intel_display_port_power_domain(intel_encoder));
5214 	}
5215 
5216 	if (crtc_state->shared_dpll)
5217 		mask |= BIT(POWER_DOMAIN_PLLS);
5218 
5219 	return mask;
5220 }
5221 
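/*
 * Grab references for the power domains the CRTC newly needs and return
 * the set it no longer needs, for the caller to release once the modeset
 * has completed.
 */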
5222 static unsigned long
5223 modeset_get_crtc_power_domains(struct drm_crtc *crtc,
5224 			       struct intel_crtc_state *crtc_state)
5225 {
5226 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5227 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5228 	enum intel_display_power_domain domain;
5229 	unsigned long domains, new_domains, old_domains;
5230 
5231 	old_domains = intel_crtc->enabled_power_domains;
5232 	intel_crtc->enabled_power_domains = new_domains =
5233 		get_crtc_power_domains(crtc, crtc_state);
5234 
5235 	domains = new_domains & ~old_domains;
5236 
5237 	for_each_power_domain(domain, domains)
5238 		intel_display_power_get(dev_priv, domain);
5239 
5240 	return old_domains & ~new_domains;
5241 }
5242 
5243 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
5244 				      unsigned long domains)
5245 {
5246 	enum intel_display_power_domain domain;
5247 
5248 	for_each_power_domain(domain, domains)
5249 		intel_display_power_put(dev_priv, domain);
5250 }
5251 
5252 static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
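/*
 * The max dotclock is derived from the max cdclk with a platform specific
 * guardband, e.g. on CHV a 320000 kHz cdclk allows a dotclock of at most
 * 320000 * 95 / 100 = 304000 kHz, while gen < 4 double wide pipes get
 * 2 * cdclk * 90 / 100.
 */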
5253 {
5254 	int max_cdclk_freq = dev_priv->max_cdclk_freq;
5255 
5256 	if (INTEL_INFO(dev_priv)->gen >= 9 ||
5257 	    IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5258 		return max_cdclk_freq;
5259 	else if (IS_CHERRYVIEW(dev_priv))
5260 		return max_cdclk_freq*95/100;
5261 	else if (INTEL_INFO(dev_priv)->gen < 4)
5262 		return 2*max_cdclk_freq*90/100;
5263 	else
5264 		return max_cdclk_freq*90/100;
5265 }
5266 
5267 static void intel_update_max_cdclk(struct drm_device *dev)
5268 {
5269 	struct drm_i915_private *dev_priv = dev->dev_private;
5270 
5271 	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
5272 		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
5273 
5274 		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
5275 			dev_priv->max_cdclk_freq = 675000;
5276 		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
5277 			dev_priv->max_cdclk_freq = 540000;
5278 		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
5279 			dev_priv->max_cdclk_freq = 450000;
5280 		else
5281 			dev_priv->max_cdclk_freq = 337500;
5282 	} else if (IS_BROXTON(dev)) {
5283 		dev_priv->max_cdclk_freq = 624000;
5284 	} else if (IS_BROADWELL(dev))  {
5285 		/*
5286 		 * FIXME with extra cooling we can allow
5287 		 * 540 MHz for ULX and 675 MHz for ULT.
5288 		 * How can we know if extra cooling is
5289 		 * available? PCI ID, VBT, something else?
5290 		 */
5291 		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
5292 			dev_priv->max_cdclk_freq = 450000;
5293 		else if (IS_BDW_ULX(dev))
5294 			dev_priv->max_cdclk_freq = 450000;
5295 		else if (IS_BDW_ULT(dev))
5296 			dev_priv->max_cdclk_freq = 540000;
5297 		else
5298 			dev_priv->max_cdclk_freq = 675000;
5299 	} else if (IS_CHERRYVIEW(dev)) {
5300 		dev_priv->max_cdclk_freq = 320000;
5301 	} else if (IS_VALLEYVIEW(dev)) {
5302 		dev_priv->max_cdclk_freq = 400000;
5303 	} else {
5304 		/* otherwise assume cdclk is fixed */
5305 		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
5306 	}
5307 
5308 	dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
5309 
5310 	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
5311 			 dev_priv->max_cdclk_freq);
5312 
5313 	DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
5314 			 dev_priv->max_dotclk_freq);
5315 }
5316 
5317 static void intel_update_cdclk(struct drm_device *dev)
5318 {
5319 	struct drm_i915_private *dev_priv = dev->dev_private;
5320 
5321 	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
5322 	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
5323 			 dev_priv->cdclk_freq);
5324 
5330 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5331 		/*
5332 		 * Program the gmbus_freq based on the cdclk frequency.
5333 		 * BSpec erroneously claims we should aim for 4MHz, but
5334 		 * in fact 1MHz is the correct frequency.
5335 		 */
5336 		I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
5337 	}
5338 
5339 	if (dev_priv->max_cdclk_freq == 0)
5340 		intel_update_max_cdclk(dev);
5341 }
5342 
5343 static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency)
5344 {
5345 	uint32_t divider;
5346 	uint32_t ratio;
5347 	uint32_t current_freq;
5348 	int ret;
5349 
5350 	/* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
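	/* e.g. ratio 60, div 4: 19.2MHz * 60 / 2 / 4 = 144MHz (144000 kHz) */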
5351 	switch (frequency) {
5352 	case 144000:
5353 		divider = BXT_CDCLK_CD2X_DIV_SEL_4;
5354 		ratio = BXT_DE_PLL_RATIO(60);
5355 		break;
5356 	case 288000:
5357 		divider = BXT_CDCLK_CD2X_DIV_SEL_2;
5358 		ratio = BXT_DE_PLL_RATIO(60);
5359 		break;
5360 	case 384000:
5361 		divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
5362 		ratio = BXT_DE_PLL_RATIO(60);
5363 		break;
5364 	case 576000:
5365 		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5366 		ratio = BXT_DE_PLL_RATIO(60);
5367 		break;
5368 	case 624000:
5369 		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5370 		ratio = BXT_DE_PLL_RATIO(65);
5371 		break;
5372 	case 19200:
5373 		/*
5374 		 * Bypass frequency with DE PLL disabled. Init ratio, divider
5375 		 * to suppress GCC warning.
5376 		 */
5377 		ratio = 0;
5378 		divider = 0;
5379 		break;
5380 	default:
5381 		DRM_ERROR("unsupported CDCLK freq %d\n", frequency);
5382 
5383 		return;
5384 	}
5385 
5386 	mutex_lock(&dev_priv->rps.hw_lock);
5387 	/* Inform power controller of upcoming frequency change */
5388 	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5389 				      0x80000000);
5390 	mutex_unlock(&dev_priv->rps.hw_lock);
5391 
5392 	if (ret) {
5393 		DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
5394 			  ret, frequency);
5395 		return;
5396 	}
5397 
5398 	current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
5399 	/* convert from .1 fixpoint MHz with -1MHz offset to kHz */
5400 	current_freq = current_freq * 500 + 1000;
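	/* e.g. a raw field value of 1246 decodes to 1246 * 500 + 1000 = 624000 kHz */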
5401 
5402 	/*
5403 	 * DE PLL has to be disabled when
5404 	 * - setting to 19.2MHz (bypass, PLL isn't used)
5405 	 * - before changing to 624MHz (the PLL ratio changes, needs toggling)
5406 	 * - before changing away from 624MHz (likewise)
5407 	 */
5408 	if (frequency == 19200 || frequency == 624000 ||
5409 	    current_freq == 624000) {
5410 		I915_WRITE(BXT_DE_PLL_ENABLE, I915_READ(BXT_DE_PLL_ENABLE) & ~BXT_DE_PLL_PLL_ENABLE);
5411 		/* Timeout 200us */
5412 		if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
5413 			     1))
5414 			DRM_ERROR("timeout waiting for DE PLL unlock\n");
5415 	}
5416 
5417 	if (frequency != 19200) {
5418 		uint32_t val;
5419 
5420 		val = I915_READ(BXT_DE_PLL_CTL);
5421 		val &= ~BXT_DE_PLL_RATIO_MASK;
5422 		val |= ratio;
5423 		I915_WRITE(BXT_DE_PLL_CTL, val);
5424 
5425 		I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
5426 		/* Timeout 200us */
5427 		if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
5428 			DRM_ERROR("timeout waiting for DE PLL lock\n");
5429 
5430 		val = I915_READ(CDCLK_CTL);
5431 		val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
5432 		val |= divider;
5433 		/*
5434 		 * Disable SSA Precharge when CD clock frequency < 500 MHz,
5435 		 * enable otherwise.
5436 		 */
5437 		val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5438 		if (frequency >= 500000)
5439 			val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5440 
5441 		val &= ~CDCLK_FREQ_DECIMAL_MASK;
5442 		/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
5443 		val |= (frequency - 1000) / 500;
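		/* e.g. 624000 kHz encodes as (624000 - 1000) / 500 = 1246 */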
5444 		I915_WRITE(CDCLK_CTL, val);
5445 	}
5446 
5447 	mutex_lock(&dev_priv->rps.hw_lock);
5448 	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5449 				      DIV_ROUND_UP(frequency, 25000));
5450 	mutex_unlock(&dev_priv->rps.hw_lock);
5451 
5452 	if (ret) {
5453 		DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
5454 			  ret, frequency);
5455 		return;
5456 	}
5457 
5458 	intel_update_cdclk(dev_priv->dev);
5459 }
5460 
5461 static bool broxton_cdclk_is_enabled(struct drm_i915_private *dev_priv)
5462 {
5463 	if (!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE))
5464 		return false;
5465 
5466 	/* TODO: Check for a valid CDCLK rate */
5467 
5468 	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_REQUEST)) {
5469 		DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power not requested\n");
5470 
5471 		return false;
5472 	}
5473 
5474 	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) {
5475 		DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power hasn't settled\n");
5476 
5477 		return false;
5478 	}
5479 
5480 	return true;
5481 }
5482 
5483 bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv)
5484 {
5485 	return broxton_cdclk_is_enabled(dev_priv);
5486 }
5487 
5488 void broxton_init_cdclk(struct drm_i915_private *dev_priv)
5489 {
5490 	/* check if cd clock is enabled */
5491 	if (broxton_cdclk_is_enabled(dev_priv)) {
5492 		DRM_DEBUG_KMS("CDCLK already enabled, won't reprogram it\n");
5493 		return;
5494 	}
5495 
5496 	DRM_DEBUG_KMS("CDCLK not enabled, enabling it\n");
5497 
5498 	/*
5499 	 * FIXME:
5500 	 * - The initial CDCLK needs to be read from VBT.
5501 	 *   Need to make this change once the VBT changes for BXT land.
5502 	 * - check whether setting the max (or any) cdclk freq is really
5503 	 *   necessary here; it belongs in the modeset path
5504 	 */
5505 	broxton_set_cdclk(dev_priv, 624000);
5506 
5507 	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
5508 	POSTING_READ(DBUF_CTL);
5509 
5510 	udelay(10);
5511 
5512 	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
5513 		DRM_ERROR("DBuf power enable timeout!\n");
5514 }
5515 
5516 void broxton_uninit_cdclk(struct drm_i915_private *dev_priv)
5517 {
5518 	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
5519 	POSTING_READ(DBUF_CTL);
5520 
5521 	udelay(10);
5522 
5523 	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
5524 		DRM_ERROR("DBuf power disable timeout!\n");
5525 
5526 	/* Set minimum (bypass) frequency, in effect turning off the DE PLL */
5527 	broxton_set_cdclk(dev_priv, 19200);
5528 }
5529 
5530 static const struct skl_cdclk_entry {
5531 	unsigned int freq;
5532 	unsigned int vco;
5533 } skl_cdclk_frequencies[] = {
5534 	{ .freq = 308570, .vco = 8640 },
5535 	{ .freq = 337500, .vco = 8100 },
5536 	{ .freq = 432000, .vco = 8640 },
5537 	{ .freq = 450000, .vco = 8100 },
5538 	{ .freq = 540000, .vco = 8100 },
5539 	{ .freq = 617140, .vco = 8640 },
5540 	{ .freq = 675000, .vco = 8100 },
5541 };
5542 
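/*
 * Encode a cdclk frequency in kHz as the CDCLK_CTL decimal field, i.e.
 * .1 fixpoint MHz with a -1 MHz offset: e.g. 337500 kHz ->
 * (337500 - 1000) / 500 = 673.
 */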
5543 static unsigned int skl_cdclk_decimal(unsigned int freq)
5544 {
5545 	return (freq - 1000) / 500;
5546 }
5547 
5548 static unsigned int skl_cdclk_get_vco(unsigned int freq)
5549 {
5550 	unsigned int i;
5551 
5552 	for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) {
5553 		const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i];
5554 
5555 		if (e->freq == freq)
5556 			return e->vco;
5557 	}
5558 
5559 	return 8100;
5560 }
5561 
5562 static void
5563 skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
5564 {
5565 	unsigned int min_freq;
5566 	u32 val;
5567 
5568 	/* select the minimum CDCLK before enabling DPLL 0 */
5572 
5573 	if (required_vco == 8640)
5574 		min_freq = 308570;
5575 	else
5576 		min_freq = 337500;
5577 
5578 	val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
5579 
5580 	I915_WRITE(CDCLK_CTL, val);
5581 	POSTING_READ(CDCLK_CTL);
5582 
5583 	/*
5584 	 * We always enable DPLL0 with the lowest link rate possible, but still
5585 	 * taking into account the VCO required to operate the eDP panel at the
5586 	 * desired frequency. The usual DP link rates operate with a VCO of
5587 	 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
5588 	 * The modeset code is responsible for the selection of the exact link
5589 	 * rate later on, with the constraint of choosing a frequency that
5590 	 * works with required_vco.
5591 	 */
5592 	val = I915_READ(DPLL_CTRL1);
5593 
5594 	val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
5595 		 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
5596 	val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
5597 	if (required_vco == 8640)
5598 		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
5599 					    SKL_DPLL0);
5600 	else
5601 		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5602 					    SKL_DPLL0);
5603 
5604 	I915_WRITE(DPLL_CTRL1, val);
5605 	POSTING_READ(DPLL_CTRL1);
5606 
5607 	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
5608 
5609 	if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
5610 		DRM_ERROR("DPLL0 not locked\n");
5611 }
5612 
5613 static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
5614 {
5615 	int ret;
5616 	u32 val;
5617 
5618 	/* inform PCU we want to change CDCLK */
5619 	val = SKL_CDCLK_PREPARE_FOR_CHANGE;
5620 	mutex_lock(&dev_priv->rps.hw_lock);
5621 	ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
5622 	mutex_unlock(&dev_priv->rps.hw_lock);
5623 
5624 	return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
5625 }
5626 
5627 static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
5628 {
5629 	unsigned int i;
5630 
5631 	for (i = 0; i < 15; i++) {
5632 		if (skl_cdclk_pcu_ready(dev_priv))
5633 			return true;
5634 		udelay(10);
5635 	}
5636 
5637 	return false;
5638 }
5639 
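/*
 * Changing the cdclk on SKL is a handshake with the PCU: poll until the
 * PCU is ready for a change, program CDCLK_CTL, then acknowledge the new
 * frequency band back to the PCU.
 */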
5640 static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
5641 {
5642 	struct drm_device *dev = dev_priv->dev;
5643 	u32 freq_select, pcu_ack;
5644 
5645 	DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz\n", freq);
5646 
5647 	if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
5648 		DRM_ERROR("failed to inform PCU about cdclk change\n");
5649 		return;
5650 	}
5651 
5652 	/* set CDCLK_CTL */
5653 	switch (freq) {
5654 	case 450000:
5655 	case 432000:
5656 		freq_select = CDCLK_FREQ_450_432;
5657 		pcu_ack = 1;
5658 		break;
5659 	case 540000:
5660 		freq_select = CDCLK_FREQ_540;
5661 		pcu_ack = 2;
5662 		break;
5663 	case 617140:
5664 	case 675000:
5665 		freq_select = CDCLK_FREQ_675_617;
5666 		pcu_ack = 3;
5667 		break;
5668 	case 308570:
5669 	case 337500:
5670 	default:
5671 		freq_select = CDCLK_FREQ_337_308;
5672 		pcu_ack = 0;
5673 		break;
5674 	}
5675 
5676 	I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
5677 	POSTING_READ(CDCLK_CTL);
5678 
5679 	/* inform PCU of the change */
5680 	mutex_lock(&dev_priv->rps.hw_lock);
5681 	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
5682 	mutex_unlock(&dev_priv->rps.hw_lock);
5683 
5684 	intel_update_cdclk(dev);
5685 }
5686 
5687 void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
5688 {
5689 	/* disable DBUF power */
5690 	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
5691 	POSTING_READ(DBUF_CTL);
5692 
5693 	udelay(10);
5694 
5695 	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
5696 		DRM_ERROR("DBuf power disable timeout\n");
5697 
5698 	/* disable DPLL0 */
5699 	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
5700 	if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
5701 		DRM_ERROR("Couldn't disable DPLL0\n");
5702 }
5703 
5704 void skl_init_cdclk(struct drm_i915_private *dev_priv)
5705 {
5706 	unsigned int required_vco;
5707 
5708 	/* DPLL0 not enabled (happens on early BIOS versions) */
5709 	if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
5710 		/* enable DPLL0 */
5711 		required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
5712 		skl_dpll0_enable(dev_priv, required_vco);
5713 	}
5714 
5715 	/* set CDCLK to the frequency the BIOS chose */
5716 	skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);
5717 
5718 	/* enable DBUF power */
5719 	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
5720 	POSTING_READ(DBUF_CTL);
5721 
5722 	udelay(10);
5723 
5724 	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
5725 		DRM_ERROR("DBuf power enable timeout\n");
5726 }
5727 
5728 int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
5729 {
5730 	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
5731 	uint32_t cdctl = I915_READ(CDCLK_CTL);
5732 	int freq = dev_priv->skl_boot_cdclk;
5733 
5734 	/*
5735 	 * Check if the pre-OS initialized the display.
5736 	 * The SWF18 scratchpad register is set by the pre-OS, so OS
5737 	 * drivers can use it to check the display initialization status.
5738 	 */
5739 	if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
5740 		goto sanitize;
5741 
5742 	/* Is PLL enabled and locked ? */
5743 	if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK)))
5744 		goto sanitize;
5745 
5746 	/* DPLL okay; verify the cdclock
5747 	 *
5748 	 * In some instances the frequency selection is correct but the
5749 	 * decimal part is programmed wrong by the BIOS when the pre-OS
5750 	 * does not enable the display. Verify that as well.
5751 	 */
5752 	if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq)))
5753 		/* All well; nothing to sanitize */
5754 		return false;
5755 sanitize:
5756 	/*
5757 	 * As of now, initialize with the max cdclk until
5758 	 * we get dynamic cdclk support.
5759 	 */
5760 	dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
5761 	skl_init_cdclk(dev_priv);
5762 
5763 	/* we did have to sanitize */
5764 	return true;
5765 }
5766 
5767 /* Adjust CDclk dividers to allow high res or save power if possible */
5768 static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
5769 {
5770 	struct drm_i915_private *dev_priv = dev->dev_private;
5771 	u32 val, cmd;
5772 
5773 	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
5774 					!= dev_priv->cdclk_freq);
5775 
5776 	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
5777 		cmd = 2;
5778 	else if (cdclk == 266667)
5779 		cmd = 1;
5780 	else
5781 		cmd = 0;
5782 
5783 	mutex_lock(&dev_priv->rps.hw_lock);
5784 	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
5785 	val &= ~DSPFREQGUAR_MASK;
5786 	val |= (cmd << DSPFREQGUAR_SHIFT);
5787 	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
5788 	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
5789 		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
5790 		     50)) {
5791 		DRM_ERROR("timed out waiting for CDclk change\n");
5792 	}
5793 	mutex_unlock(&dev_priv->rps.hw_lock);
5794 
5795 	mutex_lock(&dev_priv->sb_lock);
5796 
5797 	if (cdclk == 400000) {
5798 		u32 divider;
5799 
5800 		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
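		/*
		 * e.g. assuming a 2000000 kHz HPLL, the 400 MHz cdclk needs
		 * DIV_ROUND_CLOSEST(2 * 2000000, 400000) - 1 = 9 as divider.
		 */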
5801 
5802 		/* adjust cdclk divider */
5803 		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
5804 		val &= ~CCK_FREQUENCY_VALUES;
5805 		val |= divider;
5806 		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
5807 
5808 		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
5809 			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
5810 			     50))
5811 			DRM_ERROR("timed out waiting for CDclk change\n");
5812 	}
5813 
5814 	/* adjust self-refresh exit latency value */
5815 	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
5816 	val &= ~0x7f;
5817 
5818 	/*
5819 	 * For high bandwidth configs, we set a higher latency in the bunit
5820 	 * so that the core display fetch happens in time to avoid underruns.
5821 	 */
5822 	if (cdclk == 400000)
5823 		val |= 4500 / 250; /* 4.5 usec */
5824 	else
5825 		val |= 3000 / 250; /* 3.0 usec */
5826 	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
5827 
5828 	mutex_unlock(&dev_priv->sb_lock);
5829 
5830 	intel_update_cdclk(dev);
5831 }
5832 
5833 static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
5834 {
5835 	struct drm_i915_private *dev_priv = dev->dev_private;
5836 	u32 val, cmd;
5837 
5838 	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
5839 						!= dev_priv->cdclk_freq);
5840 
5841 	switch (cdclk) {
5842 	case 333333:
5843 	case 320000:
5844 	case 266667:
5845 	case 200000:
5846 		break;
5847 	default:
5848 		MISSING_CASE(cdclk);
5849 		return;
5850 	}
5851 
5852 	/*
5853 	 * Specs are full of misinformation, but testing on actual
5854 	 * hardware has shown that we just need to write the desired
5855 	 * CCK divider into the Punit register.
5856 	 */
5857 	cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
5858 
5859 	mutex_lock(&dev_priv->rps.hw_lock);
5860 	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
5861 	val &= ~DSPFREQGUAR_MASK_CHV;
5862 	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
5863 	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
5864 	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
5865 		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
5866 		     50)) {
5867 		DRM_ERROR("timed out waiting for CDclk change\n");
5868 	}
5869 	mutex_unlock(&dev_priv->rps.hw_lock);
5870 
5871 	intel_update_cdclk(dev);
5872 }
5873 
5874 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
5875 				 int max_pixclk)
5876 {
5877 	int freq_320 = (dev_priv->hpll_freq <<  1) % 320000 != 0 ? 333333 : 320000;
5878 	int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
5879 
5880 	/*
5881 	 * Really only a few cases to deal with, as only 4 CDclks are supported:
5882 	 *   200MHz
5883 	 *   267MHz
5884 	 *   320/333MHz (depends on HPLL freq)
5885 	 *   400MHz (VLV only)
5886 	 * So we check to see whether we're above 90% (VLV) or 95% (CHV)
5887 	 * of the lower bin and adjust if needed.
5888 	 *
5889 	 * We seem to get an unstable or solid color picture at 200MHz.
5890 	 * Not sure what's wrong. For now use 200MHz only when all pipes
5891 	 * are off.
5892 	 */
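	/* e.g. a 250000 kHz max_pixclk exceeds 266667*90/100 = 240000, so pick 320/333 MHz */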
5893 	if (!IS_CHERRYVIEW(dev_priv) &&
5894 	    max_pixclk > freq_320*limit/100)
5895 		return 400000;
5896 	else if (max_pixclk > 266667*limit/100)
5897 		return freq_320;
5898 	else if (max_pixclk > 0)
5899 		return 266667;
5900 	else
5901 		return 200000;
5902 }
5903 
5904 static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
5905 			      int max_pixclk)
5906 {
5907 	/*
5908 	 * FIXME:
5909 	 * - remove the guardband, it's not needed on BXT
5910 	 * - set 19.2MHz bypass frequency if there are no active pipes
5911 	 */
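	/* e.g. a 300000 kHz max_pixclk is above 288000*9/10 = 259200 but below 384000*9/10, so pick 384000 */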
5912 	if (max_pixclk > 576000*9/10)
5913 		return 624000;
5914 	else if (max_pixclk > 384000*9/10)
5915 		return 576000;
5916 	else if (max_pixclk > 288000*9/10)
5917 		return 384000;
5918 	else if (max_pixclk > 144000*9/10)
5919 		return 288000;
5920 	else
5921 		return 144000;
5922 }
5923 
5924 /* Compute the max pixel clock for new configuration. */
5925 static int intel_mode_max_pixclk(struct drm_device *dev,
5926 				 struct drm_atomic_state *state)
5927 {
5928 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
5929 	struct drm_i915_private *dev_priv = dev->dev_private;
5930 	struct drm_crtc *crtc;
5931 	struct drm_crtc_state *crtc_state;
5932 	unsigned max_pixclk = 0, i;
5933 	enum i915_pipe pipe;
5934 
5935 	memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
5936 	       sizeof(intel_state->min_pixclk));
5937 
5938 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
5939 		int pixclk = 0;
5940 
5941 		if (crtc_state->enable)
5942 			pixclk = crtc_state->adjusted_mode.crtc_clock;
5943 
5944 		intel_state->min_pixclk[i] = pixclk;
5945 	}
5946 
5947 	for_each_pipe(dev_priv, pipe)
5948 		max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);
5949 
5950 	return max_pixclk;
5951 }
5952 
5953 static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
5954 {
5955 	struct drm_device *dev = state->dev;
5956 	struct drm_i915_private *dev_priv = dev->dev_private;
5957 	int max_pixclk = intel_mode_max_pixclk(dev, state);
5958 	struct intel_atomic_state *intel_state =
5959 		to_intel_atomic_state(state);
5960 
5961 	if (max_pixclk < 0)
5962 		return max_pixclk;
5963 
5964 	intel_state->cdclk = intel_state->dev_cdclk =
5965 		valleyview_calc_cdclk(dev_priv, max_pixclk);
5966 
5967 	if (!intel_state->active_crtcs)
5968 		intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0);
5969 
5970 	return 0;
5971 }
5972 
5973 static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
5974 {
5975 	struct drm_device *dev = state->dev;
5976 	struct drm_i915_private *dev_priv = dev->dev_private;
5977 	int max_pixclk = intel_mode_max_pixclk(dev, state);
5978 	struct intel_atomic_state *intel_state =
5979 		to_intel_atomic_state(state);
5980 
5981 	if (max_pixclk < 0)
5982 		return max_pixclk;
5983 
5984 	intel_state->cdclk = intel_state->dev_cdclk =
5985 		broxton_calc_cdclk(dev_priv, max_pixclk);
5986 
5987 	if (!intel_state->active_crtcs)
5988 		intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0);
5989 
5990 	return 0;
5991 }
5992 
5993 static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
5994 {
5995 	unsigned int credits, default_credits;
5996 
5997 	if (IS_CHERRYVIEW(dev_priv))
5998 		default_credits = PFI_CREDIT(12);
5999 	else
6000 		default_credits = PFI_CREDIT(8);
6001 
6002 	if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
6003 		/* CHV suggested value is 31 or 63 */
6004 		if (IS_CHERRYVIEW(dev_priv))
6005 			credits = PFI_CREDIT_63;
6006 		else
6007 			credits = PFI_CREDIT(15);
6008 	} else {
6009 		credits = default_credits;
6010 	}
6011 
6012 	/*
6013 	 * WA - write default credits before re-programming
6014 	 * FIXME: should we also set the resend bit here?
6015 	 */
6016 	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
6017 		   default_credits);
6018 
6019 	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
6020 		   credits | PFI_CREDIT_RESEND);
6021 
6022 	/*
6023 	 * FIXME is this guaranteed to clear
6024 	 * immediately or should we poll for it?
6025 	 */
6026 	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
6027 }
6028 
6029 static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
6030 {
6031 	struct drm_device *dev = old_state->dev;
6032 	struct drm_i915_private *dev_priv = dev->dev_private;
6033 	struct intel_atomic_state *old_intel_state =
6034 		to_intel_atomic_state(old_state);
6035 	unsigned req_cdclk = old_intel_state->dev_cdclk;
6036 
6037 	/*
6038 	 * FIXME: We can end up here with all power domains off, yet
6039 	 * with a CDCLK frequency other than the minimum. To account
6040 	 * for this take the PIPE-A power domain, which covers the HW
6041 	 * blocks needed for the following programming. This can be
6042 	 * removed once it's guaranteed that we get here either with
6043 	 * the minimum CDCLK set, or the required power domains
6044 	 * enabled.
6045 	 */
6046 	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
6047 
6048 	if (IS_CHERRYVIEW(dev))
6049 		cherryview_set_cdclk(dev, req_cdclk);
6050 	else
6051 		valleyview_set_cdclk(dev, req_cdclk);
6052 
6053 	vlv_program_pfi_credits(dev_priv);
6054 
6055 	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
6056 }
6057 
6058 static void valleyview_crtc_enable(struct drm_crtc *crtc)
6059 {
6060 	struct drm_device *dev = crtc->dev;
6061 	struct drm_i915_private *dev_priv = to_i915(dev);
6062 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6063 	struct intel_encoder *encoder;
6064 	struct intel_crtc_state *pipe_config =
6065 		to_intel_crtc_state(crtc->state);
6066 	int pipe = intel_crtc->pipe;
6067 
6068 	if (WARN_ON(intel_crtc->active))
6069 		return;
6070 
6071 	if (intel_crtc->config->has_dp_encoder)
6072 		intel_dp_set_m_n(intel_crtc, M1_N1);
6073 
6074 	intel_set_pipe_timings(intel_crtc);
6075 	intel_set_pipe_src_size(intel_crtc);
6076 
6077 	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
6080 		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6081 		I915_WRITE(CHV_CANVAS(pipe), 0);
6082 	}
6083 
6084 	i9xx_set_pipeconf(intel_crtc);
6085 
6086 	intel_crtc->active = true;
6087 
6088 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6089 
6090 	for_each_encoder_on_crtc(dev, crtc, encoder)
6091 		if (encoder->pre_pll_enable)
6092 			encoder->pre_pll_enable(encoder);
6093 
6094 	if (IS_CHERRYVIEW(dev)) {
6095 		chv_prepare_pll(intel_crtc, intel_crtc->config);
6096 		chv_enable_pll(intel_crtc, intel_crtc->config);
6097 	} else {
6098 		vlv_prepare_pll(intel_crtc, intel_crtc->config);
6099 		vlv_enable_pll(intel_crtc, intel_crtc->config);
6100 	}
6101 
6102 	for_each_encoder_on_crtc(dev, crtc, encoder)
6103 		if (encoder->pre_enable)
6104 			encoder->pre_enable(encoder);
6105 
6106 	i9xx_pfit_enable(intel_crtc);
6107 
6108 	intel_color_load_luts(&pipe_config->base);
6109 
6110 	intel_update_watermarks(crtc);
6111 	intel_enable_pipe(intel_crtc);
6112 
6113 	assert_vblank_disabled(crtc);
6114 	drm_crtc_vblank_on(crtc);
6115 
6116 	for_each_encoder_on_crtc(dev, crtc, encoder)
6117 		encoder->enable(encoder);
6118 }
6119 
6120 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
6121 {
6122 	struct drm_device *dev = crtc->base.dev;
6123 	struct drm_i915_private *dev_priv = dev->dev_private;
6124 
6125 	I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
6126 	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
6127 }
6128 
6129 static void i9xx_crtc_enable(struct drm_crtc *crtc)
6130 {
6131 	struct drm_device *dev = crtc->dev;
6132 	struct drm_i915_private *dev_priv = to_i915(dev);
6133 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6134 	struct intel_encoder *encoder;
6135 	struct intel_crtc_state *pipe_config =
6136 		to_intel_crtc_state(crtc->state);
6137 	enum i915_pipe pipe = intel_crtc->pipe;
6138 
6139 	if (WARN_ON(intel_crtc->active))
6140 		return;
6141 
6142 	i9xx_set_pll_dividers(intel_crtc);
6143 
6144 	if (intel_crtc->config->has_dp_encoder)
6145 		intel_dp_set_m_n(intel_crtc, M1_N1);
6146 
6147 	intel_set_pipe_timings(intel_crtc);
6148 	intel_set_pipe_src_size(intel_crtc);
6149 
6150 	i9xx_set_pipeconf(intel_crtc);
6151 
6152 	intel_crtc->active = true;
6153 
6154 	if (!IS_GEN2(dev))
6155 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6156 
6157 	for_each_encoder_on_crtc(dev, crtc, encoder)
6158 		if (encoder->pre_enable)
6159 			encoder->pre_enable(encoder);
6160 
6161 	i9xx_enable_pll(intel_crtc);
6162 
6163 	i9xx_pfit_enable(intel_crtc);
6164 
6165 	intel_color_load_luts(&pipe_config->base);
6166 
6167 	intel_update_watermarks(crtc);
6168 	intel_enable_pipe(intel_crtc);
6169 
6170 	assert_vblank_disabled(crtc);
6171 	drm_crtc_vblank_on(crtc);
6172 
6173 	for_each_encoder_on_crtc(dev, crtc, encoder)
6174 		encoder->enable(encoder);
6175 }
6176 
6177 static void i9xx_pfit_disable(struct intel_crtc *crtc)
6178 {
6179 	struct drm_device *dev = crtc->base.dev;
6180 	struct drm_i915_private *dev_priv = dev->dev_private;
6181 
6182 	if (!crtc->config->gmch_pfit.control)
6183 		return;
6184 
6185 	assert_pipe_disabled(dev_priv, crtc->pipe);
6186 
6187 	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
6188 			 I915_READ(PFIT_CONTROL));
6189 	I915_WRITE(PFIT_CONTROL, 0);
6190 }
6191 
6192 static void i9xx_crtc_disable(struct drm_crtc *crtc)
6193 {
6194 	struct drm_device *dev = crtc->dev;
6195 	struct drm_i915_private *dev_priv = dev->dev_private;
6196 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6197 	struct intel_encoder *encoder;
6198 	int pipe = intel_crtc->pipe;
6199 
6200 	/*
6201 	 * On gen2 planes are double buffered but the pipe isn't, so we must
6202 	 * wait for planes to fully turn off before disabling the pipe.
6203 	 */
6204 	if (IS_GEN2(dev))
6205 		intel_wait_for_vblank(dev, pipe);
6206 
6207 	for_each_encoder_on_crtc(dev, crtc, encoder)
6208 		encoder->disable(encoder);
6209 
6210 	drm_crtc_vblank_off(crtc);
6211 	assert_vblank_disabled(crtc);
6212 
6213 	intel_disable_pipe(intel_crtc);
6214 
6215 	i9xx_pfit_disable(intel_crtc);
6216 
6217 	for_each_encoder_on_crtc(dev, crtc, encoder)
6218 		if (encoder->post_disable)
6219 			encoder->post_disable(encoder);
6220 
6221 	if (!intel_crtc->config->has_dsi_encoder) {
6222 		if (IS_CHERRYVIEW(dev))
6223 			chv_disable_pll(dev_priv, pipe);
6224 		else if (IS_VALLEYVIEW(dev))
6225 			vlv_disable_pll(dev_priv, pipe);
6226 		else
6227 			i9xx_disable_pll(intel_crtc);
6228 	}
6229 
6230 	for_each_encoder_on_crtc(dev, crtc, encoder)
6231 		if (encoder->post_pll_disable)
6232 			encoder->post_pll_disable(encoder);
6233 
6234 	if (!IS_GEN2(dev))
6235 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6236 }
6237 
6238 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6239 {
6240 	struct intel_encoder *encoder;
6241 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6242 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6243 	enum intel_display_power_domain domain;
6244 	unsigned long domains;
6245 
6246 	if (!intel_crtc->active)
6247 		return;
6248 
6249 	if (to_intel_plane_state(crtc->primary->state)->visible) {
6250 		WARN_ON(intel_crtc->unpin_work);
6251 
6252 		intel_pre_disable_primary_noatomic(crtc);
6253 
6254 		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
6255 		to_intel_plane_state(crtc->primary->state)->visible = false;
6256 	}
6257 
6258 	dev_priv->display.crtc_disable(crtc);
6259 
6260 	DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was enabled, now disabled\n",
6261 		      crtc->base.id);
6262 
6263 	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
6264 	crtc->state->active = false;
6265 	intel_crtc->active = false;
6266 	crtc->enabled = false;
6267 	crtc->state->connector_mask = 0;
6268 	crtc->state->encoder_mask = 0;
6269 
6270 	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
6271 		encoder->base.crtc = NULL;
6272 
6273 	intel_fbc_disable(intel_crtc);
6274 	intel_update_watermarks(crtc);
6275 	intel_disable_shared_dpll(intel_crtc);
6276 
6277 	domains = intel_crtc->enabled_power_domains;
6278 	for_each_power_domain(domain, domains)
6279 		intel_display_power_put(dev_priv, domain);
6280 	intel_crtc->enabled_power_domains = 0;
6281 
6282 	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
6283 	dev_priv->min_pixclk[intel_crtc->pipe] = 0;
6284 }
6285 
6286 /*
6287  * Turn all CRTCs off, but do not adjust state.
6288  * This has to be paired with a call to intel_modeset_setup_hw_state.
6289  */
6290 int intel_display_suspend(struct drm_device *dev)
6291 {
6292 	struct drm_i915_private *dev_priv = to_i915(dev);
6293 	struct drm_atomic_state *state;
6294 	int ret;
6295 
6296 	state = drm_atomic_helper_suspend(dev);
6297 	ret = PTR_ERR_OR_ZERO(state);
6298 	if (ret)
6299 		DRM_ERROR("Suspending crtcs failed with %i\n", ret);
6300 	else
6301 		dev_priv->modeset_restore_state = state;
6302 	return ret;
6303 }
6304 
6305 void intel_encoder_destroy(struct drm_encoder *encoder)
6306 {
6307 	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6308 
6309 	drm_encoder_cleanup(encoder);
6310 	kfree(intel_encoder);
6311 }
6312 
6313 /* Cross check the actual hw state with our own modeset state tracking (and its
6314  * internal consistency). */
6315 static void intel_connector_verify_state(struct intel_connector *connector)
6316 {
6317 	struct drm_crtc *crtc = connector->base.state->crtc;
6318 
6319 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
6320 		      connector->base.base.id,
6321 		      connector->base.name);
6322 
6323 	if (connector->get_hw_state(connector)) {
6324 		struct intel_encoder *encoder = connector->encoder;
6325 		struct drm_connector_state *conn_state = connector->base.state;
6326 
6327 		I915_STATE_WARN(!crtc,
6328 			 "connector enabled without attached crtc\n");
6329 
6330 		if (!crtc)
6331 			return;
6332 
6333 		I915_STATE_WARN(!crtc->state->active,
6334 		      "connector is active, but attached crtc isn't\n");
6335 
6336 		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
6337 			return;
6338 
6339 		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
6340 			"atomic encoder doesn't match attached encoder\n");
6341 
6342 		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
6343 			"attached encoder crtc differs from connector crtc\n");
6344 	} else {
6345 		I915_STATE_WARN(crtc && crtc->state->active,
6346 			"attached crtc is active, but connector isn't\n");
6347 		I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
6348 			"best encoder set without crtc!\n");
6349 	}
6350 }
6351 
6352 int intel_connector_init(struct intel_connector *connector)
6353 {
6354 	drm_atomic_helper_connector_reset(&connector->base);
6355 
6356 	if (!connector->base.state)
6357 		return -ENOMEM;
6358 
6359 	return 0;
6360 }
6361 
6362 struct intel_connector *intel_connector_alloc(void)
6363 {
6364 	struct intel_connector *connector;
6365 
6366 	connector = kzalloc(sizeof(*connector), GFP_KERNEL);
6367 	if (!connector)
6368 		return NULL;
6369 
6370 	if (intel_connector_init(connector) < 0) {
6371 		kfree(connector);
6372 		return NULL;
6373 	}
6374 
6375 	return connector;
6376 }
6377 
6378 /* Simple connector->get_hw_state implementation for encoders that support only
6379  * one connector and no cloning and hence the encoder state determines the state
6380  * of the connector. */
6381 bool intel_connector_get_hw_state(struct intel_connector *connector)
6382 {
6383 	enum i915_pipe pipe = 0;
6384 	struct intel_encoder *encoder = connector->encoder;
6385 
6386 	return encoder->get_hw_state(encoder, &pipe);
6387 }
6388 
6389 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
6390 {
6391 	if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6392 		return crtc_state->fdi_lanes;
6393 
6394 	return 0;
6395 }
6396 
6397 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum i915_pipe pipe,
6398 				     struct intel_crtc_state *pipe_config)
6399 {
6400 	struct drm_atomic_state *state = pipe_config->base.state;
6401 	struct intel_crtc *other_crtc;
6402 	struct intel_crtc_state *other_crtc_state;
6403 
6404 	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
6405 		      pipe_name(pipe), pipe_config->fdi_lanes);
6406 	if (pipe_config->fdi_lanes > 4) {
6407 		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
6408 			      pipe_name(pipe), pipe_config->fdi_lanes);
6409 		return -EINVAL;
6410 	}
6411 
6412 	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
6413 		if (pipe_config->fdi_lanes > 2) {
6414 			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
6415 				      pipe_config->fdi_lanes);
6416 			return -EINVAL;
6417 		} else {
6418 			return 0;
6419 		}
6420 	}
6421 
6422 	if (INTEL_INFO(dev)->num_pipes == 2)
6423 		return 0;
6424 
6425 	/* Ivybridge 3 pipe is really complicated */
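	/*
	 * Pipes B and C share the FDI lanes: pipe B may use more than 2
	 * lanes only while pipe C is off, and pipe C is limited to 2 lanes.
	 */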
6426 	switch (pipe) {
6427 	case PIPE_A:
6428 		return 0;
6429 	case PIPE_B:
6430 		if (pipe_config->fdi_lanes <= 2)
6431 			return 0;
6432 
6433 		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
6434 		other_crtc_state =
6435 			intel_atomic_get_crtc_state(state, other_crtc);
6436 		if (IS_ERR(other_crtc_state))
6437 			return PTR_ERR(other_crtc_state);
6438 
6439 		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
6440 			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
6441 				      pipe_name(pipe), pipe_config->fdi_lanes);
6442 			return -EINVAL;
6443 		}
6444 		return 0;
6445 	case PIPE_C:
6446 		if (pipe_config->fdi_lanes > 2) {
6447 			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
6448 				      pipe_name(pipe), pipe_config->fdi_lanes);
6449 			return -EINVAL;
6450 		}
6451 
6452 		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
6453 		other_crtc_state =
6454 			intel_atomic_get_crtc_state(state, other_crtc);
6455 		if (IS_ERR(other_crtc_state))
6456 			return PTR_ERR(other_crtc_state);
6457 
6458 		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
6459 			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
6460 			return -EINVAL;
6461 		}
6462 		return 0;
6463 	default:
6464 		BUG();
6465 	}
6466 }
6467 
6468 #define RETRY 1
6469 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
6470 				       struct intel_crtc_state *pipe_config)
6471 {
6472 	struct drm_device *dev = intel_crtc->base.dev;
6473 	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6474 	int lane, link_bw, fdi_dotclock, ret;
6475 	bool needs_recompute = false;
6476 
6477 retry:
6478 	/* FDI is a binary signal running at ~2.7GHz, encoding
6479 	 * each output octet as 10 bits. The actual frequency
6480 	 * is stored as a divider into a 100MHz clock, and the
6481 	 * mode pixel clock is stored in units of 1 kHz.
6482 	 * Hence the bandwidth of each lane in terms of the mode signal
6483 	 * is:
6484 	 */
6485 	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
6486 
6487 	fdi_dotclock = adjusted_mode->crtc_clock;
6488 
6489 	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
6490 					   pipe_config->pipe_bpp);
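	/*
	 * e.g. a 148500 kHz mode at 24 bpp on the usual 270000 kHz FDI link
	 * needs 148500 * 24 / (270000 * 8) ~= 1.65, so 2 lanes.
	 */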
6491 
6492 	pipe_config->fdi_lanes = lane;
6493 
6494 	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
6495 			       link_bw, &pipe_config->fdi_m_n);
6496 
6497 	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
6498 	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
6499 		pipe_config->pipe_bpp -= 2*3;
6500 		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
6501 			      pipe_config->pipe_bpp);
6502 		needs_recompute = true;
6503 		pipe_config->bw_constrained = true;
6504 
6505 		goto retry;
6506 	}
6507 
6508 	if (needs_recompute)
6509 		return RETRY;
6510 
6511 	return ret;
6512 }
6513 
6514 static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
6515 				     struct intel_crtc_state *pipe_config)
6516 {
6517 	if (pipe_config->pipe_bpp > 24)
6518 		return false;
6519 
6520 	/* HSW can handle pixel rate up to cdclk? */
6521 	if (IS_HASWELL(dev_priv))
6522 		return true;
6523 
6524 	/*
6525 	 * We compare against max which means we must take
6526 	 * the increased cdclk requirement into account when
6527 	 * calculating the new cdclk.
6528 	 *
6529 	 * Should measure whether using a lower cdclk without IPS would be a win.
6530 	 */
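	/*
	 * e.g. with max_cdclk_freq = 675000 kHz, IPS is allowed up to a
	 * 675000 * 95 / 100 = 641250 kHz pixel rate.
	 */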
6531 	return ilk_pipe_pixel_rate(pipe_config) <=
6532 		dev_priv->max_cdclk_freq * 95 / 100;
6533 }
6534 
6535 static void hsw_compute_ips_config(struct intel_crtc *crtc,
6536 				   struct intel_crtc_state *pipe_config)
6537 {
6538 	struct drm_device *dev = crtc->base.dev;
6539 	struct drm_i915_private *dev_priv = dev->dev_private;
6540 
6541 	pipe_config->ips_enabled = i915.enable_ips &&
6542 		hsw_crtc_supports_ips(crtc) &&
6543 		pipe_config_supports_ips(dev_priv, pipe_config);
6544 }
6545 
6546 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6547 {
6548 	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6549 
6550 	/* GDG double wide on either pipe, otherwise pipe A only */
6551 	return INTEL_INFO(dev_priv)->gen < 4 &&
6552 		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6553 }
6554 
6555 static int intel_crtc_compute_config(struct intel_crtc *crtc,
6556 				     struct intel_crtc_state *pipe_config)
6557 {
6558 	struct drm_device *dev = crtc->base.dev;
6559 	struct drm_i915_private *dev_priv = dev->dev_private;
6560 	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6561 
6562 	/* FIXME should check pixel clock limits on all platforms */
6563 	if (INTEL_INFO(dev)->gen < 4) {
6564 		int clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
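		/* e.g. a 320000 kHz cdclk limits single wide modes to 288000 kHz */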
6565 
6566 		/*
6567 		 * Enable double wide mode when the dot clock
6568 		 * is > 90% of the (display) core speed.
6569 		 */
6570 		if (intel_crtc_supports_double_wide(crtc) &&
6571 		    adjusted_mode->crtc_clock > clock_limit) {
6572 			clock_limit *= 2;
6573 			pipe_config->double_wide = true;
6574 		}
6575 
6576 		if (adjusted_mode->crtc_clock > clock_limit) {
6577 			DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
6578 				      adjusted_mode->crtc_clock, clock_limit,
6579 				      yesno(pipe_config->double_wide));
6580 			return -EINVAL;
6581 		}
6582 	}
6583 
6584 	/*
6585 	 * Pipe horizontal size must be even in:
6586 	 * - DVO ganged mode
6587 	 * - LVDS dual channel mode
6588 	 * - Double wide pipe
6589 	 */
6590 	if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
6591 	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
6592 		pipe_config->pipe_src_w &= ~1;
6593 
6594 	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
6595 	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
6596 	 */
6597 	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
6598 		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
6599 		return -EINVAL;
6600 
6601 	if (HAS_IPS(dev))
6602 		hsw_compute_ips_config(crtc, pipe_config);
6603 
6604 	if (pipe_config->has_pch_encoder)
6605 		return ironlake_fdi_compute_config(crtc, pipe_config);
6606 
6607 	return 0;
6608 }
6609 
6610 static int skylake_get_display_clock_speed(struct drm_device *dev)
6611 {
6612 	struct drm_i915_private *dev_priv = to_i915(dev);
6613 	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
6614 	uint32_t cdctl = I915_READ(CDCLK_CTL);
6615 	uint32_t linkrate;
6616 
6617 	if (!(lcpll1 & LCPLL_PLL_ENABLE))
6618 		return 24000; /* 24MHz is the cd freq with NSSC ref */
6619 
6620 	if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
6621 		return 540000;
6622 
6623 	linkrate = (I915_READ(DPLL_CTRL1) &
6624 		    DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
6625 
6626 	if (linkrate == DPLL_CTRL1_LINK_RATE_2160 ||
6627 	    linkrate == DPLL_CTRL1_LINK_RATE_1080) {
6628 		/* vco 8640 */
6629 		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6630 		case CDCLK_FREQ_450_432:
6631 			return 432000;
6632 		case CDCLK_FREQ_337_308:
6633 			return 308570;
6634 		case CDCLK_FREQ_675_617:
6635 			return 617140;
6636 		default:
6637 			WARN(1, "Unknown cd freq selection\n");
6638 		}
6639 	} else {
6640 		/* vco 8100 */
6641 		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6642 		case CDCLK_FREQ_450_432:
6643 			return 450000;
6644 		case CDCLK_FREQ_337_308:
6645 			return 337500;
6646 		case CDCLK_FREQ_675_617:
6647 			return 675000;
6648 		default:
6649 			WARN(1, "Unknown cd freq selection\n");
6650 		}
6651 	}
6652 
6653 	/* error case, do as if DPLL0 isn't enabled */
6654 	return 24000;
6655 }
6656 
6657 static int broxton_get_display_clock_speed(struct drm_device *dev)
6658 {
6659 	struct drm_i915_private *dev_priv = to_i915(dev);
6660 	uint32_t cdctl = I915_READ(CDCLK_CTL);
6661 	uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
6662 	uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
6663 	int cdclk;
6664 
6665 	if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
6666 		return 19200;
6667 
6668 	cdclk = 19200 * pll_ratio / 2;
6669 
6670 	switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
6671 	case BXT_CDCLK_CD2X_DIV_SEL_1:
6672 		return cdclk;  /* 576MHz or 624MHz */
6673 	case BXT_CDCLK_CD2X_DIV_SEL_1_5:
6674 		return cdclk * 2 / 3; /* 384MHz */
6675 	case BXT_CDCLK_CD2X_DIV_SEL_2:
6676 		return cdclk / 2; /* 288MHz */
6677 	case BXT_CDCLK_CD2X_DIV_SEL_4:
6678 		return cdclk / 4; /* 144MHz */
6679 	}
6680 
6681 	/* error case, do as if DE PLL isn't enabled */
6682 	return 19200;
6683 }
6684 
6685 static int broadwell_get_display_clock_speed(struct drm_device *dev)
6686 {
6687 	struct drm_i915_private *dev_priv = dev->dev_private;
6688 	uint32_t lcpll = I915_READ(LCPLL_CTL);
6689 	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6690 
6691 	if (lcpll & LCPLL_CD_SOURCE_FCLK)
6692 		return 800000;
6693 	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6694 		return 450000;
6695 	else if (freq == LCPLL_CLK_FREQ_450)
6696 		return 450000;
6697 	else if (freq == LCPLL_CLK_FREQ_54O_BDW)
6698 		return 540000;
6699 	else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
6700 		return 337500;
6701 	else
6702 		return 675000;
6703 }
6704 
6705 static int haswell_get_display_clock_speed(struct drm_device *dev)
6706 {
6707 	struct drm_i915_private *dev_priv = dev->dev_private;
6708 	uint32_t lcpll = I915_READ(LCPLL_CTL);
6709 	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6710 
6711 	if (lcpll & LCPLL_CD_SOURCE_FCLK)
6712 		return 800000;
6713 	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6714 		return 450000;
6715 	else if (freq == LCPLL_CLK_FREQ_450)
6716 		return 450000;
6717 	else if (IS_HSW_ULT(dev))
6718 		return 337500;
6719 	else
6720 		return 540000;
6721 }
6722 
6723 static int valleyview_get_display_clock_speed(struct drm_device *dev)
6724 {
6725 	return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
6726 				      CCK_DISPLAY_CLOCK_CONTROL);
6727 }
6728 
6729 static int ilk_get_display_clock_speed(struct drm_device *dev)
6730 {
6731 	return 450000;
6732 }
6733 
6734 static int i945_get_display_clock_speed(struct drm_device *dev)
6735 {
6736 	return 400000;
6737 }
6738 
6739 static int i915_get_display_clock_speed(struct drm_device *dev)
6740 {
6741 	return 333333;
6742 }
6743 
6744 static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
6745 {
6746 	return 200000;
6747 }
6748 
6749 static int pnv_get_display_clock_speed(struct drm_device *dev)
6750 {
6751 	u16 gcfgc = 0;
6752 
6753 	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6754 
6755 	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6756 	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
6757 		return 266667;
6758 	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
6759 		return 333333;
6760 	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
6761 		return 444444;
6762 	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
6763 		return 200000;
6764 	default:
6765 		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
6766 	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
6767 		return 133333;
6768 	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
6769 		return 166667;
6770 	}
6771 }
6772 
6773 static int i915gm_get_display_clock_speed(struct drm_device *dev)
6774 {
6775 	u16 gcfgc = 0;
6776 
6777 	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6778 
6779 	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
6780 		return 133333;
6781 	else {
6782 		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6783 		case GC_DISPLAY_CLOCK_333_MHZ:
6784 			return 333333;
6785 		default:
6786 		case GC_DISPLAY_CLOCK_190_200_MHZ:
6787 			return 190000;
6788 		}
6789 	}
6790 }
6791 
6792 static int i865_get_display_clock_speed(struct drm_device *dev)
6793 {
6794 	return 266667;
6795 }
6796 
6797 static int i85x_get_display_clock_speed(struct drm_device *dev)
6798 {
6799 	u16 hpllcc = 0;
6800 
6801 	/*
6802 	 * 852GM/852GMV only supports 133 MHz and the HPLLCC
6803 	 * encoding is different :(
6804 	 * FIXME is this the right way to detect 852GM/852GMV?
6805 	 */
6806 	if (dev->pdev->revision == 0x1)
6807 		return 133333;
6808 
6809 #if 0
6810 	pci_bus_read_config_word(dev->pdev->bus,
6811 				 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
6812 #endif
6813 
6814 	/* Assume that the hardware is in the high speed state.  This
6815 	 * should be the default.
6816 	 */
6817 	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
6818 	case GC_CLOCK_133_200:
6819 	case GC_CLOCK_133_200_2:
6820 	case GC_CLOCK_100_200:
6821 		return 200000;
6822 	case GC_CLOCK_166_250:
6823 		return 250000;
6824 	case GC_CLOCK_100_133:
6825 		return 133333;
6826 	case GC_CLOCK_133_266:
6827 	case GC_CLOCK_133_266_2:
6828 	case GC_CLOCK_166_266:
6829 		return 266667;
6830 	}
6831 
6832 	/* Shouldn't happen */
6833 	return 0;
6834 }
6835 
6836 static int i830_get_display_clock_speed(struct drm_device *dev)
6837 {
6838 	return 133333;
6839 }
6840 
6841 static unsigned int intel_hpll_vco(struct drm_device *dev)
6842 {
6843 	struct drm_i915_private *dev_priv = dev->dev_private;
6844 	static const unsigned int blb_vco[8] = {
6845 		[0] = 3200000,
6846 		[1] = 4000000,
6847 		[2] = 5333333,
6848 		[3] = 4800000,
6849 		[4] = 6400000,
6850 	};
6851 	static const unsigned int pnv_vco[8] = {
6852 		[0] = 3200000,
6853 		[1] = 4000000,
6854 		[2] = 5333333,
6855 		[3] = 4800000,
6856 		[4] = 2666667,
6857 	};
6858 	static const unsigned int cl_vco[8] = {
6859 		[0] = 3200000,
6860 		[1] = 4000000,
6861 		[2] = 5333333,
6862 		[3] = 6400000,
6863 		[4] = 3333333,
6864 		[5] = 3566667,
6865 		[6] = 4266667,
6866 	};
6867 	static const unsigned int elk_vco[8] = {
6868 		[0] = 3200000,
6869 		[1] = 4000000,
6870 		[2] = 5333333,
6871 		[3] = 4800000,
6872 	};
6873 	static const unsigned int ctg_vco[8] = {
6874 		[0] = 3200000,
6875 		[1] = 4000000,
6876 		[2] = 5333333,
6877 		[3] = 6400000,
6878 		[4] = 2666667,
6879 		[5] = 4266667,
6880 	};
6881 	const unsigned int *vco_table;
6882 	unsigned int vco;
6883 	uint8_t tmp = 0;
6884 
6885 	/* FIXME other chipsets? */
6886 	if (IS_GM45(dev))
6887 		vco_table = ctg_vco;
6888 	else if (IS_G4X(dev))
6889 		vco_table = elk_vco;
6890 	else if (IS_CRESTLINE(dev))
6891 		vco_table = cl_vco;
6892 	else if (IS_PINEVIEW(dev))
6893 		vco_table = pnv_vco;
6894 	else if (IS_G33(dev))
6895 		vco_table = blb_vco;
6896 	else
6897 		return 0;
6898 
6899 	tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);
6900 
6901 	vco = vco_table[tmp & 0x7];
6902 	if (vco == 0)
6903 		DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
6904 	else
6905 		DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
6906 
6907 	return vco;
6908 }
6909 
6910 static int gm45_get_display_clock_speed(struct drm_device *dev)
6911 {
6912 	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
6913 	uint16_t tmp = 0;
6914 
6915 	pci_read_config_word(dev->pdev, GCFGC, &tmp);
6916 
6917 	cdclk_sel = (tmp >> 12) & 0x1;
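	/* GCFGC bit 12 selects between the two cdclk options for each VCO. */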
6918 
6919 	switch (vco) {
6920 	case 2666667:
6921 	case 4000000:
6922 	case 5333333:
6923 		return cdclk_sel ? 333333 : 222222;
6924 	case 3200000:
6925 		return cdclk_sel ? 320000 : 228571;
6926 	default:
6927 		DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
6928 		return 222222;
6929 	}
6930 }
6931 
6932 static int i965gm_get_display_clock_speed(struct drm_device *dev)
6933 {
6934 	static const uint8_t div_3200[] = { 16, 10,  8 };
6935 	static const uint8_t div_4000[] = { 20, 12, 10 };
6936 	static const uint8_t div_5333[] = { 24, 16, 14 };
6937 	const uint8_t *div_table;
6938 	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
6939 	uint16_t tmp = 0;
6940 
6941 	pci_read_config_word(dev->pdev, GCFGC, &tmp);
6942 
6943 	cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
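	/* cdclk_sel is unsigned, so a raw field value of 0 wraps around
	 * and is rejected by the range check below. */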
6944 
6945 	if (cdclk_sel >= ARRAY_SIZE(div_3200))
6946 		goto fail;
6947 
6948 	switch (vco) {
6949 	case 3200000:
6950 		div_table = div_3200;
6951 		break;
6952 	case 4000000:
6953 		div_table = div_4000;
6954 		break;
6955 	case 5333333:
6956 		div_table = div_5333;
6957 		break;
6958 	default:
6959 		goto fail;
6960 	}
6961 
6962 	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
6963 
6964 fail:
6965 	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
6966 	return 200000;
6967 }
6968 
6969 static int g33_get_display_clock_speed(struct drm_device *dev)
6970 {
6971 	static const uint8_t div_3200[] = { 12, 10,  8,  7, 5, 16 };
6972 	static const uint8_t div_4000[] = { 14, 12, 10,  8, 6, 20 };
6973 	static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
6974 	static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
6975 	const uint8_t *div_table;
6976 	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
6977 	uint16_t tmp = 0;
6978 
6979 	pci_read_config_word(dev->pdev, GCFGC, &tmp);
6980 
6981 	cdclk_sel = (tmp >> 4) & 0x7;
6982 
6983 	if (cdclk_sel >= ARRAY_SIZE(div_3200))
6984 		goto fail;
6985 
6986 	switch (vco) {
6987 	case 3200000:
6988 		div_table = div_3200;
6989 		break;
6990 	case 4000000:
6991 		div_table = div_4000;
6992 		break;
6993 	case 4800000:
6994 		div_table = div_4800;
6995 		break;
6996 	case 5333333:
6997 		div_table = div_5333;
6998 		break;
6999 	default:
7000 		goto fail;
7001 	}
7002 
7003 	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
7004 
7005 fail:
7006 	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
7007 	return 190476;
7008 }
7009 
7010 static void
7011 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
7012 {
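	/* Halve both terms until each fits in its M/N register field. */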
7013 	while (*num > DATA_LINK_M_N_MASK ||
7014 	       *den > DATA_LINK_M_N_MASK) {
7015 		*num >>= 1;
7016 		*den >>= 1;
7017 	}
7018 }
7019 
7020 static void compute_m_n(unsigned int m, unsigned int n,
7021 			uint32_t *ret_m, uint32_t *ret_n)
7022 {
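	/* Round N up to a power of two (capped at DATA_LINK_N_MAX) and
	 * scale M to preserve the m/n ratio. */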
7023 	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7024 	*ret_m = div_u64((uint64_t) m * *ret_n, n);
7025 	intel_reduce_m_n_ratio(ret_m, ret_n);
7026 }
7027 
7028 void
7029 intel_link_compute_m_n(int bits_per_pixel, int nlanes,
7030 		       int pixel_clock, int link_clock,
7031 		       struct intel_link_m_n *m_n)
7032 {
7033 	m_n->tu = 64;
7034 
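	/*
	 * Data M/N is the ratio of the pixel data rate to the total link
	 * bandwidth: (bpp * pixel clock) / (link clock * nlanes * 8).
	 * E.g. 24 bpp at 148500 kHz over 4 lanes at 270000 kHz gives
	 * m/n = 3564000/8640000 = 0.4125.
	 */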
7035 	compute_m_n(bits_per_pixel * pixel_clock,
7036 		    link_clock * nlanes * 8,
7037 		    &m_n->gmch_m, &m_n->gmch_n);
7038 
7039 	compute_m_n(pixel_clock, link_clock,
7040 		    &m_n->link_m, &m_n->link_n);
7041 }
7042 
7043 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7044 {
7045 	if (i915.panel_use_ssc >= 0)
7046 		return i915.panel_use_ssc != 0;
7047 	return dev_priv->vbt.lvds_use_ssc
7048 		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7049 }
7050 
7051 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
7052 {
7053 	return (1 << dpll->n) << 16 | dpll->m2;
7054 }
7055 
7056 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
7057 {
7058 	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7059 }
7060 
7061 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7062 				     struct intel_crtc_state *crtc_state,
7063 				     intel_clock_t *reduced_clock)
7064 {
7065 	struct drm_device *dev = crtc->base.dev;
7066 	u32 fp, fp2 = 0;
7067 
7068 	if (IS_PINEVIEW(dev)) {
7069 		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7070 		if (reduced_clock)
7071 			fp2 = pnv_dpll_compute_fp(reduced_clock);
7072 	} else {
7073 		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7074 		if (reduced_clock)
7075 			fp2 = i9xx_dpll_compute_fp(reduced_clock);
7076 	}
7077 
7078 	crtc_state->dpll_hw_state.fp0 = fp;
7079 
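	/* Only an LVDS panel with a valid reduced clock can use downclocking. */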
7080 	crtc->lowfreq_avail = false;
7081 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7082 	    reduced_clock) {
7083 		crtc_state->dpll_hw_state.fp1 = fp2;
7084 		crtc->lowfreq_avail = true;
7085 	} else {
7086 		crtc_state->dpll_hw_state.fp1 = fp;
7087 	}
7088 }
7089 
7090 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
7091 				 enum i915_pipe pipe)
7092 {
7093 	u32 reg_val;
7094 
7095 	/*
7096 	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
7097 	 * and set it to a reasonable value instead.
7098 	 */
7099 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7100 	reg_val &= 0xffffff00;
7101 	reg_val |= 0x00000030;
7102 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7103 
7104 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7105 	reg_val &= 0x8cffffff;
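	/* XXX plain '=' below discards the masked value; upstream i915 does the same. */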
7106 	reg_val = 0x8c000000;
7107 	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7108 
7109 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7110 	reg_val &= 0xffffff00;
7111 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7112 
7113 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7114 	reg_val &= 0x00ffffff;
7115 	reg_val |= 0xb0000000;
7116 	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7117 }
7118 
7119 static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
7120 					 struct intel_link_m_n *m_n)
7121 {
7122 	struct drm_device *dev = crtc->base.dev;
7123 	struct drm_i915_private *dev_priv = dev->dev_private;
7124 	int pipe = crtc->pipe;
7125 
7126 	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7127 	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
7128 	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
7129 	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
7130 }
7131 
7132 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
7133 					 struct intel_link_m_n *m_n,
7134 					 struct intel_link_m_n *m2_n2)
7135 {
7136 	struct drm_device *dev = crtc->base.dev;
7137 	struct drm_i915_private *dev_priv = dev->dev_private;
7138 	int pipe = crtc->pipe;
7139 	enum transcoder transcoder = crtc->config->cpu_transcoder;
7140 
7141 	if (INTEL_INFO(dev)->gen >= 5) {
7142 		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
7143 		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
7144 		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
7145 		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
7146 		/* The M2_N2 registers only exist on gen < 8 and CHV; program
7147 		 * them only when DRRS is supported, so that they are not
7148 		 * accessed unnecessarily.
7149 		 */
7150 		if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
7151 			crtc->config->has_drrs) {
7152 			I915_WRITE(PIPE_DATA_M2(transcoder),
7153 					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
7154 			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
7155 			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
7156 			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
7157 		}
7158 	} else {
7159 		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7160 		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
7161 		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
7162 		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
7163 	}
7164 }
7165 
7166 void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
7167 {
7168 	struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7169 
7170 	if (m_n == M1_N1) {
7171 		dp_m_n = &crtc->config->dp_m_n;
7172 		dp_m2_n2 = &crtc->config->dp_m2_n2;
7173 	} else if (m_n == M2_N2) {
7174 
7175 		/*
7176 		 * M2_N2 registers are not supported, so the m2_n2 divider
7177 		 * values must be programmed into M1_N1 instead.
7178 		 */
7179 		dp_m_n = &crtc->config->dp_m2_n2;
7180 	} else {
7181 		DRM_ERROR("Unsupported divider value\n");
7182 		return;
7183 	}
7184 
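	/* PCH transcoders have only a single set of M/N registers. */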
7185 	if (crtc->config->has_pch_encoder)
7186 		intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
7187 	else
7188 		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
7189 }
7190 
7191 static void vlv_compute_dpll(struct intel_crtc *crtc,
7192 			     struct intel_crtc_state *pipe_config)
7193 {
7194 	pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
7195 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7196 	if (crtc->pipe != PIPE_A)
7197 		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7198 
7199 	/* DPLL not used with DSI, but still need the rest set up */
7200 	if (!pipe_config->has_dsi_encoder)
7201 		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7202 			DPLL_EXT_BUFFER_ENABLE_VLV;
7203 
7204 	pipe_config->dpll_hw_state.dpll_md =
7205 		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7206 }
7207 
7208 static void chv_compute_dpll(struct intel_crtc *crtc,
7209 			     struct intel_crtc_state *pipe_config)
7210 {
7211 	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7212 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7213 	if (crtc->pipe != PIPE_A)
7214 		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7215 
7216 	/* DPLL not used with DSI, but still need the rest set up */
7217 	if (!pipe_config->has_dsi_encoder)
7218 		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7219 
7220 	pipe_config->dpll_hw_state.dpll_md =
7221 		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7222 }
7223 
7224 static void vlv_prepare_pll(struct intel_crtc *crtc,
7225 			    const struct intel_crtc_state *pipe_config)
7226 {
7227 	struct drm_device *dev = crtc->base.dev;
7228 	struct drm_i915_private *dev_priv = dev->dev_private;
7229 	enum i915_pipe pipe = crtc->pipe;
7230 	u32 mdiv;
7231 	u32 bestn, bestm1, bestm2, bestp1, bestp2;
7232 	u32 coreclk, reg_val;
7233 
7234 	/* Enable Refclk */
7235 	I915_WRITE(DPLL(pipe),
7236 		   pipe_config->dpll_hw_state.dpll &
7237 		   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
7238 
7239 	/* No need to actually set up the DPLL with DSI */
7240 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7241 		return;
7242 
7243 	mutex_lock(&dev_priv->sb_lock);
7244 
7245 	bestn = pipe_config->dpll.n;
7246 	bestm1 = pipe_config->dpll.m1;
7247 	bestm2 = pipe_config->dpll.m2;
7248 	bestp1 = pipe_config->dpll.p1;
7249 	bestp2 = pipe_config->dpll.p2;
7250 
7251 	/* See eDP HDMI DPIO driver vbios notes doc */
7252 
7253 	/* PLL B needs special handling */
7254 	if (pipe == PIPE_B)
7255 		vlv_pllb_recal_opamp(dev_priv, pipe);
7256 
7257 	/* Set up Tx target for periodic Rcomp update */
7258 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
7259 
7260 	/* Disable target IRef on PLL */
7261 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
7262 	reg_val &= 0x00ffffff;
7263 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
7264 
7265 	/* Disable fast lock */
7266 	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
7267 
7268 	/* Set idtafcrecal before PLL is enabled */
7269 	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
7270 	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
7271 	mdiv |= ((bestn << DPIO_N_SHIFT));
7272 	mdiv |= (1 << DPIO_K_SHIFT);
7273 
7274 	/*
7275 	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
7276 	 * but we don't support that).
7277 	 * Note: don't use the DAC post divider as it seems unstable.
7278 	 */
7279 	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
7280 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7281 
7282 	mdiv |= DPIO_ENABLE_CALIBRATION;
7283 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7284 
7285 	/* Set HBR and RBR LPF coefficients */
7286 	if (pipe_config->port_clock == 162000 ||
7287 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
7288 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
7289 		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7290 				 0x009f0003);
7291 	else
7292 		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7293 				 0x00d0000f);
7294 
7295 	if (pipe_config->has_dp_encoder) {
7296 		/* Use SSC source */
7297 		if (pipe == PIPE_A)
7298 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7299 					 0x0df40000);
7300 		else
7301 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7302 					 0x0df70000);
7303 	} else { /* HDMI or VGA */
7304 		/* Use bend source */
7305 		if (pipe == PIPE_A)
7306 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7307 					 0x0df70000);
7308 		else
7309 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7310 					 0x0df40000);
7311 	}
7312 
7313 	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
7314 	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
7315 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
7316 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
7317 		coreclk |= 0x01000000;
7318 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
7319 
7320 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
7321 	mutex_unlock(&dev_priv->sb_lock);
7322 }
7323 
7324 static void chv_prepare_pll(struct intel_crtc *crtc,
7325 			    const struct intel_crtc_state *pipe_config)
7326 {
7327 	struct drm_device *dev = crtc->base.dev;
7328 	struct drm_i915_private *dev_priv = dev->dev_private;
7329 	enum i915_pipe pipe = crtc->pipe;
7330 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
7331 	u32 loopfilter, tribuf_calcntr;
7332 	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
7333 	u32 dpio_val;
7334 	int vco;
7335 
7336 	/* Enable Refclk and SSC */
7337 	I915_WRITE(DPLL(pipe),
7338 		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
7339 
7340 	/* No need to actually set up the DPLL with DSI */
7341 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7342 		return;
7343 
7344 	bestn = pipe_config->dpll.n;
7345 	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
7346 	bestm1 = pipe_config->dpll.m1;
7347 	bestm2 = pipe_config->dpll.m2 >> 22;
7348 	bestp1 = pipe_config->dpll.p1;
7349 	bestp2 = pipe_config->dpll.p2;
7350 	vco = pipe_config->dpll.vco;
7351 	dpio_val = 0;
7352 	loopfilter = 0;
7353 
7354 	mutex_lock(&dev_priv->sb_lock);
7355 
7356 	/* p1 and p2 divider */
7357 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
7358 			5 << DPIO_CHV_S1_DIV_SHIFT |
7359 			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
7360 			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
7361 			1 << DPIO_CHV_K_DIV_SHIFT);
7362 
7363 	/* Feedback post-divider - m2 */
7364 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
7365 
7366 	/* Feedback refclk divider - n and m1 */
7367 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
7368 			DPIO_CHV_M1_DIV_BY_2 |
7369 			1 << DPIO_CHV_N_DIV_SHIFT);
7370 
7371 	/* M2 fraction division */
7372 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
7373 
7374 	/* M2 fraction division enable */
7375 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
7376 	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
7377 	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
7378 	if (bestm2_frac)
7379 		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
7380 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
7381 
7382 	/* Program digital lock detect threshold */
7383 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
7384 	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
7385 					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
7386 	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
7387 	if (!bestm2_frac)
7388 		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
7389 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
7390 
7391 	/* Loop filter */
7392 	if (vco == 5400000) {
7393 		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
7394 		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
7395 		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
7396 		tribuf_calcntr = 0x9;
7397 	} else if (vco <= 6200000) {
7398 		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
7399 		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
7400 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7401 		tribuf_calcntr = 0x9;
7402 	} else if (vco <= 6480000) {
7403 		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7404 		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7405 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7406 		tribuf_calcntr = 0x8;
7407 	} else {
7408 		/* Not supported. Apply the same limits as in the max case */
7409 		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7410 		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7411 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7412 		tribuf_calcntr = 0;
7413 	}
7414 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
7415 
7416 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
7417 	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
7418 	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
7419 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
7420 
7421 	/* AFC Recal */
7422 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
7423 			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
7424 			DPIO_AFC_RECAL);
7425 
7426 	mutex_unlock(&dev_priv->sb_lock);
7427 }
7428 
7429 /**
7430  * vlv_force_pll_on - forcibly enable just the PLL
7431  * @dev: drm device
7432  * @pipe: pipe PLL to enable
7433  * @dpll: PLL configuration
7434  *
7435  * Enable the PLL for @pipe using the supplied @dpll config. To be used
7436  * in cases where we need the PLL enabled even when @pipe is not going to
7437  * be enabled.
7438  */
7439 int vlv_force_pll_on(struct drm_device *dev, enum i915_pipe pipe,
7440 		     const struct dpll *dpll)
7441 {
7442 	struct intel_crtc *crtc =
7443 		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
7444 	struct intel_crtc_state *pipe_config;
7445 
7446 	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7447 	if (!pipe_config)
7448 		return -ENOMEM;
7449 
7450 	pipe_config->base.crtc = &crtc->base;
7451 	pipe_config->pixel_multiplier = 1;
7452 	pipe_config->dpll = *dpll;
7453 
7454 	if (IS_CHERRYVIEW(dev)) {
7455 		chv_compute_dpll(crtc, pipe_config);
7456 		chv_prepare_pll(crtc, pipe_config);
7457 		chv_enable_pll(crtc, pipe_config);
7458 	} else {
7459 		vlv_compute_dpll(crtc, pipe_config);
7460 		vlv_prepare_pll(crtc, pipe_config);
7461 		vlv_enable_pll(crtc, pipe_config);
7462 	}
7463 
7464 	kfree(pipe_config);
7465 
7466 	return 0;
7467 }
7468 
7469 /**
7470  * vlv_force_pll_off - forcibly disable just the PLL
7471  * @dev: drm device
7472  * @pipe: pipe PLL to disable
7473  *
7474  * Disable the PLL for @pipe, undoing a previous vlv_force_pll_on()
7475  * call made while @pipe itself was not enabled.
7476  */
7477 void vlv_force_pll_off(struct drm_device *dev, enum i915_pipe pipe)
7478 {
7479 	if (IS_CHERRYVIEW(dev))
7480 		chv_disable_pll(to_i915(dev), pipe);
7481 	else
7482 		vlv_disable_pll(to_i915(dev), pipe);
7483 }
7484 
7485 static void i9xx_compute_dpll(struct intel_crtc *crtc,
7486 			      struct intel_crtc_state *crtc_state,
7487 			      intel_clock_t *reduced_clock)
7488 {
7489 	struct drm_device *dev = crtc->base.dev;
7490 	struct drm_i915_private *dev_priv = dev->dev_private;
7491 	u32 dpll;
7492 	bool is_sdvo;
7493 	struct dpll *clock = &crtc_state->dpll;
7494 
7495 	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7496 
7497 	is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7498 		intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);
7499 
7500 	dpll = DPLL_VGA_MODE_DIS;
7501 
7502 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
7503 		dpll |= DPLLB_MODE_LVDS;
7504 	else
7505 		dpll |= DPLLB_MODE_DAC_SERIAL;
7506 
7507 	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
7508 		dpll |= (crtc_state->pixel_multiplier - 1)
7509 			<< SDVO_MULTIPLIER_SHIFT_HIRES;
7510 	}
7511 
7512 	if (is_sdvo || crtc_state->has_dp_encoder)
7513 		dpll |= DPLL_SDVO_HIGH_SPEED;
7517 
7518 	/* compute bitmask from p1 value */
7519 	if (IS_PINEVIEW(dev))
7520 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
7521 	else {
7522 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7523 		if (IS_G4X(dev) && reduced_clock)
7524 			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7525 	}
7526 	switch (clock->p2) {
7527 	case 5:
7528 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7529 		break;
7530 	case 7:
7531 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7532 		break;
7533 	case 10:
7534 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7535 		break;
7536 	case 14:
7537 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7538 		break;
7539 	}
7540 	if (INTEL_INFO(dev)->gen >= 4)
7541 		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
7542 
7543 	if (crtc_state->sdvo_tv_clock)
7544 		dpll |= PLL_REF_INPUT_TVCLKINBC;
7545 	else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7546 		 intel_panel_use_ssc(dev_priv))
7547 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7548 	else
7549 		dpll |= PLL_REF_INPUT_DREFCLK;
7550 
7551 	dpll |= DPLL_VCO_ENABLE;
7552 	crtc_state->dpll_hw_state.dpll = dpll;
7553 
7554 	if (INTEL_INFO(dev)->gen >= 4) {
7555 		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
7556 			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
7557 		crtc_state->dpll_hw_state.dpll_md = dpll_md;
7558 	}
7559 }
7560 
7561 static void i8xx_compute_dpll(struct intel_crtc *crtc,
7562 			      struct intel_crtc_state *crtc_state,
7563 			      intel_clock_t *reduced_clock)
7564 {
7565 	struct drm_device *dev = crtc->base.dev;
7566 	struct drm_i915_private *dev_priv = dev->dev_private;
7567 	u32 dpll;
7568 	struct dpll *clock = &crtc_state->dpll;
7569 
7570 	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7571 
7572 	dpll = DPLL_VGA_MODE_DIS;
7573 
7574 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7575 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7576 	} else {
7577 		if (clock->p1 == 2)
7578 			dpll |= PLL_P1_DIVIDE_BY_TWO;
7579 		else
7580 			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7581 		if (clock->p2 == 4)
7582 			dpll |= PLL_P2_DIVIDE_BY_4;
7583 	}
7584 
7585 	if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
7586 		dpll |= DPLL_DVO_2X_MODE;
7587 
7588 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7589 	    intel_panel_use_ssc(dev_priv))
7590 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7591 	else
7592 		dpll |= PLL_REF_INPUT_DREFCLK;
7593 
7594 	dpll |= DPLL_VCO_ENABLE;
7595 	crtc_state->dpll_hw_state.dpll = dpll;
7596 }
7597 
7598 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
7599 {
7600 	struct drm_device *dev = intel_crtc->base.dev;
7601 	struct drm_i915_private *dev_priv = dev->dev_private;
7602 	enum i915_pipe pipe = intel_crtc->pipe;
7603 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
7604 	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
7605 	uint32_t crtc_vtotal, crtc_vblank_end;
7606 	int vsyncshift = 0;
7607 
7608 	/* We need to be careful not to change the adjusted mode, for otherwise
7609 	 * the hw state checker will get angry at the mismatch. */
7610 	crtc_vtotal = adjusted_mode->crtc_vtotal;
7611 	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
7612 
7613 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
7614 		/* the chip adds 2 halflines automatically */
7615 		crtc_vtotal -= 1;
7616 		crtc_vblank_end -= 1;
7617 
7618 		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
7619 			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
7620 		else
7621 			vsyncshift = adjusted_mode->crtc_hsync_start -
7622 				adjusted_mode->crtc_htotal / 2;
7623 		if (vsyncshift < 0)
7624 			vsyncshift += adjusted_mode->crtc_htotal;
7625 	}
7626 
7627 	if (INTEL_INFO(dev)->gen > 3)
7628 		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
7629 
7630 	I915_WRITE(HTOTAL(cpu_transcoder),
7631 		   (adjusted_mode->crtc_hdisplay - 1) |
7632 		   ((adjusted_mode->crtc_htotal - 1) << 16));
7633 	I915_WRITE(HBLANK(cpu_transcoder),
7634 		   (adjusted_mode->crtc_hblank_start - 1) |
7635 		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
7636 	I915_WRITE(HSYNC(cpu_transcoder),
7637 		   (adjusted_mode->crtc_hsync_start - 1) |
7638 		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
7639 
7640 	I915_WRITE(VTOTAL(cpu_transcoder),
7641 		   (adjusted_mode->crtc_vdisplay - 1) |
7642 		   ((crtc_vtotal - 1) << 16));
7643 	I915_WRITE(VBLANK(cpu_transcoder),
7644 		   (adjusted_mode->crtc_vblank_start - 1) |
7645 		   ((crtc_vblank_end - 1) << 16));
7646 	I915_WRITE(VSYNC(cpu_transcoder),
7647 		   (adjusted_mode->crtc_vsync_start - 1) |
7648 		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
7649 
7650 	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
7651 	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
7652 	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
7653 	 * bits. */
7654 	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
7655 	    (pipe == PIPE_B || pipe == PIPE_C))
7656 		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
7657 
7658 }
7659 
7660 static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
7661 {
7662 	struct drm_device *dev = intel_crtc->base.dev;
7663 	struct drm_i915_private *dev_priv = dev->dev_private;
7664 	enum i915_pipe pipe = intel_crtc->pipe;
7665 
7666 	/* pipesrc controls the size that is scaled from, which should
7667 	 * always be the user's requested size.
7668 	 */
7669 	I915_WRITE(PIPESRC(pipe),
7670 		   ((intel_crtc->config->pipe_src_w - 1) << 16) |
7671 		   (intel_crtc->config->pipe_src_h - 1));
7672 }
7673 
7674 static void intel_get_pipe_timings(struct intel_crtc *crtc,
7675 				   struct intel_crtc_state *pipe_config)
7676 {
7677 	struct drm_device *dev = crtc->base.dev;
7678 	struct drm_i915_private *dev_priv = dev->dev_private;
7679 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7680 	uint32_t tmp;
7681 
7682 	tmp = I915_READ(HTOTAL(cpu_transcoder));
7683 	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
7684 	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
7685 	tmp = I915_READ(HBLANK(cpu_transcoder));
7686 	pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
7687 	pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
7688 	tmp = I915_READ(HSYNC(cpu_transcoder));
7689 	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
7690 	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
7691 
7692 	tmp = I915_READ(VTOTAL(cpu_transcoder));
7693 	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
7694 	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
7695 	tmp = I915_READ(VBLANK(cpu_transcoder));
7696 	pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
7697 	pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
7698 	tmp = I915_READ(VSYNC(cpu_transcoder));
7699 	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
7700 	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
7701 
7702 	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
7703 		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
7704 		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
7705 		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
7706 	}
7707 }
7708 
7709 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
7710 				    struct intel_crtc_state *pipe_config)
7711 {
7712 	struct drm_device *dev = crtc->base.dev;
7713 	struct drm_i915_private *dev_priv = dev->dev_private;
7714 	u32 tmp;
7715 
7716 	tmp = I915_READ(PIPESRC(crtc->pipe));
7717 	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7718 	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7719 
7720 	pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7721 	pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
7722 }
7723 
7724 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
7725 				 struct intel_crtc_state *pipe_config)
7726 {
7727 	mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7728 	mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7729 	mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7730 	mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
7731 
7732 	mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7733 	mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7734 	mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7735 	mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
7736 
7737 	mode->flags = pipe_config->base.adjusted_mode.flags;
7738 	mode->type = DRM_MODE_TYPE_DRIVER;
7739 
7740 	mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7742 
7743 	mode->hsync = drm_mode_hsync(mode);
7744 	mode->vrefresh = drm_mode_vrefresh(mode);
7745 	drm_mode_set_name(mode);
7746 }
7747 
7748 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
7749 {
7750 	struct drm_device *dev = intel_crtc->base.dev;
7751 	struct drm_i915_private *dev_priv = dev->dev_private;
7752 	uint32_t pipeconf;
7753 
7754 	pipeconf = 0;
7755 
7756 	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
7757 	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
7758 		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
7759 
7760 	if (intel_crtc->config->double_wide)
7761 		pipeconf |= PIPECONF_DOUBLE_WIDE;
7762 
7763 	/* only g4x and later have fancy bpc/dither controls */
7764 	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
7765 		/* Bspec claims that we can't use dithering for 30bpp pipes. */
7766 		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
7767 			pipeconf |= PIPECONF_DITHER_EN |
7768 				    PIPECONF_DITHER_TYPE_SP;
7769 
7770 		switch (intel_crtc->config->pipe_bpp) {
7771 		case 18:
7772 			pipeconf |= PIPECONF_6BPC;
7773 			break;
7774 		case 24:
7775 			pipeconf |= PIPECONF_8BPC;
7776 			break;
7777 		case 30:
7778 			pipeconf |= PIPECONF_10BPC;
7779 			break;
7780 		default:
7781 			/* Case prevented by intel_choose_pipe_bpp_dither. */
7782 			BUG();
7783 		}
7784 	}
7785 
7786 	if (HAS_PIPE_CXSR(dev)) {
7787 		if (intel_crtc->lowfreq_avail) {
7788 			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
7789 			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
7790 		} else {
7791 			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
7792 		}
7793 	}
7794 
7795 	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
7796 		if (INTEL_INFO(dev)->gen < 4 ||
7797 		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
7798 			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
7799 		else
7800 			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
7801 	} else
7802 		pipeconf |= PIPECONF_PROGRESSIVE;
7803 
7804 	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
7805 	     intel_crtc->config->limited_color_range)
7806 		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
7807 
7808 	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
7809 	POSTING_READ(PIPECONF(intel_crtc->pipe));
7810 }
7811 
7812 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7813 				   struct intel_crtc_state *crtc_state)
7814 {
7815 	struct drm_device *dev = crtc->base.dev;
7816 	struct drm_i915_private *dev_priv = dev->dev_private;
7817 	const intel_limit_t *limit;
7818 	int refclk = 48000;
7819 
7820 	memset(&crtc_state->dpll_hw_state, 0,
7821 	       sizeof(crtc_state->dpll_hw_state));
7822 
7823 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7824 		if (intel_panel_use_ssc(dev_priv)) {
7825 			refclk = dev_priv->vbt.lvds_ssc_freq;
7826 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7827 		}
7828 
7829 		limit = &intel_limits_i8xx_lvds;
7830 	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) {
7831 		limit = &intel_limits_i8xx_dvo;
7832 	} else {
7833 		limit = &intel_limits_i8xx_dac;
7834 	}
7835 
7836 	if (!crtc_state->clock_set &&
7837 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7838 				 refclk, NULL, &crtc_state->dpll)) {
7839 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
7840 		return -EINVAL;
7841 	}
7842 
7843 	i8xx_compute_dpll(crtc, crtc_state, NULL);
7844 
7845 	return 0;
7846 }
7847 
7848 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7849 				  struct intel_crtc_state *crtc_state)
7850 {
7851 	struct drm_device *dev = crtc->base.dev;
7852 	struct drm_i915_private *dev_priv = dev->dev_private;
7853 	const intel_limit_t *limit;
7854 	int refclk = 96000;
7855 
7856 	memset(&crtc_state->dpll_hw_state, 0,
7857 	       sizeof(crtc_state->dpll_hw_state));
7858 
7859 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7860 		if (intel_panel_use_ssc(dev_priv)) {
7861 			refclk = dev_priv->vbt.lvds_ssc_freq;
7862 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7863 		}
7864 
7865 		if (intel_is_dual_link_lvds(dev))
7866 			limit = &intel_limits_g4x_dual_channel_lvds;
7867 		else
7868 			limit = &intel_limits_g4x_single_channel_lvds;
7869 	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
7870 		   intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
7871 		limit = &intel_limits_g4x_hdmi;
7872 	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
7873 		limit = &intel_limits_g4x_sdvo;
7874 	} else {
7875 		/* Fall back to the i9xx SDVO limits for any other output type. */
7876 		limit = &intel_limits_i9xx_sdvo;
7877 	}
7878 
7879 	if (!crtc_state->clock_set &&
7880 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7881 				refclk, NULL, &crtc_state->dpll)) {
7882 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
7883 		return -EINVAL;
7884 	}
7885 
7886 	i9xx_compute_dpll(crtc, crtc_state, NULL);
7887 
7888 	return 0;
7889 }
7890 
7891 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7892 				  struct intel_crtc_state *crtc_state)
7893 {
7894 	struct drm_device *dev = crtc->base.dev;
7895 	struct drm_i915_private *dev_priv = dev->dev_private;
7896 	const intel_limit_t *limit;
7897 	int refclk = 96000;
7898 
7899 	memset(&crtc_state->dpll_hw_state, 0,
7900 	       sizeof(crtc_state->dpll_hw_state));
7901 
7902 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7903 		if (intel_panel_use_ssc(dev_priv)) {
7904 			refclk = dev_priv->vbt.lvds_ssc_freq;
7905 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7906 		}
7907 
7908 		limit = &intel_limits_pineview_lvds;
7909 	} else {
7910 		limit = &intel_limits_pineview_sdvo;
7911 	}
7912 
7913 	if (!crtc_state->clock_set &&
7914 	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7915 				refclk, NULL, &crtc_state->dpll)) {
7916 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
7917 		return -EINVAL;
7918 	}
7919 
7920 	i9xx_compute_dpll(crtc, crtc_state, NULL);
7921 
7922 	return 0;
7923 }
7924 
7925 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7926 				   struct intel_crtc_state *crtc_state)
7927 {
7928 	struct drm_device *dev = crtc->base.dev;
7929 	struct drm_i915_private *dev_priv = dev->dev_private;
7930 	const intel_limit_t *limit;
7931 	int refclk = 96000;
7932 
7933 	memset(&crtc_state->dpll_hw_state, 0,
7934 	       sizeof(crtc_state->dpll_hw_state));
7935 
7936 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7937 		if (intel_panel_use_ssc(dev_priv)) {
7938 			refclk = dev_priv->vbt.lvds_ssc_freq;
7939 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7940 		}
7941 
7942 		limit = &intel_limits_i9xx_lvds;
7943 	} else {
7944 		limit = &intel_limits_i9xx_sdvo;
7945 	}
7946 
7947 	if (!crtc_state->clock_set &&
7948 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7949 				 refclk, NULL, &crtc_state->dpll)) {
7950 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
7951 		return -EINVAL;
7952 	}
7953 
7954 	i9xx_compute_dpll(crtc, crtc_state, NULL);
7955 
7956 	return 0;
7957 }
7958 
7959 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
7960 				  struct intel_crtc_state *crtc_state)
7961 {
7962 	int refclk = 100000;
7963 	const intel_limit_t *limit = &intel_limits_chv;
7964 
7965 	memset(&crtc_state->dpll_hw_state, 0,
7966 	       sizeof(crtc_state->dpll_hw_state));
7967 
7968 	if (!crtc_state->clock_set &&
7969 	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7970 				refclk, NULL, &crtc_state->dpll)) {
7971 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
7972 		return -EINVAL;
7973 	}
7974 
7975 	chv_compute_dpll(crtc, crtc_state);
7976 
7977 	return 0;
7978 }
7979 
7980 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
7981 				  struct intel_crtc_state *crtc_state)
7982 {
7983 	int refclk = 100000;
7984 	const intel_limit_t *limit = &intel_limits_vlv;
7985 
7986 	memset(&crtc_state->dpll_hw_state, 0,
7987 	       sizeof(crtc_state->dpll_hw_state));
7988 
7989 	if (!crtc_state->clock_set &&
7990 	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7991 				refclk, NULL, &crtc_state->dpll)) {
7992 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
7993 		return -EINVAL;
7994 	}
7995 
7996 	vlv_compute_dpll(crtc, crtc_state);
7997 
7998 	return 0;
7999 }
8000 
8001 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
8002 				 struct intel_crtc_state *pipe_config)
8003 {
8004 	struct drm_device *dev = crtc->base.dev;
8005 	struct drm_i915_private *dev_priv = dev->dev_private;
8006 	uint32_t tmp;
8007 
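	/* Pre-gen4 panel fitters exist only on mobile parts, and 830 has none. */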
8008 	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
8009 		return;
8010 
8011 	tmp = I915_READ(PFIT_CONTROL);
8012 	if (!(tmp & PFIT_ENABLE))
8013 		return;
8014 
8015 	/* Check whether the pfit is attached to our pipe. */
8016 	if (INTEL_INFO(dev)->gen < 4) {
8017 		if (crtc->pipe != PIPE_B)
8018 			return;
8019 	} else {
8020 		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8021 			return;
8022 	}
8023 
8024 	pipe_config->gmch_pfit.control = tmp;
8025 	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
8026 }
8027 
8028 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8029 			       struct intel_crtc_state *pipe_config)
8030 {
8031 	struct drm_device *dev = crtc->base.dev;
8032 	struct drm_i915_private *dev_priv = dev->dev_private;
8033 	int pipe = pipe_config->cpu_transcoder;
8034 	intel_clock_t clock;
8035 	u32 mdiv;
8036 	int refclk = 100000;
8037 
8038 	/* In case of DSI, DPLL will not be used */
8039 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8040 		return;
8041 
8042 	mutex_lock(&dev_priv->sb_lock);
8043 	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
8044 	mutex_unlock(&dev_priv->sb_lock);
8045 
8046 	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8047 	clock.m2 = mdiv & DPIO_M2DIV_MASK;
8048 	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8049 	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8050 	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8051 
8052 	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
8053 }
8054 
8055 static void
8056 i9xx_get_initial_plane_config(struct intel_crtc *crtc,
8057 			      struct intel_initial_plane_config *plane_config)
8058 {
8059 	struct drm_device *dev = crtc->base.dev;
8060 	struct drm_i915_private *dev_priv = dev->dev_private;
8061 	u32 val, base, offset;
8062 	int pipe = crtc->pipe, plane = crtc->plane;
8063 	int fourcc, pixel_format;
8064 	unsigned int aligned_height;
8065 	struct drm_framebuffer *fb;
8066 	struct intel_framebuffer *intel_fb;
8067 
8068 	val = I915_READ(DSPCNTR(plane));
8069 	if (!(val & DISPLAY_PLANE_ENABLE))
8070 		return;
8071 
8072 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8073 	if (!intel_fb) {
8074 		DRM_DEBUG_KMS("failed to alloc fb\n");
8075 		return;
8076 	}
8077 
8078 	fb = &intel_fb->base;
8079 
8080 	if (INTEL_INFO(dev)->gen >= 4) {
8081 		if (val & DISPPLANE_TILED) {
8082 			plane_config->tiling = I915_TILING_X;
8083 			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
8084 		}
8085 	}
8086 
8087 	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
8088 	fourcc = i9xx_format_to_fourcc(pixel_format);
8089 	fb->pixel_format = fourcc;
8090 	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
8091 
8092 	if (INTEL_INFO(dev)->gen >= 4) {
8093 		if (plane_config->tiling)
8094 			offset = I915_READ(DSPTILEOFF(plane));
8095 		else
8096 			offset = I915_READ(DSPLINOFF(plane));
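		/* DSPSURF is 4 KiB aligned; mask off the low bits. */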
8097 		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
8098 	} else {
8099 		base = I915_READ(DSPADDR(plane));
8100 	}
8101 	plane_config->base = base;
8102 
8103 	val = I915_READ(PIPESRC(pipe));
8104 	fb->width = ((val >> 16) & 0xfff) + 1;
8105 	fb->height = ((val >> 0) & 0xfff) + 1;
8106 
8107 	val = I915_READ(DSPSTRIDE(pipe));
8108 	fb->pitches[0] = val & 0xffffffc0;
8109 
8110 	aligned_height = intel_fb_align_height(dev, fb->height,
8111 					       fb->pixel_format,
8112 					       fb->modifier[0]);
8113 
8114 	plane_config->size = fb->pitches[0] * aligned_height;
8115 
8116 	DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8117 		      pipe_name(pipe), plane, fb->width, fb->height,
8118 		      fb->bits_per_pixel, base, fb->pitches[0],
8119 		      plane_config->size);
8120 
8121 	plane_config->fb = intel_fb;
8122 }
8123 
8124 static void chv_crtc_clock_get(struct intel_crtc *crtc,
8125 			       struct intel_crtc_state *pipe_config)
8126 {
8127 	struct drm_device *dev = crtc->base.dev;
8128 	struct drm_i915_private *dev_priv = dev->dev_private;
8129 	int pipe = pipe_config->cpu_transcoder;
8130 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
8131 	intel_clock_t clock;
8132 	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
8133 	int refclk = 100000;
8134 
8135 	/* In case of DSI, DPLL will not be used */
8136 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8137 		return;
8138 
8139 	mutex_lock(&dev_priv->sb_lock);
8140 	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
8141 	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
8142 	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
8143 	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
8144 	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
8145 	mutex_unlock(&dev_priv->sb_lock);
8146 
8147 	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
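	/* m2 holds the integer part in the high bits and a 22-bit
	 * fraction in the low bits (cf. chv_prepare_pll). */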
8148 	clock.m2 = (pll_dw0 & 0xff) << 22;
8149 	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
8150 		clock.m2 |= pll_dw2 & 0x3fffff;
8151 	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
8152 	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
8153 	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
8154 
8155 	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
8156 }
8157 
8158 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8159 				 struct intel_crtc_state *pipe_config)
8160 {
8161 	struct drm_device *dev = crtc->base.dev;
8162 	struct drm_i915_private *dev_priv = dev->dev_private;
8163 	enum intel_display_power_domain power_domain;
8164 	uint32_t tmp;
8165 	bool ret;
8166 
8167 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8168 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
8169 		return false;
8170 
8171 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8172 	pipe_config->shared_dpll = NULL;
8173 
8174 	ret = false;
8175 
8176 	tmp = I915_READ(PIPECONF(crtc->pipe));
8177 	if (!(tmp & PIPECONF_ENABLE))
8178 		goto out;
8179 
8180 	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
8181 		switch (tmp & PIPECONF_BPC_MASK) {
8182 		case PIPECONF_6BPC:
8183 			pipe_config->pipe_bpp = 18;
8184 			break;
8185 		case PIPECONF_8BPC:
8186 			pipe_config->pipe_bpp = 24;
8187 			break;
8188 		case PIPECONF_10BPC:
8189 			pipe_config->pipe_bpp = 30;
8190 			break;
8191 		default:
8192 			break;
8193 		}
8194 	}
8195 
8196 	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
8197 	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
8198 		pipe_config->limited_color_range = true;
8199 
8200 	if (INTEL_INFO(dev)->gen < 4)
8201 		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
8202 
8203 	intel_get_pipe_timings(crtc, pipe_config);
8204 	intel_get_pipe_src_size(crtc, pipe_config);
8205 
8206 	i9xx_get_pfit_config(crtc, pipe_config);
8207 
8208 	if (INTEL_INFO(dev)->gen >= 4) {
8209 		/* No way to read it out on pipes B and C */
8210 		if (IS_CHERRYVIEW(dev) && crtc->pipe != PIPE_A)
8211 			tmp = dev_priv->chv_dpll_md[crtc->pipe];
8212 		else
8213 			tmp = I915_READ(DPLL_MD(crtc->pipe));
8214 		pipe_config->pixel_multiplier =
8215 			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
8216 			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
8217 		pipe_config->dpll_hw_state.dpll_md = tmp;
8218 	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
8219 		tmp = I915_READ(DPLL(crtc->pipe));
8220 		pipe_config->pixel_multiplier =
8221 			((tmp & SDVO_MULTIPLIER_MASK)
8222 			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
8223 	} else {
8224 		/* Note that on i915G/GM the pixel multiplier is in the sdvo
8225 		 * port and will be fixed up in the encoder->get_config
8226 		 * function. */
8227 		pipe_config->pixel_multiplier = 1;
8228 	}
8229 	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
8230 	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
8231 		/*
8232 		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
8233 		 * on 830. Filter it out here so that we don't
8234 		 * report errors due to that.
8235 		 */
8236 		if (IS_I830(dev))
8237 			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
8238 
8239 		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
8240 		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
8241 	} else {
8242 		/* Mask out read-only status bits. */
8243 		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
8244 						     DPLL_PORTC_READY_MASK |
8245 						     DPLL_PORTB_READY_MASK);
8246 	}
8247 
8248 	if (IS_CHERRYVIEW(dev))
8249 		chv_crtc_clock_get(crtc, pipe_config);
8250 	else if (IS_VALLEYVIEW(dev))
8251 		vlv_crtc_clock_get(crtc, pipe_config);
8252 	else
8253 		i9xx_crtc_clock_get(crtc, pipe_config);
8254 
8255 	/*
8256 	 * Normally the dotclock is filled in by the encoder .get_config()
8257 	 * but in case the pipe is enabled w/o any ports we need a sane
8258 	 * default.
8259 	 */
8260 	pipe_config->base.adjusted_mode.crtc_clock =
8261 		pipe_config->port_clock / pipe_config->pixel_multiplier;
8262 
8263 	ret = true;
8264 
8265 out:
8266 	intel_display_power_put(dev_priv, power_domain);
8267 
8268 	return ret;
8269 }
8270 
8271 static void ironlake_init_pch_refclk(struct drm_device *dev)
8272 {
8273 	struct drm_i915_private *dev_priv = dev->dev_private;
8274 	struct intel_encoder *encoder;
8275 	int i;
8276 	u32 val, final;
8277 	bool has_lvds = false;
8278 	bool has_cpu_edp = false;
8279 	bool has_panel = false;
8280 	bool has_ck505 = false;
8281 	bool can_ssc = false;
8282 	bool using_ssc_source = false;
8283 
8284 	/* We need to take the global config into account */
8285 	for_each_intel_encoder(dev, encoder) {
8286 		switch (encoder->type) {
8287 		case INTEL_OUTPUT_LVDS:
8288 			has_panel = true;
8289 			has_lvds = true;
8290 			break;
8291 		case INTEL_OUTPUT_EDP:
8292 			has_panel = true;
8293 			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
8294 				has_cpu_edp = true;
8295 			break;
8296 		default:
8297 			break;
8298 		}
8299 	}
8300 
8301 	if (HAS_PCH_IBX(dev)) {
8302 		has_ck505 = dev_priv->vbt.display_clock_mode;
8303 		can_ssc = has_ck505;
8304 	} else {
8305 		has_ck505 = false;
8306 		can_ssc = true;
8307 	}
8308 
8309 	/* Check if any DPLLs are using the SSC source */
8310 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8311 		u32 temp = I915_READ(PCH_DPLL(i));
8312 
8313 		if (!(temp & DPLL_VCO_ENABLE))
8314 			continue;
8315 
8316 		if ((temp & PLL_REF_INPUT_MASK) ==
8317 		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
8318 			using_ssc_source = true;
8319 			break;
8320 		}
8321 	}
8322 
8323 	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
8324 		      has_panel, has_lvds, has_ck505, using_ssc_source);
8325 
8326 	/* Ironlake: try to set up the display reference clock before
8327 	 * enabling the DPLLs. This is only under the driver's control
8328 	 * from the PCH B stepping onwards; earlier steppings should
8329 	 * ignore this setting.
8330 	 */
8331 	val = I915_READ(PCH_DREF_CONTROL);
8332 
8333 	/* As we must carefully and slowly disable/enable each source in turn,
8334 	 * compute the final state we want first and check if we need to
8335 	 * make any changes at all.
8336 	 */
8337 	final = val;
8338 	final &= ~DREF_NONSPREAD_SOURCE_MASK;
8339 	if (has_ck505)
8340 		final |= DREF_NONSPREAD_CK505_ENABLE;
8341 	else
8342 		final |= DREF_NONSPREAD_SOURCE_ENABLE;
8343 
8344 	final &= ~DREF_SSC_SOURCE_MASK;
8345 	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8346 	final &= ~DREF_SSC1_ENABLE;
8347 
8348 	if (has_panel) {
8349 		final |= DREF_SSC_SOURCE_ENABLE;
8350 
8351 		if (intel_panel_use_ssc(dev_priv) && can_ssc)
8352 			final |= DREF_SSC1_ENABLE;
8353 
8354 		if (has_cpu_edp) {
8355 			if (intel_panel_use_ssc(dev_priv) && can_ssc)
8356 				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8357 			else
8358 				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8359 		} else
8360 			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8361 	} else if (using_ssc_source) {
8362 		final |= DREF_SSC_SOURCE_ENABLE;
8363 		final |= DREF_SSC1_ENABLE;
8364 	}
8365 
8366 	if (final == val)
8367 		return;
8368 
8369 	/* Always enable nonspread source */
8370 	val &= ~DREF_NONSPREAD_SOURCE_MASK;
8371 
8372 	if (has_ck505)
8373 		val |= DREF_NONSPREAD_CK505_ENABLE;
8374 	else
8375 		val |= DREF_NONSPREAD_SOURCE_ENABLE;
8376 
8377 	if (has_panel) {
8378 		val &= ~DREF_SSC_SOURCE_MASK;
8379 		val |= DREF_SSC_SOURCE_ENABLE;
8380 
8381 		/* SSC must be turned on before enabling the CPU output  */
8382 		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8383 			DRM_DEBUG_KMS("Using SSC on panel\n");
8384 			val |= DREF_SSC1_ENABLE;
8385 		} else
8386 			val &= ~DREF_SSC1_ENABLE;
8387 
8388 		/* Get SSC going before enabling the outputs */
8389 		I915_WRITE(PCH_DREF_CONTROL, val);
8390 		POSTING_READ(PCH_DREF_CONTROL);
8391 		udelay(200);
8392 
8393 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8394 
8395 		/* Enable CPU source on CPU attached eDP */
8396 		if (has_cpu_edp) {
8397 			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8398 				DRM_DEBUG_KMS("Using SSC on eDP\n");
8399 				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8400 			} else
8401 				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8402 		} else
8403 			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8404 
8405 		I915_WRITE(PCH_DREF_CONTROL, val);
8406 		POSTING_READ(PCH_DREF_CONTROL);
8407 		udelay(200);
8408 	} else {
8409 		DRM_DEBUG_KMS("Disabling CPU source output\n");
8410 
8411 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8412 
8413 		/* Turn off CPU output */
8414 		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8415 
8416 		I915_WRITE(PCH_DREF_CONTROL, val);
8417 		POSTING_READ(PCH_DREF_CONTROL);
8418 		udelay(200);
8419 
8420 		if (!using_ssc_source) {
8421 			DRM_DEBUG_KMS("Disabling SSC source\n");
8422 
8423 			/* Turn off the SSC source */
8424 			val &= ~DREF_SSC_SOURCE_MASK;
8425 			val |= DREF_SSC_SOURCE_DISABLE;
8426 
8427 			/* Turn off SSC1 */
8428 			val &= ~DREF_SSC1_ENABLE;
8429 
8430 			I915_WRITE(PCH_DREF_CONTROL, val);
8431 			POSTING_READ(PCH_DREF_CONTROL);
8432 			udelay(200);
8433 		}
8434 	}
8435 
8436 	BUG_ON(val != final);
8437 }
8438 
8439 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
8440 {
8441 	uint32_t tmp;
8442 
8443 	tmp = I915_READ(SOUTH_CHICKEN2);
8444 	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
8445 	I915_WRITE(SOUTH_CHICKEN2, tmp);
8446 
8447 	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
8448 			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
8449 		DRM_ERROR("FDI mPHY reset assert timeout\n");
8450 
8451 	tmp = I915_READ(SOUTH_CHICKEN2);
8452 	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
8453 	I915_WRITE(SOUTH_CHICKEN2, tmp);
8454 
8455 	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
8456 			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
8457 		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
8458 }
8459 
8460 /* WaMPhyProgramming:hsw */
8461 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
8462 {
8463 	uint32_t tmp;
8464 
8465 	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
8466 	tmp &= ~(0xFF << 24);
8467 	tmp |= (0x12 << 24);
8468 	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
8469 
8470 	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
8471 	tmp |= (1 << 11);
8472 	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
8473 
8474 	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
8475 	tmp |= (1 << 11);
8476 	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
8477 
8478 	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
8479 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8480 	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
8481 
8482 	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
8483 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8484 	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
8485 
8486 	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
8487 	tmp &= ~(7 << 13);
8488 	tmp |= (5 << 13);
8489 	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
8490 
8491 	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
8492 	tmp &= ~(7 << 13);
8493 	tmp |= (5 << 13);
8494 	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
8495 
8496 	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
8497 	tmp &= ~0xFF;
8498 	tmp |= 0x1C;
8499 	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
8500 
8501 	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
8502 	tmp &= ~0xFF;
8503 	tmp |= 0x1C;
8504 	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
8505 
8506 	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
8507 	tmp &= ~(0xFF << 16);
8508 	tmp |= (0x1C << 16);
8509 	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
8510 
8511 	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
8512 	tmp &= ~(0xFF << 16);
8513 	tmp |= (0x1C << 16);
8514 	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
8515 
8516 	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
8517 	tmp |= (1 << 27);
8518 	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
8519 
8520 	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
8521 	tmp |= (1 << 27);
8522 	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
8523 
8524 	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
8525 	tmp &= ~(0xF << 28);
8526 	tmp |= (4 << 28);
8527 	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
8528 
8529 	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
8530 	tmp &= ~(0xF << 28);
8531 	tmp |= (4 << 28);
8532 	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
8533 }
8534 
8535 /* Implements 3 different sequences from BSpec chapter "Display iCLK
8536  * Programming" based on the parameters passed:
8537  * - Sequence to enable CLKOUT_DP
8538  * - Sequence to enable CLKOUT_DP without spread
8539  * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
8540  */
8541 static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
8542 				 bool with_fdi)
8543 {
8544 	struct drm_i915_private *dev_priv = dev->dev_private;
8545 	uint32_t reg, tmp;
8546 
8547 	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
8548 		with_spread = true;
8549 	if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
8550 		with_fdi = false;
8551 
8552 	mutex_lock(&dev_priv->sb_lock);
8553 
8554 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8555 	tmp &= ~SBI_SSCCTL_DISABLE;
8556 	tmp |= SBI_SSCCTL_PATHALT;
8557 	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8558 
8559 	udelay(24);
8560 
8561 	if (with_spread) {
8562 		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8563 		tmp &= ~SBI_SSCCTL_PATHALT;
8564 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8565 
8566 		if (with_fdi) {
8567 			lpt_reset_fdi_mphy(dev_priv);
8568 			lpt_program_fdi_mphy(dev_priv);
8569 		}
8570 	}
8571 
8572 	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
8573 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8574 	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8575 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8576 
8577 	mutex_unlock(&dev_priv->sb_lock);
8578 }
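
/*
 * The three BSpec sequences map onto the two parameters as follows
 * (illustrative summary of the code above, not BSpec wording):
 *
 *	lpt_enable_clkout_dp(dev, true,  true);		CLKOUT_DP for FDI, mPHY programmed
 *	lpt_enable_clkout_dp(dev, true,  false);	CLKOUT_DP with spread
 *	lpt_enable_clkout_dp(dev, false, false);	CLKOUT_DP without spread (PATHALT set)
 *
 * (with_spread=false, with_fdi=true) is corrected by the WARN at the top,
 * since FDI requires downspread. lpt_init_pch_refclk() below uses the
 * first form when a VGA encoder is present.
 */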
8579 
8580 /* Sequence to disable CLKOUT_DP */
8581 static void lpt_disable_clkout_dp(struct drm_device *dev)
8582 {
8583 	struct drm_i915_private *dev_priv = dev->dev_private;
8584 	uint32_t reg, tmp;
8585 
8586 	mutex_lock(&dev_priv->sb_lock);
8587 
8588 	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
8589 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8590 	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8591 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8592 
8593 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8594 	if (!(tmp & SBI_SSCCTL_DISABLE)) {
8595 		if (!(tmp & SBI_SSCCTL_PATHALT)) {
8596 			tmp |= SBI_SSCCTL_PATHALT;
8597 			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8598 			udelay(32);
8599 		}
8600 		tmp |= SBI_SSCCTL_DISABLE;
8601 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8602 	}
8603 
8604 	mutex_unlock(&dev_priv->sb_lock);
8605 }
8606 
8607 #define BEND_IDX(steps) ((50 + (steps)) / 5)
8608 
8609 static const uint16_t sscdivintphase[] = {
8610 	[BEND_IDX( 50)] = 0x3B23,
8611 	[BEND_IDX( 45)] = 0x3B23,
8612 	[BEND_IDX( 40)] = 0x3C23,
8613 	[BEND_IDX( 35)] = 0x3C23,
8614 	[BEND_IDX( 30)] = 0x3D23,
8615 	[BEND_IDX( 25)] = 0x3D23,
8616 	[BEND_IDX( 20)] = 0x3E23,
8617 	[BEND_IDX( 15)] = 0x3E23,
8618 	[BEND_IDX( 10)] = 0x3F23,
8619 	[BEND_IDX(  5)] = 0x3F23,
8620 	[BEND_IDX(  0)] = 0x0025,
8621 	[BEND_IDX( -5)] = 0x0025,
8622 	[BEND_IDX(-10)] = 0x0125,
8623 	[BEND_IDX(-15)] = 0x0125,
8624 	[BEND_IDX(-20)] = 0x0225,
8625 	[BEND_IDX(-25)] = 0x0225,
8626 	[BEND_IDX(-30)] = 0x0325,
8627 	[BEND_IDX(-35)] = 0x0325,
8628 	[BEND_IDX(-40)] = 0x0425,
8629 	[BEND_IDX(-45)] = 0x0425,
8630 	[BEND_IDX(-50)] = 0x0525,
8631 };
8632 
8633 /*
8634  * Bend CLKOUT_DP
8635  * steps -50 to 50 inclusive, in steps of 5
8636  * < 0 slows down the clock, > 0 speeds up the clock, 0 == no bend (135 MHz)
8637  * change in clock period = -(steps / 10) * 5.787 ps
8638  */
8639 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
8640 {
8641 	uint32_t tmp;
8642 	int idx = BEND_IDX(steps);
8643 
8644 	if (WARN_ON(steps % 5 != 0))
8645 		return;
8646 
8647 	if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
8648 		return;
8649 
8650 	mutex_lock(&dev_priv->sb_lock);
8651 
8652 	if (steps % 10 != 0)
8653 		tmp = 0xAAAAAAAB;
8654 	else
8655 		tmp = 0x00000000;
8656 	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
8657 
8658 	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
8659 	tmp &= 0xffff0000;
8660 	tmp |= sscdivintphase[idx];
8661 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
8662 
8663 	mutex_unlock(&dev_priv->sb_lock);
8664 }
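
/*
 * Index math sanity check (illustrative values): BEND_IDX(-50) =
 * (50 - 50) / 5 = 0, BEND_IDX(0) = 10 and BEND_IDX(50) = 20, so
 * sscdivintphase[] covers all 21 legal settings. For the period
 * formula, steps = 15 changes the clock period by
 * -(15 / 10) * 5.787 ps = -8.68 ps, and since 15 % 10 != 0 the
 * dither value 0xAAAAAAAB is written to SSCDITHPHASE first.
 */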
8665 
8666 #undef BEND_IDX
8667 
8668 static void lpt_init_pch_refclk(struct drm_device *dev)
8669 {
8670 	struct intel_encoder *encoder;
8671 	bool has_vga = false;
8672 
8673 	for_each_intel_encoder(dev, encoder) {
8674 		switch (encoder->type) {
8675 		case INTEL_OUTPUT_ANALOG:
8676 			has_vga = true;
8677 			break;
8678 		default:
8679 			break;
8680 		}
8681 	}
8682 
8683 	if (has_vga) {
8684 		lpt_bend_clkout_dp(to_i915(dev), 0);
8685 		lpt_enable_clkout_dp(dev, true, true);
8686 	} else {
8687 		lpt_disable_clkout_dp(dev);
8688 	}
8689 }
8690 
8691 /*
8692  * Initialize reference clocks when the driver loads
8693  */
8694 void intel_init_pch_refclk(struct drm_device *dev)
8695 {
8696 	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
8697 		ironlake_init_pch_refclk(dev);
8698 	else if (HAS_PCH_LPT(dev))
8699 		lpt_init_pch_refclk(dev);
8700 }
8701 
8702 static void ironlake_set_pipeconf(struct drm_crtc *crtc)
8703 {
8704 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
8705 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8706 	int pipe = intel_crtc->pipe;
8707 	uint32_t val;
8708 
8709 	val = 0;
8710 
8711 	switch (intel_crtc->config->pipe_bpp) {
8712 	case 18:
8713 		val |= PIPECONF_6BPC;
8714 		break;
8715 	case 24:
8716 		val |= PIPECONF_8BPC;
8717 		break;
8718 	case 30:
8719 		val |= PIPECONF_10BPC;
8720 		break;
8721 	case 36:
8722 		val |= PIPECONF_12BPC;
8723 		break;
8724 	default:
8725 		/* Case prevented by intel_choose_pipe_bpp_dither. */
8726 		BUG();
8727 	}
8728 
8729 	if (intel_crtc->config->dither)
8730 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8731 
8732 	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8733 		val |= PIPECONF_INTERLACED_ILK;
8734 	else
8735 		val |= PIPECONF_PROGRESSIVE;
8736 
8737 	if (intel_crtc->config->limited_color_range)
8738 		val |= PIPECONF_COLOR_RANGE_SELECT;
8739 
8740 	I915_WRITE(PIPECONF(pipe), val);
8741 	POSTING_READ(PIPECONF(pipe));
8742 }
8743 
8744 static void haswell_set_pipeconf(struct drm_crtc *crtc)
8745 {
8746 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
8747 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8748 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
8749 	u32 val = 0;
8750 
8751 	if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
8752 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8753 
8754 	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8755 		val |= PIPECONF_INTERLACED_ILK;
8756 	else
8757 		val |= PIPECONF_PROGRESSIVE;
8758 
8759 	I915_WRITE(PIPECONF(cpu_transcoder), val);
8760 	POSTING_READ(PIPECONF(cpu_transcoder));
8761 }
8762 
8763 static void haswell_set_pipemisc(struct drm_crtc *crtc)
8764 {
8765 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
8766 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8767 
8768 	if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
8769 		u32 val = 0;
8770 
8771 		switch (intel_crtc->config->pipe_bpp) {
8772 		case 18:
8773 			val |= PIPEMISC_DITHER_6_BPC;
8774 			break;
8775 		case 24:
8776 			val |= PIPEMISC_DITHER_8_BPC;
8777 			break;
8778 		case 30:
8779 			val |= PIPEMISC_DITHER_10_BPC;
8780 			break;
8781 		case 36:
8782 			val |= PIPEMISC_DITHER_12_BPC;
8783 			break;
8784 		default:
8785 			/* Case prevented by pipe_config_set_bpp. */
8786 			BUG();
8787 		}
8788 
8789 		if (intel_crtc->config->dither)
8790 			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8791 
8792 		I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
8793 	}
8794 }
8795 
8796 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
8797 {
8798 	/*
8799 	 * Account for spread spectrum to avoid
8800 	 * oversubscribing the link. Max center spread
8801 	 * is 2.5%; use 5% for safety's sake.
8802 	 */
8803 	u32 bps = target_clock * bpp * 21 / 20;
8804 	return DIV_ROUND_UP(bps, link_bw * 8);
8805 }
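
/*
 * Worked example with illustrative numbers: a 148500 kHz pixel clock
 * at 24 bpp over a 270000 kHz link gives
 *
 *	bps   = 148500 * 24 * 21 / 20 = 3742200
 *	lanes = DIV_ROUND_UP(3742200, 270000 * 8) = 2
 *
 * so two FDI lanes suffice even with the 5% spread margin applied.
 */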
8806 
8807 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
8808 {
8809 	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
8810 }
8811 
8812 static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
8813 				  struct intel_crtc_state *crtc_state,
8814 				  intel_clock_t *reduced_clock)
8815 {
8816 	struct drm_crtc *crtc = &intel_crtc->base;
8817 	struct drm_device *dev = crtc->dev;
8818 	struct drm_i915_private *dev_priv = dev->dev_private;
8819 	struct drm_atomic_state *state = crtc_state->base.state;
8820 	struct drm_connector *connector;
8821 	struct drm_connector_state *connector_state;
8822 	struct intel_encoder *encoder;
8823 	u32 dpll, fp, fp2;
8824 	int factor, i;
8825 	bool is_lvds = false, is_sdvo = false;
8826 
8827 	for_each_connector_in_state(state, connector, connector_state, i) {
8828 		if (connector_state->crtc != crtc_state->base.crtc)
8829 			continue;
8830 
8831 		encoder = to_intel_encoder(connector_state->best_encoder);
8832 
8833 		switch (encoder->type) {
8834 		case INTEL_OUTPUT_LVDS:
8835 			is_lvds = true;
8836 			break;
8837 		case INTEL_OUTPUT_SDVO:
8838 		case INTEL_OUTPUT_HDMI:
8839 			is_sdvo = true;
8840 			break;
8841 		default:
8842 			break;
8843 		}
8844 	}
8845 
8846 	/* Enable autotuning of the PLL clock (if permissible) */
8847 	factor = 21;
8848 	if (is_lvds) {
8849 		if ((intel_panel_use_ssc(dev_priv) &&
8850 		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
8851 		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
8852 			factor = 25;
8853 	} else if (crtc_state->sdvo_tv_clock)
8854 		factor = 20;
8855 
8856 	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
8857 
8858 	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
8859 		fp |= FP_CB_TUNE;
8860 
8861 	if (reduced_clock) {
8862 		fp2 = i9xx_dpll_compute_fp(reduced_clock);
8863 
8864 		if (reduced_clock->m < factor * reduced_clock->n)
8865 			fp2 |= FP_CB_TUNE;
8866 	} else {
8867 		fp2 = fp;
8868 	}
8869 
8870 	dpll = 0;
8871 
8872 	if (is_lvds)
8873 		dpll |= DPLLB_MODE_LVDS;
8874 	else
8875 		dpll |= DPLLB_MODE_DAC_SERIAL;
8876 
8877 	dpll |= (crtc_state->pixel_multiplier - 1)
8878 		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
8879 
8880 	if (is_sdvo)
8881 		dpll |= DPLL_SDVO_HIGH_SPEED;
8882 	if (crtc_state->has_dp_encoder)
8883 		dpll |= DPLL_SDVO_HIGH_SPEED;
8884 
8885 	/* compute bitmask from p1 value */
8886 	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8887 	/* also FPA1 */
8888 	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
8889 
8890 	switch (crtc_state->dpll.p2) {
8891 	case 5:
8892 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
8893 		break;
8894 	case 7:
8895 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
8896 		break;
8897 	case 10:
8898 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
8899 		break;
8900 	case 14:
8901 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
8902 		break;
8903 	}
8904 
8905 	if (is_lvds && intel_panel_use_ssc(dev_priv))
8906 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
8907 	else
8908 		dpll |= PLL_REF_INPUT_DREFCLK;
8909 
8910 	dpll |= DPLL_VCO_ENABLE;
8911 
8912 	crtc_state->dpll_hw_state.dpll = dpll;
8913 	crtc_state->dpll_hw_state.fp0 = fp;
8914 	crtc_state->dpll_hw_state.fp1 = fp2;
8915 }
8916 
8917 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
8918 				       struct intel_crtc_state *crtc_state)
8919 {
8920 	struct drm_device *dev = crtc->base.dev;
8921 	struct drm_i915_private *dev_priv = dev->dev_private;
8922 	intel_clock_t reduced_clock;
8923 	bool has_reduced_clock = false;
8924 	struct intel_shared_dpll *pll;
8925 	const intel_limit_t *limit;
8926 	int refclk = 120000;
8927 
8928 	memset(&crtc_state->dpll_hw_state, 0,
8929 	       sizeof(crtc_state->dpll_hw_state));
8930 
8931 	crtc->lowfreq_avail = false;
8932 
8933 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
8934 	if (!crtc_state->has_pch_encoder)
8935 		return 0;
8936 
8937 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8938 		if (intel_panel_use_ssc(dev_priv)) {
8939 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
8940 				      dev_priv->vbt.lvds_ssc_freq);
8941 			refclk = dev_priv->vbt.lvds_ssc_freq;
8942 		}
8943 
8944 		if (intel_is_dual_link_lvds(dev)) {
8945 			if (refclk == 100000)
8946 				limit = &intel_limits_ironlake_dual_lvds_100m;
8947 			else
8948 				limit = &intel_limits_ironlake_dual_lvds;
8949 		} else {
8950 			if (refclk == 100000)
8951 				limit = &intel_limits_ironlake_single_lvds_100m;
8952 			else
8953 				limit = &intel_limits_ironlake_single_lvds;
8954 		}
8955 	} else {
8956 		limit = &intel_limits_ironlake_dac;
8957 	}
8958 
8959 	if (!crtc_state->clock_set &&
8960 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8961 				refclk, NULL, &crtc_state->dpll)) {
8962 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8963 		return -EINVAL;
8964 	}
8965 
8966 	ironlake_compute_dpll(crtc, crtc_state,
8967 			      has_reduced_clock ? &reduced_clock : NULL);
8968 
8969 	pll = intel_get_shared_dpll(crtc, crtc_state, NULL);
8970 	if (pll == NULL) {
8971 		DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
8972 				 pipe_name(crtc->pipe));
8973 		return -EINVAL;
8974 	}
8975 
8976 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8977 	    has_reduced_clock)
8978 		crtc->lowfreq_avail = true;
8979 
8980 	return 0;
8981 }
8982 
8983 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
8984 					 struct intel_link_m_n *m_n)
8985 {
8986 	struct drm_device *dev = crtc->base.dev;
8987 	struct drm_i915_private *dev_priv = dev->dev_private;
8988 	enum i915_pipe pipe = crtc->pipe;
8989 
8990 	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
8991 	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
8992 	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
8993 		& ~TU_SIZE_MASK;
8994 	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
8995 	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
8996 		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
8997 }
8998 
8999 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
9000 					 enum transcoder transcoder,
9001 					 struct intel_link_m_n *m_n,
9002 					 struct intel_link_m_n *m2_n2)
9003 {
9004 	struct drm_device *dev = crtc->base.dev;
9005 	struct drm_i915_private *dev_priv = dev->dev_private;
9006 	enum i915_pipe pipe = crtc->pipe;
9007 
9008 	if (INTEL_INFO(dev)->gen >= 5) {
9009 		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
9010 		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
9011 		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
9012 			& ~TU_SIZE_MASK;
9013 		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
9014 		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
9015 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9016 		/* Read the M2_N2 registers only on gen < 8, where they
9017 		 * exist, and only if DRRS is supported, so that the
9018 		 * registers are not read unnecessarily.
9019 		 */
9020 		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
9021 			crtc->config->has_drrs) {
9022 			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
9023 			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
9024 			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
9025 					& ~TU_SIZE_MASK;
9026 			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
9027 			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
9028 					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9029 		}
9030 	} else {
9031 		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
9032 		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
9033 		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
9034 			& ~TU_SIZE_MASK;
9035 		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
9036 		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
9037 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9038 	}
9039 }
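
/*
 * A minimal worked decode of the TU (transfer unit) field read above,
 * with illustrative register contents: the hardware stores TU-1 in the
 * high bits of the DATA_M register, so a raw value of
 *
 *	PIPE_DATA_M1 == (63 << TU_SIZE_SHIFT) | gmch_m
 *
 * yields m_n->tu = ((raw & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1 = 64.
 * That is why both branches mask TU_SIZE_MASK out of gmch_m and add 1
 * when recovering the TU size.
 */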
9040 
9041 void intel_dp_get_m_n(struct intel_crtc *crtc,
9042 		      struct intel_crtc_state *pipe_config)
9043 {
9044 	if (pipe_config->has_pch_encoder)
9045 		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9046 	else
9047 		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9048 					     &pipe_config->dp_m_n,
9049 					     &pipe_config->dp_m2_n2);
9050 }
9051 
9052 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
9053 					struct intel_crtc_state *pipe_config)
9054 {
9055 	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9056 				     &pipe_config->fdi_m_n, NULL);
9057 }
9058 
9059 static void skylake_get_pfit_config(struct intel_crtc *crtc,
9060 				    struct intel_crtc_state *pipe_config)
9061 {
9062 	struct drm_device *dev = crtc->base.dev;
9063 	struct drm_i915_private *dev_priv = dev->dev_private;
9064 	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9065 	uint32_t ps_ctrl = 0;
9066 	int id = -1;
9067 	int i;
9068 
9069 	/* find scaler attached to this pipe */
9070 	for (i = 0; i < crtc->num_scalers; i++) {
9071 		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9072 		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9073 			id = i;
9074 			pipe_config->pch_pfit.enabled = true;
9075 			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9076 			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9077 			break;
9078 		}
9079 	}
9080 
9081 	scaler_state->scaler_id = id;
9082 	if (id >= 0) {
9083 		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9084 	} else {
9085 		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9086 	}
9087 }
9088 
9089 static void
9090 skylake_get_initial_plane_config(struct intel_crtc *crtc,
9091 				 struct intel_initial_plane_config *plane_config)
9092 {
9093 	struct drm_device *dev = crtc->base.dev;
9094 	struct drm_i915_private *dev_priv = dev->dev_private;
9095 	u32 val, base, offset, stride_mult, tiling;
9096 	int pipe = crtc->pipe;
9097 	int fourcc, pixel_format;
9098 	unsigned int aligned_height;
9099 	struct drm_framebuffer *fb;
9100 	struct intel_framebuffer *intel_fb;
9101 
9102 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9103 	if (!intel_fb) {
9104 		DRM_DEBUG_KMS("failed to alloc fb\n");
9105 		return;
9106 	}
9107 
9108 	fb = &intel_fb->base;
9109 
9110 	val = I915_READ(PLANE_CTL(pipe, 0));
9111 	if (!(val & PLANE_CTL_ENABLE))
9112 		goto error;
9113 
9114 	pixel_format = val & PLANE_CTL_FORMAT_MASK;
9115 	fourcc = skl_format_to_fourcc(pixel_format,
9116 				      val & PLANE_CTL_ORDER_RGBX,
9117 				      val & PLANE_CTL_ALPHA_MASK);
9118 	fb->pixel_format = fourcc;
9119 	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
9120 
9121 	tiling = val & PLANE_CTL_TILED_MASK;
9122 	switch (tiling) {
9123 	case PLANE_CTL_TILED_LINEAR:
9124 		fb->modifier[0] = DRM_FORMAT_MOD_NONE;
9125 		break;
9126 	case PLANE_CTL_TILED_X:
9127 		plane_config->tiling = I915_TILING_X;
9128 		fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
9129 		break;
9130 	case PLANE_CTL_TILED_Y:
9131 		fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
9132 		break;
9133 	case PLANE_CTL_TILED_YF:
9134 		fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
9135 		break;
9136 	default:
9137 		MISSING_CASE(tiling);
9138 		goto error;
9139 	}
9140 
9141 	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
9142 	plane_config->base = base;
9143 
9144 	offset = I915_READ(PLANE_OFFSET(pipe, 0));
9145 
9146 	val = I915_READ(PLANE_SIZE(pipe, 0));
9147 	fb->height = ((val >> 16) & 0xfff) + 1;
9148 	fb->width = ((val >> 0) & 0x1fff) + 1;
9149 
9150 	val = I915_READ(PLANE_STRIDE(pipe, 0));
9151 	stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
9152 						fb->pixel_format);
9153 	fb->pitches[0] = (val & 0x3ff) * stride_mult;
9154 
9155 	aligned_height = intel_fb_align_height(dev, fb->height,
9156 					       fb->pixel_format,
9157 					       fb->modifier[0]);
9158 
9159 	plane_config->size = fb->pitches[0] * aligned_height;
9160 
9161 	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9162 		      pipe_name(pipe), fb->width, fb->height,
9163 		      fb->bits_per_pixel, base, fb->pitches[0],
9164 		      plane_config->size);
9165 
9166 	plane_config->fb = intel_fb;
9167 	return;
9168 
9169 error:
9170 	kfree(fb);
9171 }
9172 
9173 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
9174 				     struct intel_crtc_state *pipe_config)
9175 {
9176 	struct drm_device *dev = crtc->base.dev;
9177 	struct drm_i915_private *dev_priv = dev->dev_private;
9178 	uint32_t tmp;
9179 
9180 	tmp = I915_READ(PF_CTL(crtc->pipe));
9181 
9182 	if (tmp & PF_ENABLE) {
9183 		pipe_config->pch_pfit.enabled = true;
9184 		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9185 		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
9186 
9187 		/* We currently do not free assignments of panel fitters on
9188 		 * ivb/hsw (since we don't use the higher upscaling modes which
9189 		 * differentiate them), so just WARN about this case for now. */
9190 		if (IS_GEN7(dev)) {
9191 			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9192 				PF_PIPE_SEL_IVB(crtc->pipe));
9193 		}
9194 	}
9195 }
9196 
9197 static void
9198 ironlake_get_initial_plane_config(struct intel_crtc *crtc,
9199 				  struct intel_initial_plane_config *plane_config)
9200 {
9201 	struct drm_device *dev = crtc->base.dev;
9202 	struct drm_i915_private *dev_priv = dev->dev_private;
9203 	u32 val, base, offset;
9204 	int pipe = crtc->pipe;
9205 	int fourcc, pixel_format;
9206 	unsigned int aligned_height;
9207 	struct drm_framebuffer *fb;
9208 	struct intel_framebuffer *intel_fb;
9209 
9210 	val = I915_READ(DSPCNTR(pipe));
9211 	if (!(val & DISPLAY_PLANE_ENABLE))
9212 		return;
9213 
9214 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9215 	if (!intel_fb) {
9216 		DRM_DEBUG_KMS("failed to alloc fb\n");
9217 		return;
9218 	}
9219 
9220 	fb = &intel_fb->base;
9221 
9222 	if (INTEL_INFO(dev)->gen >= 4) {
9223 		if (val & DISPPLANE_TILED) {
9224 			plane_config->tiling = I915_TILING_X;
9225 			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
9226 		}
9227 	}
9228 
9229 	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
9230 	fourcc = i9xx_format_to_fourcc(pixel_format);
9231 	fb->pixel_format = fourcc;
9232 	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
9233 
9234 	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
9235 	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
9236 		offset = I915_READ(DSPOFFSET(pipe));
9237 	} else {
9238 		if (plane_config->tiling)
9239 			offset = I915_READ(DSPTILEOFF(pipe));
9240 		else
9241 			offset = I915_READ(DSPLINOFF(pipe));
9242 	}
9243 	plane_config->base = base;
9244 
9245 	val = I915_READ(PIPESRC(pipe));
9246 	fb->width = ((val >> 16) & 0xfff) + 1;
9247 	fb->height = ((val >> 0) & 0xfff) + 1;
9248 
9249 	val = I915_READ(DSPSTRIDE(pipe));
9250 	fb->pitches[0] = val & 0xffffffc0;
9251 
9252 	aligned_height = intel_fb_align_height(dev, fb->height,
9253 					       fb->pixel_format,
9254 					       fb->modifier[0]);
9255 
9256 	plane_config->size = fb->pitches[0] * aligned_height;
9257 
9258 	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9259 		      pipe_name(pipe), fb->width, fb->height,
9260 		      fb->bits_per_pixel, base, fb->pitches[0],
9261 		      plane_config->size);
9262 
9263 	plane_config->fb = intel_fb;
9264 }
9265 
9266 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9267 				     struct intel_crtc_state *pipe_config)
9268 {
9269 	struct drm_device *dev = crtc->base.dev;
9270 	struct drm_i915_private *dev_priv = dev->dev_private;
9271 	enum intel_display_power_domain power_domain;
9272 	uint32_t tmp;
9273 	bool ret;
9274 
9275 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9276 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9277 		return false;
9278 
9279 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9280 	pipe_config->shared_dpll = NULL;
9281 
9282 	ret = false;
9283 	tmp = I915_READ(PIPECONF(crtc->pipe));
9284 	if (!(tmp & PIPECONF_ENABLE))
9285 		goto out;
9286 
9287 	switch (tmp & PIPECONF_BPC_MASK) {
9288 	case PIPECONF_6BPC:
9289 		pipe_config->pipe_bpp = 18;
9290 		break;
9291 	case PIPECONF_8BPC:
9292 		pipe_config->pipe_bpp = 24;
9293 		break;
9294 	case PIPECONF_10BPC:
9295 		pipe_config->pipe_bpp = 30;
9296 		break;
9297 	case PIPECONF_12BPC:
9298 		pipe_config->pipe_bpp = 36;
9299 		break;
9300 	default:
9301 		break;
9302 	}
9303 
9304 	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
9305 		pipe_config->limited_color_range = true;
9306 
9307 	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
9308 		struct intel_shared_dpll *pll;
9309 		enum intel_dpll_id pll_id;
9310 
9311 		pipe_config->has_pch_encoder = true;
9312 
9313 		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
9314 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9315 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
9316 
9317 		ironlake_get_fdi_m_n_config(crtc, pipe_config);
9318 
9319 		if (HAS_PCH_IBX(dev_priv)) {
9320 			pll_id = (enum intel_dpll_id) crtc->pipe;
9321 		} else {
9322 			tmp = I915_READ(PCH_DPLL_SEL);
9323 			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
9324 				pll_id = DPLL_ID_PCH_PLL_B;
9325 			else
9326 				pll_id = DPLL_ID_PCH_PLL_A;
9327 		}
9328 
9329 		pipe_config->shared_dpll =
9330 			intel_get_shared_dpll_by_id(dev_priv, pll_id);
9331 		pll = pipe_config->shared_dpll;
9332 
9333 		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
9334 						 &pipe_config->dpll_hw_state));
9335 
9336 		tmp = pipe_config->dpll_hw_state.dpll;
9337 		pipe_config->pixel_multiplier =
9338 			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
9339 			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
9340 
9341 		ironlake_pch_clock_get(crtc, pipe_config);
9342 	} else {
9343 		pipe_config->pixel_multiplier = 1;
9344 	}
9345 
9346 	intel_get_pipe_timings(crtc, pipe_config);
9347 	intel_get_pipe_src_size(crtc, pipe_config);
9348 
9349 	ironlake_get_pfit_config(crtc, pipe_config);
9350 
9351 	ret = true;
9352 
9353 out:
9354 	intel_display_power_put(dev_priv, power_domain);
9355 
9356 	return ret;
9357 }
9358 
9359 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
9360 {
9361 	struct drm_device *dev = dev_priv->dev;
9362 	struct intel_crtc *crtc;
9363 
9364 	for_each_intel_crtc(dev, crtc)
9365 		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
9366 		     pipe_name(crtc->pipe));
9367 
9368 	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
9369 	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
9370 	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
9371 	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
9372 	I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
9373 	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
9374 	     "CPU PWM1 enabled\n");
9375 	if (IS_HASWELL(dev))
9376 		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
9377 		     "CPU PWM2 enabled\n");
9378 	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
9379 	     "PCH PWM1 enabled\n");
9380 	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
9381 	     "Utility pin enabled\n");
9382 	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
9383 
9384 	/*
9385 	 * In theory we can still leave IRQs enabled, as long as only the HPD
9386 	 * interrupts remain enabled. We used to check for that, but since it's
9387 	 * gen-specific and since we only disable LCPLL after we fully disable
9388 	 * the interrupts, the check below should be enough.
9389 	 */
9390 	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
9391 }
9392 
9393 static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9394 {
9395 	struct drm_device *dev = dev_priv->dev;
9396 
9397 	if (IS_HASWELL(dev))
9398 		return I915_READ(D_COMP_HSW);
9399 	else
9400 		return I915_READ(D_COMP_BDW);
9401 }
9402 
9403 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
9404 {
9405 	struct drm_device *dev = dev_priv->dev;
9406 
9407 	if (IS_HASWELL(dev)) {
9408 		mutex_lock(&dev_priv->rps.hw_lock);
9409 		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
9410 					    val))
9411 			DRM_ERROR("Failed to write to D_COMP\n");
9412 		mutex_unlock(&dev_priv->rps.hw_lock);
9413 	} else {
9414 		I915_WRITE(D_COMP_BDW, val);
9415 		POSTING_READ(D_COMP_BDW);
9416 	}
9417 }
9418 
9419 /*
9420  * This function implements pieces of two sequences from BSpec:
9421  * - Sequence for display software to disable LCPLL
9422  * - Sequence for display software to allow package C8+
9423  * The steps implemented here are just the steps that actually touch the LCPLL
9424  * register. Callers should take care of disabling all the display engine
9425  * functions, doing the mode unset, fixing interrupts, etc.
9426  */
9427 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
9428 			      bool switch_to_fclk, bool allow_power_down)
9429 {
9430 	uint32_t val;
9431 
9432 	assert_can_disable_lcpll(dev_priv);
9433 
9434 	val = I915_READ(LCPLL_CTL);
9435 
9436 	if (switch_to_fclk) {
9437 		val |= LCPLL_CD_SOURCE_FCLK;
9438 		I915_WRITE(LCPLL_CTL, val);
9439 
9440 		if (wait_for_us(I915_READ(LCPLL_CTL) &
9441 				LCPLL_CD_SOURCE_FCLK_DONE, 1))
9442 			DRM_ERROR("Switching to FCLK failed\n");
9443 
9444 		val = I915_READ(LCPLL_CTL);
9445 	}
9446 
9447 	val |= LCPLL_PLL_DISABLE;
9448 	I915_WRITE(LCPLL_CTL, val);
9449 	POSTING_READ(LCPLL_CTL);
9450 
9451 	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
9452 		DRM_ERROR("LCPLL still locked\n");
9453 
9454 	val = hsw_read_dcomp(dev_priv);
9455 	val |= D_COMP_COMP_DISABLE;
9456 	hsw_write_dcomp(dev_priv, val);
9457 	ndelay(100);
9458 
9459 	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
9460 		     1))
9461 		DRM_ERROR("D_COMP RCOMP still in progress\n");
9462 
9463 	if (allow_power_down) {
9464 		val = I915_READ(LCPLL_CTL);
9465 		val |= LCPLL_POWER_DOWN_ALLOW;
9466 		I915_WRITE(LCPLL_CTL, val);
9467 		POSTING_READ(LCPLL_CTL);
9468 	}
9469 }
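
/*
 * Usage note: this is paired with hsw_restore_lcpll() below. The PC8
 * path (hsw_enable_pc8()) calls hsw_disable_lcpll(dev_priv, true, true)
 * so the CD clock is first moved to Fclk and the PLL is then allowed
 * to power down before the package enters C8+.
 */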
9470 
9471 /*
9472  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
9473  * source.
9474  */
9475 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
9476 {
9477 	uint32_t val;
9478 
9479 	val = I915_READ(LCPLL_CTL);
9480 
9481 	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
9482 		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
9483 		return;
9484 
9485 	/*
9486 	 * Make sure we're not in PC8 state before disabling PC8, otherwise
9487 	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
9488 	 */
9489 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
9490 
9491 	if (val & LCPLL_POWER_DOWN_ALLOW) {
9492 		val &= ~LCPLL_POWER_DOWN_ALLOW;
9493 		I915_WRITE(LCPLL_CTL, val);
9494 		POSTING_READ(LCPLL_CTL);
9495 	}
9496 
9497 	val = hsw_read_dcomp(dev_priv);
9498 	val |= D_COMP_COMP_FORCE;
9499 	val &= ~D_COMP_COMP_DISABLE;
9500 	hsw_write_dcomp(dev_priv, val);
9501 
9502 	val = I915_READ(LCPLL_CTL);
9503 	val &= ~LCPLL_PLL_DISABLE;
9504 	I915_WRITE(LCPLL_CTL, val);
9505 
9506 	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
9507 		DRM_ERROR("LCPLL not locked yet\n");
9508 
9509 	if (val & LCPLL_CD_SOURCE_FCLK) {
9510 		val = I915_READ(LCPLL_CTL);
9511 		val &= ~LCPLL_CD_SOURCE_FCLK;
9512 		I915_WRITE(LCPLL_CTL, val);
9513 
9514 		if (wait_for_us((I915_READ(LCPLL_CTL) &
9515 				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9516 			DRM_ERROR("Switching back to LCPLL failed\n");
9517 	}
9518 
9519 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
9520 	intel_update_cdclk(dev_priv->dev);
9521 }
9522 
9523 /*
9524  * Package states C8 and deeper are really deep PC states that can only be
9525  * reached when all the devices on the system allow it, so even if the graphics
9526  * device allows PC8+, it doesn't mean the system will actually get to these
9527  * states. Our driver only allows PC8+ when going into runtime PM.
9528  *
9529  * The requirements for PC8+ are that all the outputs are disabled, the power
9530  * well is disabled and most interrupts are disabled, and these are also
9531  * requirements for runtime PM. When these conditions are met, we manually do
9532  * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
9533  * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
9534  * hang the machine.
9535  *
9536  * When we really reach PC8 or deeper states (not just when we allow it) we lose
9537  * the state of some registers, so when we come back from PC8+ we need to
9538  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9539  * need to take care of the registers kept by RC6. Notice that this happens even
9540  * if we don't put the device in PCI D3 state (which is what currently happens
9541  * because of the runtime PM support).
9542  *
9543  * For more, read "Display Sequences for Package C8" on the hardware
9544  * documentation.
9545  */
9546 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
9547 {
9548 	struct drm_device *dev = dev_priv->dev;
9549 	uint32_t val;
9550 
9551 	DRM_DEBUG_KMS("Enabling package C8+\n");
9552 
9553 	if (HAS_PCH_LPT_LP(dev)) {
9554 		val = I915_READ(SOUTH_DSPCLK_GATE_D);
9555 		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
9556 		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9557 	}
9558 
9559 	lpt_disable_clkout_dp(dev);
9560 	hsw_disable_lcpll(dev_priv, true, true);
9561 }
9562 
9563 void hsw_disable_pc8(struct drm_i915_private *dev_priv)
9564 {
9565 	struct drm_device *dev = dev_priv->dev;
9566 	uint32_t val;
9567 
9568 	DRM_DEBUG_KMS("Disabling package C8+\n");
9569 
9570 	hsw_restore_lcpll(dev_priv);
9571 	lpt_init_pch_refclk(dev);
9572 
9573 	if (HAS_PCH_LPT_LP(dev)) {
9574 		val = I915_READ(SOUTH_DSPCLK_GATE_D);
9575 		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
9576 		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9577 	}
9578 }
9579 
9580 static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9581 {
9582 	struct drm_device *dev = old_state->dev;
9583 	struct intel_atomic_state *old_intel_state =
9584 		to_intel_atomic_state(old_state);
9585 	unsigned int req_cdclk = old_intel_state->dev_cdclk;
9586 
9587 	broxton_set_cdclk(to_i915(dev), req_cdclk);
9588 }
9589 
9590 /* compute the max rate for new configuration */
9591 static int ilk_max_pixel_rate(struct drm_atomic_state *state)
9592 {
9593 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
9594 	struct drm_i915_private *dev_priv = state->dev->dev_private;
9595 	struct drm_crtc *crtc;
9596 	struct drm_crtc_state *cstate;
9597 	struct intel_crtc_state *crtc_state;
9598 	unsigned max_pixel_rate = 0, i;
9599 	enum i915_pipe pipe;
9600 
9601 	memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
9602 	       sizeof(intel_state->min_pixclk));
9603 
9604 	for_each_crtc_in_state(state, crtc, cstate, i) {
9605 		int pixel_rate;
9606 
9607 		crtc_state = to_intel_crtc_state(cstate);
9608 		if (!crtc_state->base.enable) {
9609 			intel_state->min_pixclk[i] = 0;
9610 			continue;
9611 		}
9612 
9613 		pixel_rate = ilk_pipe_pixel_rate(crtc_state);
9614 
9615 		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
9616 		if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
9617 			pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
9618 
9619 		intel_state->min_pixclk[i] = pixel_rate;
9620 	}
9621 
9622 	for_each_pipe(dev_priv, pipe)
9623 		max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate);
9624 
9625 	return max_pixel_rate;
9626 }
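
/*
 * Worked example for the IPS correction above, with illustrative
 * numbers: on BDW with IPS enabled, a CRTC running at 95000 kHz is
 * accounted as DIV_ROUND_UP(95000 * 100, 95) = 100000 kHz. Inflating
 * the pixel rate by 100/95 here lets the "95% of cdclk" rule be
 * enforced by plain comparisons against cdclk candidates later on.
 */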
9627 
9628 static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
9629 {
9630 	struct drm_i915_private *dev_priv = dev->dev_private;
9631 	uint32_t val, data;
9632 	int ret;
9633 
9634 	if (WARN((I915_READ(LCPLL_CTL) &
9635 		  (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
9636 		   LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
9637 		   LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
9638 		   LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
9639 		 "trying to change cdclk frequency with cdclk not enabled\n"))
9640 		return;
9641 
9642 	mutex_lock(&dev_priv->rps.hw_lock);
9643 	ret = sandybridge_pcode_write(dev_priv,
9644 				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
9645 	mutex_unlock(&dev_priv->rps.hw_lock);
9646 	if (ret) {
9647 		DRM_ERROR("failed to inform pcode about cdclk change\n");
9648 		return;
9649 	}
9650 
9651 	val = I915_READ(LCPLL_CTL);
9652 	val |= LCPLL_CD_SOURCE_FCLK;
9653 	I915_WRITE(LCPLL_CTL, val);
9654 
9655 	if (wait_for_us(I915_READ(LCPLL_CTL) &
9656 			LCPLL_CD_SOURCE_FCLK_DONE, 1))
9657 		DRM_ERROR("Switching to FCLK failed\n");
9658 
9659 	val = I915_READ(LCPLL_CTL);
9660 	val &= ~LCPLL_CLK_FREQ_MASK;
9661 
9662 	switch (cdclk) {
9663 	case 450000:
9664 		val |= LCPLL_CLK_FREQ_450;
9665 		data = 0;
9666 		break;
9667 	case 540000:
9668 		val |= LCPLL_CLK_FREQ_54O_BDW;
9669 		data = 1;
9670 		break;
9671 	case 337500:
9672 		val |= LCPLL_CLK_FREQ_337_5_BDW;
9673 		data = 2;
9674 		break;
9675 	case 675000:
9676 		val |= LCPLL_CLK_FREQ_675_BDW;
9677 		data = 3;
9678 		break;
9679 	default:
9680 		WARN(1, "invalid cdclk frequency\n");
9681 		return;
9682 	}
9683 
9684 	I915_WRITE(LCPLL_CTL, val);
9685 
9686 	val = I915_READ(LCPLL_CTL);
9687 	val &= ~LCPLL_CD_SOURCE_FCLK;
9688 	I915_WRITE(LCPLL_CTL, val);
9689 
9690 	if (wait_for_us((I915_READ(LCPLL_CTL) &
9691 			LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9692 		DRM_ERROR("Switching back to LCPLL failed\n");
9693 
9694 	mutex_lock(&dev_priv->rps.hw_lock);
9695 	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
9696 	mutex_unlock(&dev_priv->rps.hw_lock);
9697 
9698 	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
9699 
9700 	intel_update_cdclk(dev);
9701 
9702 	WARN(cdclk != dev_priv->cdclk_freq,
9703 	     "cdclk requested %d kHz but got %d kHz\n",
9704 	     cdclk, dev_priv->cdclk_freq);
9705 }
9706 
9707 static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
9708 {
9709 	struct drm_i915_private *dev_priv = to_i915(state->dev);
9710 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
9711 	int max_pixclk = ilk_max_pixel_rate(state);
9712 	int cdclk;
9713 
9714 	/*
9715 	 * FIXME should also account for plane ratio
9716 	 * once 64bpp pixel formats are supported.
9717 	 */
9718 	if (max_pixclk > 540000)
9719 		cdclk = 675000;
9720 	else if (max_pixclk > 450000)
9721 		cdclk = 540000;
9722 	else if (max_pixclk > 337500)
9723 		cdclk = 450000;
9724 	else
9725 		cdclk = 337500;
9726 
9727 	if (cdclk > dev_priv->max_cdclk_freq) {
9728 		DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
9729 			      cdclk, dev_priv->max_cdclk_freq);
9730 		return -EINVAL;
9731 	}
9732 
9733 	intel_state->cdclk = intel_state->dev_cdclk = cdclk;
9734 	if (!intel_state->active_crtcs)
9735 		intel_state->dev_cdclk = 337500;
9736 
9737 	return 0;
9738 }
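
/*
 * Bucket selection example (illustrative numbers): a max pixel rate of
 * 400000 kHz selects cdclk = 450000, while exactly 337500 kHz stays at
 * 337500 because the comparisons are strict. With no active CRTCs the
 * device cdclk is dropped to the 337500 kHz minimum.
 */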
9739 
9740 static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9741 {
9742 	struct drm_device *dev = old_state->dev;
9743 	struct intel_atomic_state *old_intel_state =
9744 		to_intel_atomic_state(old_state);
9745 	unsigned req_cdclk = old_intel_state->dev_cdclk;
9746 
9747 	broadwell_set_cdclk(dev, req_cdclk);
9748 }
9749 
9750 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9751 				      struct intel_crtc_state *crtc_state)
9752 {
9753 	struct intel_encoder *intel_encoder =
9754 		intel_ddi_get_crtc_new_encoder(crtc_state);
9755 
9756 	if (intel_encoder->type != INTEL_OUTPUT_DSI) {
9757 		if (!intel_ddi_pll_select(crtc, crtc_state))
9758 			return -EINVAL;
9759 	}
9760 
9761 	crtc->lowfreq_avail = false;
9762 
9763 	return 0;
9764 }
9765 
9766 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9767 				enum port port,
9768 				struct intel_crtc_state *pipe_config)
9769 {
9770 	enum intel_dpll_id id;
9771 
9772 	switch (port) {
9773 	case PORT_A:
9774 		pipe_config->ddi_pll_sel = SKL_DPLL0;
9775 		id = DPLL_ID_SKL_DPLL0;
9776 		break;
9777 	case PORT_B:
9778 		pipe_config->ddi_pll_sel = SKL_DPLL1;
9779 		id = DPLL_ID_SKL_DPLL1;
9780 		break;
9781 	case PORT_C:
9782 		pipe_config->ddi_pll_sel = SKL_DPLL2;
9783 		id = DPLL_ID_SKL_DPLL2;
9784 		break;
9785 	default:
9786 		DRM_ERROR("Incorrect port type\n");
9787 		return;
9788 	}
9789 
9790 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9791 }
9792 
9793 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
9794 				enum port port,
9795 				struct intel_crtc_state *pipe_config)
9796 {
9797 	enum intel_dpll_id id;
9798 	u32 temp;
9799 
9800 	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
9801 	pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
9802 
9803 	switch (pipe_config->ddi_pll_sel) {
9804 	case SKL_DPLL0:
9805 		id = DPLL_ID_SKL_DPLL0;
9806 		break;
9807 	case SKL_DPLL1:
9808 		id = DPLL_ID_SKL_DPLL1;
9809 		break;
9810 	case SKL_DPLL2:
9811 		id = DPLL_ID_SKL_DPLL2;
9812 		break;
9813 	case SKL_DPLL3:
9814 		id = DPLL_ID_SKL_DPLL3;
9815 		break;
9816 	default:
9817 		MISSING_CASE(pipe_config->ddi_pll_sel);
9818 		return;
9819 	}
9820 
9821 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9822 }
9823 
9824 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
9825 				enum port port,
9826 				struct intel_crtc_state *pipe_config)
9827 {
9828 	enum intel_dpll_id id;
9829 
9830 	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
9831 
9832 	switch (pipe_config->ddi_pll_sel) {
9833 	case PORT_CLK_SEL_WRPLL1:
9834 		id = DPLL_ID_WRPLL1;
9835 		break;
9836 	case PORT_CLK_SEL_WRPLL2:
9837 		id = DPLL_ID_WRPLL2;
9838 		break;
9839 	case PORT_CLK_SEL_SPLL:
9840 		id = DPLL_ID_SPLL;
9841 		break;
9842 	case PORT_CLK_SEL_LCPLL_810:
9843 		id = DPLL_ID_LCPLL_810;
9844 		break;
9845 	case PORT_CLK_SEL_LCPLL_1350:
9846 		id = DPLL_ID_LCPLL_1350;
9847 		break;
9848 	case PORT_CLK_SEL_LCPLL_2700:
9849 		id = DPLL_ID_LCPLL_2700;
9850 		break;
9851 	default:
9852 		MISSING_CASE(pipe_config->ddi_pll_sel);
9853 		/* fall through */
9854 	case PORT_CLK_SEL_NONE:
9855 		return;
9856 	}
9857 
9858 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9859 }
9860 
9861 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
9862 				     struct intel_crtc_state *pipe_config,
9863 				     unsigned long *power_domain_mask)
9864 {
9865 	struct drm_device *dev = crtc->base.dev;
9866 	struct drm_i915_private *dev_priv = dev->dev_private;
9867 	enum intel_display_power_domain power_domain;
9868 	u32 tmp;
9869 
9870 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9871 
9872 	/*
9873 	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
9874 	 * consistency and less surprising code; it's in an always-on power well).
9875 	 */
9876 	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
9877 	if (tmp & TRANS_DDI_FUNC_ENABLE) {
9878 		enum i915_pipe trans_edp_pipe;
9879 		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
9880 		default:
9881 			WARN(1, "unknown pipe linked to edp transcoder\n");	/* fall through */
9882 		case TRANS_DDI_EDP_INPUT_A_ONOFF:
9883 		case TRANS_DDI_EDP_INPUT_A_ON:
9884 			trans_edp_pipe = PIPE_A;
9885 			break;
9886 		case TRANS_DDI_EDP_INPUT_B_ONOFF:
9887 			trans_edp_pipe = PIPE_B;
9888 			break;
9889 		case TRANS_DDI_EDP_INPUT_C_ONOFF:
9890 			trans_edp_pipe = PIPE_C;
9891 			break;
9892 		}
9893 
9894 		if (trans_edp_pipe == crtc->pipe)
9895 			pipe_config->cpu_transcoder = TRANSCODER_EDP;
9896 	}
9897 
9898 	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
9899 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9900 		return false;
9901 	*power_domain_mask |= BIT(power_domain);
9902 
9903 	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
9904 
9905 	return tmp & PIPECONF_ENABLE;
9906 }
9907 
9908 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
9909 					 struct intel_crtc_state *pipe_config,
9910 					 unsigned long *power_domain_mask)
9911 {
9912 	struct drm_device *dev = crtc->base.dev;
9913 	struct drm_i915_private *dev_priv = dev->dev_private;
9914 	enum intel_display_power_domain power_domain;
9915 	enum port port;
9916 	enum transcoder cpu_transcoder;
9917 	u32 tmp;
9918 
9919 	pipe_config->has_dsi_encoder = false;
9920 
9921 	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
9922 		if (port == PORT_A)
9923 			cpu_transcoder = TRANSCODER_DSI_A;
9924 		else
9925 			cpu_transcoder = TRANSCODER_DSI_C;
9926 
9927 		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
9928 		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9929 			continue;
9930 		*power_domain_mask |= BIT(power_domain);
9931 
9932 		/*
9933 		 * The PLL needs to be enabled with a valid divider
9934 		 * configuration, otherwise accessing DSI registers will hang
9935 		 * the machine. See BSpec North Display Engine
9936 		 * registers/MIPI[BXT]. We can break out here early, since we
9937 		 * need the same DSI PLL to be enabled for both DSI ports.
9938 		 */
9939 		if (!intel_dsi_pll_is_enabled(dev_priv))
9940 			break;
9941 
9942 		/* XXX: this works for video mode only */
9943 		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
9944 		if (!(tmp & DPI_ENABLE))
9945 			continue;
9946 
9947 		tmp = I915_READ(MIPI_CTRL(port));
9948 		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
9949 			continue;
9950 
9951 		pipe_config->cpu_transcoder = cpu_transcoder;
9952 		pipe_config->has_dsi_encoder = true;
9953 		break;
9954 	}
9955 
9956 	return pipe_config->has_dsi_encoder;
9957 }
9958 
9959 static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
9960 				       struct intel_crtc_state *pipe_config)
9961 {
9962 	struct drm_device *dev = crtc->base.dev;
9963 	struct drm_i915_private *dev_priv = dev->dev_private;
9964 	struct intel_shared_dpll *pll;
9965 	enum port port;
9966 	uint32_t tmp;
9967 
9968 	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
9969 
9970 	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
9971 
9972 	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
9973 		skylake_get_ddi_pll(dev_priv, port, pipe_config);
9974 	else if (IS_BROXTON(dev))
9975 		bxt_get_ddi_pll(dev_priv, port, pipe_config);
9976 	else
9977 		haswell_get_ddi_pll(dev_priv, port, pipe_config);
9978 
9979 	pll = pipe_config->shared_dpll;
9980 	if (pll) {
9981 		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
9982 						 &pipe_config->dpll_hw_state));
9983 	}
9984 
9985 	/*
9986 	 * Haswell has only FDI/PCH transcoder A, and it is connected to
9987 	 * DDI E. So just check whether this pipe is wired to DDI E and whether
9988 	 * the PCH transcoder is on.
9989 	 */
9990 	if (INTEL_INFO(dev)->gen < 9 &&
9991 	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
9992 		pipe_config->has_pch_encoder = true;
9993 
9994 		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
9995 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9996 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
9997 
9998 		ironlake_get_fdi_m_n_config(crtc, pipe_config);
9999 	}
10000 }
10001 
10002 static bool haswell_get_pipe_config(struct intel_crtc *crtc,
10003 				    struct intel_crtc_state *pipe_config)
10004 {
10005 	struct drm_device *dev = crtc->base.dev;
10006 	struct drm_i915_private *dev_priv = dev->dev_private;
10007 	enum intel_display_power_domain power_domain;
10008 	unsigned long power_domain_mask;
10009 	bool active;
10010 
10011 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10012 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
10013 		return false;
10014 	power_domain_mask = BIT(power_domain);
10015 
10016 	pipe_config->shared_dpll = NULL;
10017 
10018 	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
10019 
10020 	if (IS_BROXTON(dev_priv)) {
10021 		bxt_get_dsi_transcoder_state(crtc, pipe_config,
10022 					     &power_domain_mask);
10023 		WARN_ON(active && pipe_config->has_dsi_encoder);
10024 		if (pipe_config->has_dsi_encoder)
10025 			active = true;
10026 	}
10027 
10028 	if (!active)
10029 		goto out;
10030 
10031 	if (!pipe_config->has_dsi_encoder) {
10032 		haswell_get_ddi_port_state(crtc, pipe_config);
10033 		intel_get_pipe_timings(crtc, pipe_config);
10034 	}
10035 
10036 	intel_get_pipe_src_size(crtc, pipe_config);
10037 
10038 	pipe_config->gamma_mode =
10039 		I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
10040 
10041 	if (INTEL_INFO(dev)->gen >= 9) {
10042 		skl_init_scalers(dev, crtc, pipe_config);
10043 	}
10044 
10045 	if (INTEL_INFO(dev)->gen >= 9) {
10046 		pipe_config->scaler_state.scaler_id = -1;
10047 		pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
10048 	}
10049 
10050 	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10051 	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
10052 		power_domain_mask |= BIT(power_domain);
10053 		if (INTEL_INFO(dev)->gen >= 9)
10054 			skylake_get_pfit_config(crtc, pipe_config);
10055 		else
10056 			ironlake_get_pfit_config(crtc, pipe_config);
10057 	}
10058 
10059 	if (IS_HASWELL(dev))
10060 		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
10061 			(I915_READ(IPS_CTL) & IPS_ENABLE);
10062 
10063 	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
10064 	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
10065 		pipe_config->pixel_multiplier =
10066 			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
10067 	} else {
10068 		pipe_config->pixel_multiplier = 1;
10069 	}
10070 
10071 out:
10072 	for_each_power_domain(power_domain, power_domain_mask)
10073 		intel_display_power_put(dev_priv, power_domain);
10074 
10075 	return active;
10076 }
10077 
10078 static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
10079 			       const struct intel_plane_state *plane_state)
10080 {
10081 	struct drm_device *dev = crtc->dev;
10082 	struct drm_i915_private *dev_priv = dev->dev_private;
10083 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10084 	uint32_t cntl = 0, size = 0;
10085 
10086 	if (plane_state && plane_state->visible) {
10087 		unsigned int width = plane_state->base.crtc_w;
10088 		unsigned int height = plane_state->base.crtc_h;
10089 		unsigned int stride = roundup_pow_of_two(width) * 4;
10090 
10091 		switch (stride) {
10092 		default:
10093 			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
10094 				  width, stride);
10095 			stride = 256;
10096 			/* fallthrough */
10097 		case 256:
10098 		case 512:
10099 		case 1024:
10100 		case 2048:
10101 			break;
10102 		}
10103 
10104 		cntl |= CURSOR_ENABLE |
10105 			CURSOR_GAMMA_ENABLE |
10106 			CURSOR_FORMAT_ARGB |
10107 			CURSOR_STRIDE(stride);
10108 
10109 		size = (height << 12) | width;
10110 	}
10111 
10112 	if (intel_crtc->cursor_cntl != 0 &&
10113 	    (intel_crtc->cursor_base != base ||
10114 	     intel_crtc->cursor_size != size ||
10115 	     intel_crtc->cursor_cntl != cntl)) {
10116 		/* On these chipsets we can only modify the base/size/stride
10117 		 * whilst the cursor is disabled.
10118 		 */
10119 		I915_WRITE(CURCNTR(PIPE_A), 0);
10120 		POSTING_READ(CURCNTR(PIPE_A));
10121 		intel_crtc->cursor_cntl = 0;
10122 	}
10123 
10124 	if (intel_crtc->cursor_base != base) {
10125 		I915_WRITE(CURBASE(PIPE_A), base);
10126 		intel_crtc->cursor_base = base;
10127 	}
10128 
10129 	if (intel_crtc->cursor_size != size) {
10130 		I915_WRITE(CURSIZE, size);
10131 		intel_crtc->cursor_size = size;
10132 	}
10133 
10134 	if (intel_crtc->cursor_cntl != cntl) {
10135 		I915_WRITE(CURCNTR(PIPE_A), cntl);
10136 		POSTING_READ(CURCNTR(PIPE_A));
10137 		intel_crtc->cursor_cntl = cntl;
10138 	}
10139 }
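
/*
 * Stride example for the 845g/865g path above (illustrative width): a
 * 100 pixel wide cursor rounds up to roundup_pow_of_two(100) = 128
 * pixels, i.e. a 512 byte stride at 4 bytes per pixel. The switch
 * above accepts exactly the strides 256/512/1024/2048, matching cursor
 * widths of 64/128/256/512 pixels.
 */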
10140 
10141 static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
10142 			       const struct intel_plane_state *plane_state)
10143 {
10144 	struct drm_device *dev = crtc->dev;
10145 	struct drm_i915_private *dev_priv = dev->dev_private;
10146 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10147 	int pipe = intel_crtc->pipe;
10148 	uint32_t cntl = 0;
10149 
10150 	if (plane_state && plane_state->visible) {
10151 		cntl = MCURSOR_GAMMA_ENABLE;
10152 		switch (plane_state->base.crtc_w) {
10153 			case 64:
10154 				cntl |= CURSOR_MODE_64_ARGB_AX;
10155 				break;
10156 			case 128:
10157 				cntl |= CURSOR_MODE_128_ARGB_AX;
10158 				break;
10159 			case 256:
10160 				cntl |= CURSOR_MODE_256_ARGB_AX;
10161 				break;
10162 			default:
10163 				MISSING_CASE(plane_state->base.crtc_w);
10164 				return;
10165 		}
10166 		cntl |= pipe << 28; /* Connect to correct pipe */
10167 
10168 		if (HAS_DDI(dev))
10169 			cntl |= CURSOR_PIPE_CSC_ENABLE;
10170 
10171 		if (plane_state->base.rotation == BIT(DRM_ROTATE_180))
10172 			cntl |= CURSOR_ROTATE_180;
10173 	}
10174 
10175 	if (intel_crtc->cursor_cntl != cntl) {
10176 		I915_WRITE(CURCNTR(pipe), cntl);
10177 		POSTING_READ(CURCNTR(pipe));
10178 		intel_crtc->cursor_cntl = cntl;
10179 	}
10180 
10181 	/* and commit changes on next vblank */
10182 	I915_WRITE(CURBASE(pipe), base);
10183 	POSTING_READ(CURBASE(pipe));
10184 
10185 	intel_crtc->cursor_base = base;
10186 }
10187 
10188 /* If no part of the cursor is visible on the framebuffer, the GPU may hang... */
10189 static void intel_crtc_update_cursor(struct drm_crtc *crtc,
10190 				     const struct intel_plane_state *plane_state)
10191 {
10192 	struct drm_device *dev = crtc->dev;
10193 	struct drm_i915_private *dev_priv = dev->dev_private;
10194 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10195 	int pipe = intel_crtc->pipe;
10196 	u32 base = intel_crtc->cursor_addr;
10197 	u32 pos = 0;
10198 
10199 	if (plane_state) {
10200 		int x = plane_state->base.crtc_x;
10201 		int y = plane_state->base.crtc_y;
10202 
10203 		if (x < 0) {
10204 			pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10205 			x = -x;
10206 		}
10207 		pos |= x << CURSOR_X_SHIFT;
10208 
10209 		if (y < 0) {
10210 			pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10211 			y = -y;
10212 		}
10213 		pos |= y << CURSOR_Y_SHIFT;
10214 
10215 		/* ILK+ do this automagically */
10216 		if (HAS_GMCH_DISPLAY(dev) &&
10217 		    plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
10218 			base += (plane_state->base.crtc_h *
10219 				 plane_state->base.crtc_w - 1) * 4;
10220 		}
10221 	}
10222 
10223 	I915_WRITE(CURPOS(pipe), pos);
10224 
10225 	if (IS_845G(dev) || IS_I865G(dev))
10226 		i845_update_cursor(crtc, base, plane_state);
10227 	else
10228 		i9xx_update_cursor(crtc, base, plane_state);
10229 }
10230 
10231 static bool cursor_size_ok(struct drm_device *dev,
10232 			   uint32_t width, uint32_t height)
10233 {
10234 	if (width == 0 || height == 0)
10235 		return false;
10236 
10237 	/*
10238 	 * 845g/865g are special in that they are only limited by
10239 	 * the width of their cursors; the height is arbitrary up to
10240 	 * the precision of the register. Everything else requires
10241 	 * square cursors, limited to a few power-of-two sizes.
10242 	 */
10243 	if (IS_845G(dev) || IS_I865G(dev)) {
10244 		if ((width & 63) != 0)
10245 			return false;
10246 
10247 		if (width > (IS_845G(dev) ? 64 : 512))
10248 			return false;
10249 
10250 		if (height > 1023)
10251 			return false;
10252 	} else {
10253 		switch (width | height) {
10254 		case 256:
10255 		case 128:
10256 			if (IS_GEN2(dev))
10257 				return false;	/* else fall through */
10258 		case 64:
10259 			break;
10260 		default:
10261 			return false;
10262 		}
10263 	}
10264 
10265 	return true;
10266 }
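
/*
 * Examples of what the checks above accept (illustrative sizes): 845g
 * takes 64x300 (width a multiple of 64 and at most 64, height up to
 * 1023), 865g takes 512x2. Everything else must be a square
 * power-of-two cursor: 64x64 on gen2, and 64x64, 128x128 or 256x256
 * elsewhere, since (width | height) only hits a case label when
 * width == height.
 */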
10267 
10268 /* VESA 640x480x72Hz mode to set on the pipe */
10269 static struct drm_display_mode load_detect_mode = {
10270 	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
10271 		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
10272 };
10273 
10274 struct drm_framebuffer *
10275 __intel_framebuffer_create(struct drm_device *dev,
10276 			   struct drm_mode_fb_cmd2 *mode_cmd,
10277 			   struct drm_i915_gem_object *obj)
10278 {
10279 	struct intel_framebuffer *intel_fb;
10280 	int ret;
10281 
10282 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10283 	if (!intel_fb)
10284 		return ERR_PTR(-ENOMEM);
10285 
10286 	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
10287 	if (ret)
10288 		goto err;
10289 
10290 	return &intel_fb->base;
10291 
10292 err:
10293 	kfree(intel_fb);
10294 	return ERR_PTR(ret);
10295 }
10296 
10297 static struct drm_framebuffer *
10298 intel_framebuffer_create(struct drm_device *dev,
10299 			 struct drm_mode_fb_cmd2 *mode_cmd,
10300 			 struct drm_i915_gem_object *obj)
10301 {
10302 	struct drm_framebuffer *fb;
10303 	int ret;
10304 
10305 	ret = i915_mutex_lock_interruptible(dev);
10306 	if (ret)
10307 		return ERR_PTR(ret);
10308 	fb = __intel_framebuffer_create(dev, mode_cmd, obj);
10309 	mutex_unlock(&dev->struct_mutex);
10310 
10311 	return fb;
10312 }
10313 
10314 static u32
10315 intel_framebuffer_pitch_for_width(int width, int bpp)
10316 {
10317 	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
10318 	return ALIGN(pitch, 64);
10319 }
10320 
10321 static u32
10322 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
10323 {
10324 	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
10325 	return PAGE_ALIGN(pitch * mode->vdisplay);
10326 }
10327 
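/*
 * A minimal usage sketch of the two helpers above (never compiled;
 * 'mode' stands for any 640x480 struct drm_display_mode):
 */
#if 0
	u32 pitch = intel_framebuffer_pitch_for_width(640, 32);
	/* pitch == 2560: DIV_ROUND_UP(640 * 32, 8) is already 64-byte aligned */
	u32 size = intel_framebuffer_size_for_mode(&mode, 32);
	/* size == 1228800: PAGE_ALIGN(2560 * 480), exactly 300 4K pages */
#endif
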
10328 static struct drm_framebuffer *
10329 intel_framebuffer_create_for_mode(struct drm_device *dev,
10330 				  struct drm_display_mode *mode,
10331 				  int depth, int bpp)
10332 {
10333 	struct drm_framebuffer *fb;
10334 	struct drm_i915_gem_object *obj;
10335 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
10336 
10337 	obj = i915_gem_alloc_object(dev,
10338 				    intel_framebuffer_size_for_mode(mode, bpp));
10339 	if (obj == NULL)
10340 		return ERR_PTR(-ENOMEM);
10341 
10342 	mode_cmd.width = mode->hdisplay;
10343 	mode_cmd.height = mode->vdisplay;
10344 	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
10345 								bpp);
10346 	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
10347 
10348 	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
10349 	if (IS_ERR(fb))
10350 		drm_gem_object_unreference_unlocked(&obj->base);
10351 
10352 	return fb;
10353 }
10354 
10355 static struct drm_framebuffer *
10356 mode_fits_in_fbdev(struct drm_device *dev,
10357 		   struct drm_display_mode *mode)
10358 {
10359 #ifdef CONFIG_DRM_FBDEV_EMULATION
10360 	struct drm_i915_private *dev_priv = dev->dev_private;
10361 	struct drm_i915_gem_object *obj;
10362 	struct drm_framebuffer *fb;
10363 
10364 	if (!dev_priv->fbdev)
10365 		return NULL;
10366 
10367 	if (!dev_priv->fbdev->fb)
10368 		return NULL;
10369 
10370 	obj = dev_priv->fbdev->fb->obj;
10371 	BUG_ON(!obj);
10372 
10373 	fb = &dev_priv->fbdev->fb->base;
10374 	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
10375 							       fb->bits_per_pixel))
10376 		return NULL;
10377 
10378 	if (obj->base.size < mode->vdisplay * fb->pitches[0])
10379 		return NULL;
10380 
10381 	drm_framebuffer_reference(fb);
10382 	return fb;
10383 #else
10384 	return NULL;
10385 #endif
10386 }
10387 
10388 static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
10389 					   struct drm_crtc *crtc,
10390 					   struct drm_display_mode *mode,
10391 					   struct drm_framebuffer *fb,
10392 					   int x, int y)
10393 {
10394 	struct drm_plane_state *plane_state;
10395 	int hdisplay, vdisplay;
10396 	int ret;
10397 
10398 	plane_state = drm_atomic_get_plane_state(state, crtc->primary);
10399 	if (IS_ERR(plane_state))
10400 		return PTR_ERR(plane_state);
10401 
10402 	if (mode)
10403 		drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
10404 	else
10405 		hdisplay = vdisplay = 0;
10406 
10407 	ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
10408 	if (ret)
10409 		return ret;
10410 	drm_atomic_set_fb_for_plane(plane_state, fb);
10411 	plane_state->crtc_x = 0;
10412 	plane_state->crtc_y = 0;
10413 	plane_state->crtc_w = hdisplay;
10414 	plane_state->crtc_h = vdisplay;
10415 	plane_state->src_x = x << 16;
10416 	plane_state->src_y = y << 16;
10417 	plane_state->src_w = hdisplay << 16;
10418 	plane_state->src_h = vdisplay << 16;
10419 
10420 	return 0;
10421 }
10422 
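/*
 * Note on the << 16 shifts above: plane source coordinates are in
 * 16.16 fixed point. As a sketch, a 640x480 load-detect mode yields
 * src_w == 640 << 16 == 0x2800000 and src_h == 480 << 16 == 0x1e00000,
 * i.e. the integer sizes with a zero fractional part.
 */
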
10423 bool intel_get_load_detect_pipe(struct drm_connector *connector,
10424 				struct drm_display_mode *mode,
10425 				struct intel_load_detect_pipe *old,
10426 				struct drm_modeset_acquire_ctx *ctx)
10427 {
10428 	struct intel_crtc *intel_crtc;
10429 	struct intel_encoder *intel_encoder =
10430 		intel_attached_encoder(connector);
10431 	struct drm_crtc *possible_crtc;
10432 	struct drm_encoder *encoder = &intel_encoder->base;
10433 	struct drm_crtc *crtc = NULL;
10434 	struct drm_device *dev = encoder->dev;
10435 	struct drm_framebuffer *fb;
10436 	struct drm_mode_config *config = &dev->mode_config;
10437 	struct drm_atomic_state *state = NULL, *restore_state = NULL;
10438 	struct drm_connector_state *connector_state;
10439 	struct intel_crtc_state *crtc_state;
10440 	int ret, i = -1;
10441 
10442 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10443 		      connector->base.id, connector->name,
10444 		      encoder->base.id, encoder->name);
10445 
10446 	old->restore_state = NULL;
10447 
10448 retry:
10449 	ret = drm_modeset_lock(&config->connection_mutex, ctx);
10450 	if (ret)
10451 		goto fail;
10452 
10453 	/*
10454 	 * Algorithm gets a little messy:
10455 	 *
10456 	 *   - if the connector already has an assigned crtc, use it (but make
10457 	 *     sure it's on first)
10458 	 *
10459 	 *   - try to find the first unused crtc that can drive this connector,
10460 	 *     and use that if we find one
10461 	 */
10462 
10463 	/* See if we already have a CRTC for this connector */
10464 	if (connector->state->crtc) {
10465 		crtc = connector->state->crtc;
10466 
10467 		ret = drm_modeset_lock(&crtc->mutex, ctx);
10468 		if (ret)
10469 			goto fail;
10470 
10471 		/* Make sure the crtc and connector are running */
10472 		goto found;
10473 	}
10474 
10475 	/* Find an unused one (if possible) */
10476 	for_each_crtc(dev, possible_crtc) {
10477 		i++;
10478 		if (!(encoder->possible_crtcs & (1 << i)))
10479 			continue;
10480 
10481 		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
10482 		if (ret)
10483 			goto fail;
10484 
10485 		if (possible_crtc->state->enable) {
10486 			drm_modeset_unlock(&possible_crtc->mutex);
10487 			continue;
10488 		}
10489 
10490 		crtc = possible_crtc;
10491 		break;
10492 	}
10493 
10494 	/*
10495 	 * If we didn't find an unused CRTC, don't use any.
10496 	 */
10497 	if (!crtc) {
10498 		DRM_DEBUG_KMS("no pipe available for load-detect\n");
10499 		goto fail;
10500 	}
10501 
10502 found:
10503 	intel_crtc = to_intel_crtc(crtc);
10504 
10505 	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
10506 	if (ret)
10507 		goto fail;
10508 
10509 	state = drm_atomic_state_alloc(dev);
10510 	restore_state = drm_atomic_state_alloc(dev);
10511 	if (!state || !restore_state) {
10512 		ret = -ENOMEM;
10513 		goto fail;
10514 	}
10515 
10516 	state->acquire_ctx = ctx;
10517 	restore_state->acquire_ctx = ctx;
10518 
10519 	connector_state = drm_atomic_get_connector_state(state, connector);
10520 	if (IS_ERR(connector_state)) {
10521 		ret = PTR_ERR(connector_state);
10522 		goto fail;
10523 	}
10524 
10525 	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
10526 	if (ret)
10527 		goto fail;
10528 
10529 	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
10530 	if (IS_ERR(crtc_state)) {
10531 		ret = PTR_ERR(crtc_state);
10532 		goto fail;
10533 	}
10534 
10535 	crtc_state->base.active = crtc_state->base.enable = true;
10536 
10537 	if (!mode)
10538 		mode = &load_detect_mode;
10539 
10540 	/* We need a framebuffer large enough to accommodate all accesses
10541 	 * that the plane may generate whilst we perform load detection.
10542 	 * We cannot rely on the fbcon either being present (we get called
10543 	 * during its initialisation to detect all boot displays, or it may
10544 	 * not even exist) or being large enough to satisfy the
10545 	 * requested mode.
10546 	 */
10547 	fb = mode_fits_in_fbdev(dev, mode);
10548 	if (fb == NULL) {
10549 		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
10550 		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
10551 	} else
10552 		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
10553 	if (IS_ERR(fb)) {
10554 		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
10555 		goto fail;
10556 	}
10557 
10558 	ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
10559 	if (ret)
10560 		goto fail;
10561 
10562 	drm_framebuffer_unreference(fb);
10563 
10564 	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
10565 	if (ret)
10566 		goto fail;
10567 
10568 	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
10569 	if (!ret)
10570 		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
10571 	if (!ret)
10572 		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
10573 	if (ret) {
10574 		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
10575 		goto fail;
10576 	}
10577 
10578 	ret = drm_atomic_commit(state);
10579 	if (ret) {
10580 		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
10581 		goto fail;
10582 	}
10583 
10584 	old->restore_state = restore_state;
10585 
10586 	/* let the connector get through one full cycle before testing */
10587 	intel_wait_for_vblank(dev, intel_crtc->pipe);
10588 	return true;
10589 
10590 fail:
10591 	drm_atomic_state_free(state);
10592 	drm_atomic_state_free(restore_state);
10593 	restore_state = state = NULL;
10594 
10595 	if (ret == -EDEADLK) {
10596 		drm_modeset_backoff(ctx);
10597 		goto retry;
10598 	}
10599 
10600 	return false;
10601 }
10602 
10603 void intel_release_load_detect_pipe(struct drm_connector *connector,
10604 				    struct intel_load_detect_pipe *old,
10605 				    struct drm_modeset_acquire_ctx *ctx)
10606 {
10607 	struct intel_encoder *intel_encoder =
10608 		intel_attached_encoder(connector);
10609 	struct drm_encoder *encoder = &intel_encoder->base;
10610 	struct drm_atomic_state *state = old->restore_state;
10611 	int ret;
10612 
10613 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10614 		      connector->base.id, connector->name,
10615 		      encoder->base.id, encoder->name);
10616 
10617 	if (!state)
10618 		return;
10619 
10620 	ret = drm_atomic_commit(state);
10621 	if (ret) {
10622 		DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
10623 		drm_atomic_state_free(state);
10624 	}
10625 }
10626 
10627 static int i9xx_pll_refclk(struct drm_device *dev,
10628 			   const struct intel_crtc_state *pipe_config)
10629 {
10630 	struct drm_i915_private *dev_priv = dev->dev_private;
10631 	u32 dpll = pipe_config->dpll_hw_state.dpll;
10632 
10633 	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
10634 		return dev_priv->vbt.lvds_ssc_freq;
10635 	else if (HAS_PCH_SPLIT(dev))
10636 		return 120000;
10637 	else if (!IS_GEN2(dev))
10638 		return 96000;
10639 	else
10640 		return 48000;
10641 }
10642 
10643 /* Returns the clock of the currently programmed mode of the given pipe. */
10644 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
10645 				struct intel_crtc_state *pipe_config)
10646 {
10647 	struct drm_device *dev = crtc->base.dev;
10648 	struct drm_i915_private *dev_priv = dev->dev_private;
10649 	int pipe = pipe_config->cpu_transcoder;
10650 	u32 dpll = pipe_config->dpll_hw_state.dpll;
10651 	u32 fp;
10652 	intel_clock_t clock;
10653 	int port_clock;
10654 	int refclk = i9xx_pll_refclk(dev, pipe_config);
10655 
10656 	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
10657 		fp = pipe_config->dpll_hw_state.fp0;
10658 	else
10659 		fp = pipe_config->dpll_hw_state.fp1;
10660 
10661 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
10662 	if (IS_PINEVIEW(dev)) {
10663 		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
10664 		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
10665 	} else {
10666 		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
10667 		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
10668 	}
10669 
10670 	if (!IS_GEN2(dev)) {
10671 		if (IS_PINEVIEW(dev))
10672 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
10673 				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
10674 		else
10675 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
10676 			       DPLL_FPA01_P1_POST_DIV_SHIFT);
10677 
10678 		switch (dpll & DPLL_MODE_MASK) {
10679 		case DPLLB_MODE_DAC_SERIAL:
10680 			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
10681 				5 : 10;
10682 			break;
10683 		case DPLLB_MODE_LVDS:
10684 			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
10685 				7 : 14;
10686 			break;
10687 		default:
10688 			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
10689 				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
10690 			return;
10691 		}
10692 
10693 		if (IS_PINEVIEW(dev))
10694 			port_clock = pnv_calc_dpll_params(refclk, &clock);
10695 		else
10696 			port_clock = i9xx_calc_dpll_params(refclk, &clock);
10697 	} else {
10698 		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
10699 		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
10700 
10701 		if (is_lvds) {
10702 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
10703 				       DPLL_FPA01_P1_POST_DIV_SHIFT);
10704 
10705 			if (lvds & LVDS_CLKB_POWER_UP)
10706 				clock.p2 = 7;
10707 			else
10708 				clock.p2 = 14;
10709 		} else {
10710 			if (dpll & PLL_P1_DIVIDE_BY_TWO)
10711 				clock.p1 = 2;
10712 			else {
10713 				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
10714 					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
10715 			}
10716 			if (dpll & PLL_P2_DIVIDE_BY_4)
10717 				clock.p2 = 4;
10718 			else
10719 				clock.p2 = 2;
10720 		}
10721 
10722 		port_clock = i9xx_calc_dpll_params(refclk, &clock);
10723 	}
10724 
10725 	/*
10726 	 * This value includes pixel_multiplier. We will use
10727 	 * port_clock to compute adjusted_mode.crtc_clock in the
10728 	 * encoder's get_config() function.
10729 	 */
10730 	pipe_config->port_clock = port_clock;
10731 }
10732 
10733 int intel_dotclock_calculate(int link_freq,
10734 			     const struct intel_link_m_n *m_n)
10735 {
10736 	/*
10737 	 * The calculation for the data clock is:
10738 	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
10739 	 * But we want to avoid losing precision if possible, so:
10740 	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
10741 	 *
10742 	 * and for the link M/N values the dot clock is simply:
10743 	 * dot_clock = (m * link_clock) / n
10744 	 */
10745 
10746 	if (!m_n->link_n)
10747 		return 0;
10748 
10749 	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
10750 }
10751 
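/*
 * Worked example (illustrative numbers only): with link_m = 107163,
 * link_n = 524288 and a 270000 kHz link clock, the function returns
 * 107163 * 270000 / 524288 ~= 55188, i.e. a ~55.2 MHz dot clock.
 * Doing the division last is what preserves the precision here.
 */
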
10752 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
10753 				   struct intel_crtc_state *pipe_config)
10754 {
10755 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10756 
10757 	/* read out port_clock from the DPLL */
10758 	i9xx_crtc_clock_get(crtc, pipe_config);
10759 
10760 	/*
10761 	 * In case there is an active pipe without active ports,
10762 	 * we may need some idea for the dotclock anyway.
10763 	 * Calculate one based on the FDI configuration.
10764 	 */
10765 	pipe_config->base.adjusted_mode.crtc_clock =
10766 		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
10767 					 &pipe_config->fdi_m_n);
10768 }
10769 
10770 /** Returns the currently programmed mode of the given pipe. */
10771 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
10772 					     struct drm_crtc *crtc)
10773 {
10774 	struct drm_i915_private *dev_priv = dev->dev_private;
10775 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10776 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
10777 	struct drm_display_mode *mode;
10778 	struct intel_crtc_state *pipe_config;
10779 	int htot = I915_READ(HTOTAL(cpu_transcoder));
10780 	int hsync = I915_READ(HSYNC(cpu_transcoder));
10781 	int vtot = I915_READ(VTOTAL(cpu_transcoder));
10782 	int vsync = I915_READ(VSYNC(cpu_transcoder));
10783 	enum i915_pipe pipe = intel_crtc->pipe;
10784 
10785 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
10786 	if (!mode)
10787 		return NULL;
10788 
10789 	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
10790 	if (!pipe_config) {
10791 		kfree(mode);
10792 		return NULL;
10793 	}
10794 
10795 	/*
10796 	 * Construct a pipe_config sufficient for getting the clock info
10797 	 * back out of crtc_clock_get.
10798 	 *
10799 	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
10800 	 * to use a real value here instead.
10801 	 */
10802 	pipe_config->cpu_transcoder = (enum transcoder) pipe;
10803 	pipe_config->pixel_multiplier = 1;
10804 	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
10805 	pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
10806 	pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
10807 	i9xx_crtc_clock_get(intel_crtc, pipe_config);
10808 
10809 	mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
10810 	mode->hdisplay = (htot & 0xffff) + 1;
10811 	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
10812 	mode->hsync_start = (hsync & 0xffff) + 1;
10813 	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
10814 	mode->vdisplay = (vtot & 0xffff) + 1;
10815 	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
10816 	mode->vsync_start = (vsync & 0xffff) + 1;
10817 	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
10818 
10819 	drm_mode_set_name(mode);
10820 
10821 	kfree(pipe_config);
10822 
10823 	return mode;
10824 }
10825 
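/*
 * Sketch of the timing register decoding above with illustrative
 * values: for a 640x480 mode HTOTAL would read 0x031f027f, giving
 * hdisplay == (0x027f & 0xffff) + 1 == 640 and
 * htotal == (0x031f027f >> 16) + 1 == 800, since the hardware stores
 * each field as "value - 1", active size in the low word and total
 * in the high word.
 */
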
10826 void intel_mark_busy(struct drm_device *dev)
10827 {
10828 	struct drm_i915_private *dev_priv = dev->dev_private;
10829 
10830 	if (dev_priv->mm.busy)
10831 		return;
10832 
10833 	intel_runtime_pm_get(dev_priv);
10834 	i915_update_gfx_val(dev_priv);
10835 	if (INTEL_INFO(dev)->gen >= 6)
10836 		gen6_rps_busy(dev_priv);
10837 	dev_priv->mm.busy = true;
10838 }
10839 
10840 void intel_mark_idle(struct drm_device *dev)
10841 {
10842 	struct drm_i915_private *dev_priv = dev->dev_private;
10843 
10844 	if (!dev_priv->mm.busy)
10845 		return;
10846 
10847 	dev_priv->mm.busy = false;
10848 
10849 	if (INTEL_INFO(dev)->gen >= 6)
10850 		gen6_rps_idle(dev->dev_private);
10851 
10852 	intel_runtime_pm_put(dev_priv);
10853 }
10854 
10855 static void intel_crtc_destroy(struct drm_crtc *crtc)
10856 {
10857 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10858 	struct drm_device *dev = crtc->dev;
10859 	struct intel_unpin_work *work;
10860 
10861 	spin_lock_irq(&dev->event_lock);
10862 	work = intel_crtc->unpin_work;
10863 	intel_crtc->unpin_work = NULL;
10864 	spin_unlock_irq(&dev->event_lock);
10865 
10866 	if (work) {
10867 		cancel_work_sync(&work->work);
10868 		kfree(work);
10869 	}
10870 
10871 	drm_crtc_cleanup(crtc);
10872 
10873 	kfree(intel_crtc);
10874 }
10875 
10876 static void intel_unpin_work_fn(struct work_struct *__work)
10877 {
10878 	struct intel_unpin_work *work =
10879 		container_of(__work, struct intel_unpin_work, work);
10880 	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
10881 	struct drm_device *dev = crtc->base.dev;
10882 	struct drm_plane *primary = crtc->base.primary;
10883 
10884 	mutex_lock(&dev->struct_mutex);
10885 	intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
10886 	drm_gem_object_unreference(&work->pending_flip_obj->base);
10887 
10888 	if (work->flip_queued_req)
10889 		i915_gem_request_assign(&work->flip_queued_req, NULL);
10890 	mutex_unlock(&dev->struct_mutex);
10891 
10892 	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
10893 	intel_fbc_post_update(crtc);
10894 	drm_framebuffer_unreference(work->old_fb);
10895 
10896 	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
10897 	atomic_dec(&crtc->unpin_work_count);
10898 
10899 	kfree(work);
10900 }
10901 
10902 static void do_intel_finish_page_flip(struct drm_device *dev,
10903 				      struct drm_crtc *crtc)
10904 {
10905 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10906 	struct intel_unpin_work *work;
10907 	unsigned long flags;
10908 
10909 	/* Ignore early vblank irqs */
10910 	if (intel_crtc == NULL)
10911 		return;
10912 
10913 	/*
10914 	 * This is called both by irq handlers and the reset code (to complete
10915 	 * lost pageflips) so needs the full irqsave spinlocks.
10916 	 */
10917 	spin_lock_irqsave(&dev->event_lock, flags);
10918 	work = intel_crtc->unpin_work;
10919 
10920 	/* Ensure we don't miss a work->pending update ... */
10921 	smp_rmb();
10922 
10923 	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
10924 		spin_unlock_irqrestore(&dev->event_lock, flags);
10925 		return;
10926 	}
10927 
10928 	page_flip_completed(intel_crtc);
10929 
10930 	spin_unlock_irqrestore(&dev->event_lock, flags);
10931 }
10932 
10933 void intel_finish_page_flip(struct drm_device *dev, int pipe)
10934 {
10935 	struct drm_i915_private *dev_priv = dev->dev_private;
10936 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
10937 
10938 	do_intel_finish_page_flip(dev, crtc);
10939 }
10940 
10941 void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
10942 {
10943 	struct drm_i915_private *dev_priv = dev->dev_private;
10944 	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
10945 
10946 	do_intel_finish_page_flip(dev, crtc);
10947 }
10948 
10949 /* Is 'a' after or equal to 'b'? */
10950 static bool g4x_flip_count_after_eq(u32 a, u32 b)
10951 {
10952 	return !((a - b) & 0x80000000);
10953 }
10954 
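/*
 * Worked example of the wraparound-safe comparison above (never
 * compiled; values are illustrative):
 */
#if 0
	g4x_flip_count_after_eq(2, 0xfffffffeU); /* true:  2 - 0xfffffffe == 4 */
	g4x_flip_count_after_eq(5, 5);           /* true:  equal counts as "after" */
	g4x_flip_count_after_eq(0, 1);           /* false: 0 - 1 == 0xffffffff, top bit set */
#endif
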
10955 static bool page_flip_finished(struct intel_crtc *crtc)
10956 {
10957 	struct drm_device *dev = crtc->base.dev;
10958 	struct drm_i915_private *dev_priv = dev->dev_private;
10959 	unsigned reset_counter;
10960 
10961 	reset_counter = i915_reset_counter(&dev_priv->gpu_error);
10962 	if (crtc->reset_counter != reset_counter)
10963 		return true;
10964 
10965 	/*
10966 	 * The relevant registers don't exist on pre-ctg.
10967 	 * As the flip done interrupt doesn't trigger for mmio
10968 	 * flips on gmch platforms, a flip count check isn't
10969 	 * really needed there. But since ctg has the registers,
10970 	 * include it in the check anyway.
10971 	 */
10972 	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
10973 		return true;
10974 
10975 	/*
10976 	 * BDW signals flip done immediately if the plane
10977 	 * is disabled, even if the plane enable is already
10978 	 * armed to occur at the next vblank :(
10979 	 */
10980 
10981 	/*
10982 	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
10983 	 * used the same base address. In that case the mmio flip might
10984 	 * have completed, but the CS hasn't even executed the flip yet.
10985 	 *
10986 	 * A flip count check isn't enough as the CS might have updated
10987 	 * the base address just after start of vblank, but before we
10988 	 * managed to process the interrupt. This means we'd complete the
10989 	 * CS flip too soon.
10990 	 *
10991 	 * Combining both checks should get us a good enough result. It may
10992 	 * still happen that the CS flip has been executed, but has not
10993 	 * yet actually completed. But in case the base address is the same
10994 	 * anyway, we don't really care.
10995 	 */
10996 	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
10997 		crtc->unpin_work->gtt_offset &&
10998 		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
10999 				    crtc->unpin_work->flip_count);
11000 }
11001 
11002 void intel_prepare_page_flip(struct drm_device *dev, int plane)
11003 {
11004 	struct drm_i915_private *dev_priv = dev->dev_private;
11005 	struct intel_crtc *intel_crtc =
11006 		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
11007 	unsigned long flags;
11008 
11010 	/*
11011 	 * This is called both by irq handlers and the reset code (to complete
11012 	 * lost pageflips) so needs the full irqsave spinlocks.
11013 	 *
11014 	 * NB: An MMIO update of the plane base pointer will also
11015 	 * generate a page-flip completion irq, i.e. every modeset
11016 	 * is also accompanied by a spurious intel_prepare_page_flip().
11017 	 */
11018 	spin_lock_irqsave(&dev->event_lock, flags);
11019 	if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
11020 		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
11021 	spin_unlock_irqrestore(&dev->event_lock, flags);
11022 }
11023 
11024 static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
11025 {
11026 	/* Ensure that the work item is consistent when activating it ... */
11027 	smp_wmb();
11028 	atomic_set(&work->pending, INTEL_FLIP_PENDING);
11029 	/* and that it is marked active as soon as the irq could fire. */
11030 	smp_wmb();
11031 }
11032 
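/*
 * The barriers above are assumed to pair with the smp_rmb() in
 * do_intel_finish_page_flip(): a reader that observes
 * work->pending >= INTEL_FLIP_PENDING must also observe a fully
 * initialised work item.
 */
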
11033 static int intel_gen2_queue_flip(struct drm_device *dev,
11034 				 struct drm_crtc *crtc,
11035 				 struct drm_framebuffer *fb,
11036 				 struct drm_i915_gem_object *obj,
11037 				 struct drm_i915_gem_request *req,
11038 				 uint32_t flags)
11039 {
11040 	struct intel_engine_cs *engine = req->engine;
11041 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11042 	u32 flip_mask;
11043 	int ret;
11044 
11045 	ret = intel_ring_begin(req, 6);
11046 	if (ret)
11047 		return ret;
11048 
11049 	/* Can't queue multiple flips, so wait for the previous
11050 	 * one to finish before executing the next.
11051 	 */
11052 	if (intel_crtc->plane)
11053 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
11054 	else
11055 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
11056 	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
11057 	intel_ring_emit(engine, MI_NOOP);
11058 	intel_ring_emit(engine, MI_DISPLAY_FLIP |
11059 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11060 	intel_ring_emit(engine, fb->pitches[0]);
11061 	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
11062 	intel_ring_emit(engine, 0); /* aux display base address, unused */
11063 
11064 	intel_mark_page_flip_active(intel_crtc->unpin_work);
11065 	return 0;
11066 }
11067 
11068 static int intel_gen3_queue_flip(struct drm_device *dev,
11069 				 struct drm_crtc *crtc,
11070 				 struct drm_framebuffer *fb,
11071 				 struct drm_i915_gem_object *obj,
11072 				 struct drm_i915_gem_request *req,
11073 				 uint32_t flags)
11074 {
11075 	struct intel_engine_cs *engine = req->engine;
11076 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11077 	u32 flip_mask;
11078 	int ret;
11079 
11080 	ret = intel_ring_begin(req, 6);
11081 	if (ret)
11082 		return ret;
11083 
11084 	if (intel_crtc->plane)
11085 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
11086 	else
11087 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
11088 	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
11089 	intel_ring_emit(engine, MI_NOOP);
11090 	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
11091 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11092 	intel_ring_emit(engine, fb->pitches[0]);
11093 	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
11094 	intel_ring_emit(engine, MI_NOOP);
11095 
11096 	intel_mark_page_flip_active(intel_crtc->unpin_work);
11097 	return 0;
11098 }
11099 
11100 static int intel_gen4_queue_flip(struct drm_device *dev,
11101 				 struct drm_crtc *crtc,
11102 				 struct drm_framebuffer *fb,
11103 				 struct drm_i915_gem_object *obj,
11104 				 struct drm_i915_gem_request *req,
11105 				 uint32_t flags)
11106 {
11107 	struct intel_engine_cs *engine = req->engine;
11108 	struct drm_i915_private *dev_priv = dev->dev_private;
11109 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11110 	uint32_t pf, pipesrc;
11111 	int ret;
11112 
11113 	ret = intel_ring_begin(req, 4);
11114 	if (ret)
11115 		return ret;
11116 
11117 	/* i965+ uses the linear or tiled offsets from the
11118 	 * Display Registers (which do not change across a page-flip)
11119 	 * so we need only reprogram the base address.
11120 	 */
11121 	intel_ring_emit(engine, MI_DISPLAY_FLIP |
11122 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11123 	intel_ring_emit(engine, fb->pitches[0]);
11124 	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset |
11125 			obj->tiling_mode);
11126 
11127 	/* XXX Enabling the panel-fitter across page-flip is so far
11128 	 * untested on non-native modes, so ignore it for now.
11129 	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
11130 	 */
11131 	pf = 0;
11132 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11133 	intel_ring_emit(engine, pf | pipesrc);
11134 
11135 	intel_mark_page_flip_active(intel_crtc->unpin_work);
11136 	return 0;
11137 }
11138 
11139 static int intel_gen6_queue_flip(struct drm_device *dev,
11140 				 struct drm_crtc *crtc,
11141 				 struct drm_framebuffer *fb,
11142 				 struct drm_i915_gem_object *obj,
11143 				 struct drm_i915_gem_request *req,
11144 				 uint32_t flags)
11145 {
11146 	struct intel_engine_cs *engine = req->engine;
11147 	struct drm_i915_private *dev_priv = dev->dev_private;
11148 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11149 	uint32_t pf, pipesrc;
11150 	int ret;
11151 
11152 	ret = intel_ring_begin(req, 4);
11153 	if (ret)
11154 		return ret;
11155 
11156 	intel_ring_emit(engine, MI_DISPLAY_FLIP |
11157 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11158 	intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
11159 	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
11160 
11161 	/* Contrary to the suggestions in the documentation,
11162 	 * "Enable Panel Fitter" does not seem to be required when page
11163 	 * flipping with a non-native mode, and, worse, causes a normal
11164 	 * modeset to fail.
11165 	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
11166 	 */
11167 	pf = 0;
11168 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11169 	intel_ring_emit(engine, pf | pipesrc);
11170 
11171 	intel_mark_page_flip_active(intel_crtc->unpin_work);
11172 	return 0;
11173 }
11174 
11175 static int intel_gen7_queue_flip(struct drm_device *dev,
11176 				 struct drm_crtc *crtc,
11177 				 struct drm_framebuffer *fb,
11178 				 struct drm_i915_gem_object *obj,
11179 				 struct drm_i915_gem_request *req,
11180 				 uint32_t flags)
11181 {
11182 	struct intel_engine_cs *engine = req->engine;
11183 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11184 	uint32_t plane_bit = 0;
11185 	int len, ret;
11186 
11187 	switch (intel_crtc->plane) {
11188 	case PLANE_A:
11189 		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
11190 		break;
11191 	case PLANE_B:
11192 		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
11193 		break;
11194 	case PLANE_C:
11195 		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
11196 		break;
11197 	default:
11198 		WARN_ONCE(1, "unknown plane in flip command\n");
11199 		return -ENODEV;
11200 	}
11201 
11202 	len = 4;
11203 	if (engine->id == RCS) {
11204 		len += 6;
11205 		/*
11206 		 * On Gen 8, SRM takes an extra dword to accommodate
11207 		 * 48-bit addresses, and we need a NOOP for the batch size to
11208 		 * stay even.
11209 		 */
11210 		if (IS_GEN8(dev))
11211 			len += 2;
11212 	}
11213 
11214 	/*
11215 	 * BSpec MI_DISPLAY_FLIP for IVB:
11216 	 * "The full packet must be contained within the same cache line."
11217 	 *
11218 	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
11219 	 * cacheline, if we ever start emitting more commands before
11220 	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
11221 	 * then do the cacheline alignment, and finally emit the
11222 	 * MI_DISPLAY_FLIP.
11223 	 */
11224 	ret = intel_ring_cacheline_align(req);
11225 	if (ret)
11226 		return ret;
11227 
11228 	ret = intel_ring_begin(req, len);
11229 	if (ret)
11230 		return ret;
11231 
11232 	/* Unmask the flip-done completion message. Note that the bspec says that
11233 	 * we should do this for both the BCS and RCS, and that we must not unmask
11234 	 * more than one flip event at any time (or ensure that one flip message
11235 	 * can be sent by waiting for flip-done prior to queueing new flips).
11236 	 * Experimentation says that BCS works despite DERRMR masking all
11237 	 * flip-done completion events and that unmasking all planes at once
11238 	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
11239 	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
11240 	 */
11241 	if (engine->id == RCS) {
11242 		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
11243 		intel_ring_emit_reg(engine, DERRMR);
11244 		intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
11245 					  DERRMR_PIPEB_PRI_FLIP_DONE |
11246 					  DERRMR_PIPEC_PRI_FLIP_DONE));
11247 		if (IS_GEN8(dev))
11248 			intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 |
11249 					      MI_SRM_LRM_GLOBAL_GTT);
11250 		else
11251 			intel_ring_emit(engine, MI_STORE_REGISTER_MEM |
11252 					      MI_SRM_LRM_GLOBAL_GTT);
11253 		intel_ring_emit_reg(engine, DERRMR);
11254 		intel_ring_emit(engine, engine->scratch.gtt_offset + 256);
11255 		if (IS_GEN8(dev)) {
11256 			intel_ring_emit(engine, 0);
11257 			intel_ring_emit(engine, MI_NOOP);
11258 		}
11259 	}
11260 
11261 	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
11262 	intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
11263 	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
11264 	intel_ring_emit(engine, (MI_NOOP));
11265 
11266 	intel_mark_page_flip_active(intel_crtc->unpin_work);
11267 	return 0;
11268 }
11269 
11270 static bool use_mmio_flip(struct intel_engine_cs *engine,
11271 			  struct drm_i915_gem_object *obj)
11272 {
11273 	/*
11274 	 * MMIO flips are not used on older platforms, because the lack of
11275 	 * a flip done interrupt forces us to use CS flips there. Older
11276 	 * platforms derive flip completion using clever tricks involving
11277 	 * the flip_pending status bits and vblank irqs, so using MMIO
11278 	 * flips there would disrupt that mechanism.
11279 	 */
11280 
11281 	if (engine == NULL)
11282 		return true;
11283 
11284 	if (INTEL_INFO(engine->dev)->gen < 5)
11285 		return false;
11286 
11287 	if (i915.use_mmio_flip < 0)
11288 		return false;
11289 	else if (i915.use_mmio_flip > 0)
11290 		return true;
11291 	else if (i915.enable_execlists)
11292 		return true;
11293 #if 0
11294 	else if (obj->base.dma_buf &&
11295 		 !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv,
11296 						       false))
11297 		return true;
11298 #endif
11299 	else
11300 		return engine != i915_gem_request_get_engine(obj->last_write_req);
11301 }
11302 
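/*
 * Summarising the policy above: MMIO flips are forced when there is no
 * engine to flip on, disabled entirely before gen5, overridable either
 * way via the i915.use_mmio_flip parameter, implied by execlists, and
 * otherwise chosen whenever the last write to the object came from a
 * different engine (a CS flip would need cross-ring synchronisation
 * anyway).
 */
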
11303 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
11304 			     unsigned int rotation,
11305 			     struct intel_unpin_work *work)
11306 {
11307 	struct drm_device *dev = intel_crtc->base.dev;
11308 	struct drm_i915_private *dev_priv = dev->dev_private;
11309 	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
11310 	const enum i915_pipe pipe = intel_crtc->pipe;
11311 	u32 ctl, stride, tile_height;
11312 
11313 	ctl = I915_READ(PLANE_CTL(pipe, 0));
11314 	ctl &= ~PLANE_CTL_TILED_MASK;
11315 	switch (fb->modifier[0]) {
11316 	case DRM_FORMAT_MOD_NONE:
11317 		break;
11318 	case I915_FORMAT_MOD_X_TILED:
11319 		ctl |= PLANE_CTL_TILED_X;
11320 		break;
11321 	case I915_FORMAT_MOD_Y_TILED:
11322 		ctl |= PLANE_CTL_TILED_Y;
11323 		break;
11324 	case I915_FORMAT_MOD_Yf_TILED:
11325 		ctl |= PLANE_CTL_TILED_YF;
11326 		break;
11327 	default:
11328 		MISSING_CASE(fb->modifier[0]);
11329 	}
11330 
11331 	/*
11332 	 * The stride is expressed either in 64-byte chunks for linear
11333 	 * buffers or in a number of tiles for tiled buffers.
11334 	 */
11335 	if (intel_rotation_90_or_270(rotation)) {
11336 		/* stride = Surface height in tiles */
11337 		tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0);
11338 		stride = DIV_ROUND_UP(fb->height, tile_height);
11339 	} else {
11340 		stride = fb->pitches[0] /
11341 			intel_fb_stride_alignment(dev_priv, fb->modifier[0],
11342 						  fb->pixel_format);
11343 	}
11344 
11345 	/*
11346 	 * PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
11347 	 * PLANE_SURF updates, so the overall update is guaranteed to be atomic.
11348 	 */
11349 	I915_WRITE(PLANE_CTL(pipe, 0), ctl);
11350 	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
11351 
11352 	I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
11353 	POSTING_READ(PLANE_SURF(pipe, 0));
11354 }
11355 
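/*
 * Stride example for the computation above (illustrative values,
 * assuming a 64-byte stride unit for linear buffers): an unrotated
 * linear 1920x1080 XRGB8888 fb with pitches[0] == 7680 bytes programs
 * PLANE_STRIDE = 7680 / 64 = 120, whereas tiled or 90/270-rotated
 * buffers are programmed in units of whole tiles instead.
 */
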
11356 static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
11357 			     struct intel_unpin_work *work)
11358 {
11359 	struct drm_device *dev = intel_crtc->base.dev;
11360 	struct drm_i915_private *dev_priv = dev->dev_private;
11361 	struct intel_framebuffer *intel_fb =
11362 		to_intel_framebuffer(intel_crtc->base.primary->fb);
11363 	struct drm_i915_gem_object *obj = intel_fb->obj;
11364 	i915_reg_t reg = DSPCNTR(intel_crtc->plane);
11365 	u32 dspcntr;
11366 
11367 	dspcntr = I915_READ(reg);
11368 
11369 	if (obj->tiling_mode != I915_TILING_NONE)
11370 		dspcntr |= DISPPLANE_TILED;
11371 	else
11372 		dspcntr &= ~DISPPLANE_TILED;
11373 
11374 	I915_WRITE(reg, dspcntr);
11375 
11376 	I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
11377 	POSTING_READ(DSPSURF(intel_crtc->plane));
11378 }
11379 
11380 /*
11381  * XXX: This is the temporary way to update the plane registers until we get
11382  * around to using the usual plane update functions for MMIO flips
11383  */
11384 static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
11385 {
11386 	struct intel_crtc *crtc = mmio_flip->crtc;
11387 	struct intel_unpin_work *work;
11388 
11389 	spin_lock_irq(&crtc->base.dev->event_lock);
11390 	work = crtc->unpin_work;
11391 	spin_unlock_irq(&crtc->base.dev->event_lock);
11392 	if (work == NULL)
11393 		return;
11394 
11395 	intel_mark_page_flip_active(work);
11396 
11397 	intel_pipe_update_start(crtc);
11398 
11399 	if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
11400 		skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
11401 	else
11402 		/* use_mmio_flip() restricts MMIO flips to ilk+ */
11403 		ilk_do_mmio_flip(crtc, work);
11404 
11405 	intel_pipe_update_end(crtc);
11406 }
11407 
11408 static void intel_mmio_flip_work_func(struct work_struct *work)
11409 {
11410 	struct intel_mmio_flip *mmio_flip =
11411 		container_of(work, struct intel_mmio_flip, work);
11412 #if 0
11413 	struct intel_framebuffer *intel_fb =
11414 		to_intel_framebuffer(mmio_flip->crtc->base.primary->fb);
11415 	struct drm_i915_gem_object *obj = intel_fb->obj;
11416 #endif
11417 
11418 	if (mmio_flip->req) {
11419 		WARN_ON(__i915_wait_request(mmio_flip->req,
11420 					    false, NULL,
11421 					    &mmio_flip->i915->rps.mmioflips));
11422 		i915_gem_request_unreference__unlocked(mmio_flip->req);
11423 	}
11424 
11425 	/* For framebuffers backed by a dma-buf, wait for the fence */
11426 #if 0
11427 	if (obj->base.dma_buf)
11428 		WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
11429 							    false, false,
11430 							    MAX_SCHEDULE_TIMEOUT) < 0);
11431 #endif
11432 
11433 	intel_do_mmio_flip(mmio_flip);
11434 	kfree(mmio_flip);
11435 }
11436 
11437 static int intel_queue_mmio_flip(struct drm_device *dev,
11438 				 struct drm_crtc *crtc,
11439 				 struct drm_i915_gem_object *obj)
11440 {
11441 	struct intel_mmio_flip *mmio_flip;
11442 
11443 	mmio_flip = kmalloc(sizeof(*mmio_flip), M_DRM, M_WAITOK);
11444 	if (mmio_flip == NULL)
11445 		return -ENOMEM;
11446 
11447 	mmio_flip->i915 = to_i915(dev);
11448 	mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
11449 	mmio_flip->crtc = to_intel_crtc(crtc);
11450 	mmio_flip->rotation = crtc->primary->state->rotation;
11451 
11452 	INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
11453 	schedule_work(&mmio_flip->work);
11454 
11455 	return 0;
11456 }
11457 
11458 static int intel_default_queue_flip(struct drm_device *dev,
11459 				    struct drm_crtc *crtc,
11460 				    struct drm_framebuffer *fb,
11461 				    struct drm_i915_gem_object *obj,
11462 				    struct drm_i915_gem_request *req,
11463 				    uint32_t flags)
11464 {
11465 	return -ENODEV;
11466 }
11467 
11468 static bool __intel_pageflip_stall_check(struct drm_device *dev,
11469 					 struct drm_crtc *crtc)
11470 {
11471 	struct drm_i915_private *dev_priv = dev->dev_private;
11472 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11473 	struct intel_unpin_work *work = intel_crtc->unpin_work;
11474 	u32 addr;
11475 
11476 	if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
11477 		return true;
11478 
11479 	if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
11480 		return false;
11481 
11482 	if (!work->enable_stall_check)
11483 		return false;
11484 
11485 	if (work->flip_ready_vblank == 0) {
11486 		if (work->flip_queued_req &&
11487 		    !i915_gem_request_completed(work->flip_queued_req, true))
11488 			return false;
11489 
11490 		work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
11491 	}
11492 
11493 	if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
11494 		return false;
11495 
11496 	/* Potential stall - if we see that the flip has happened,
11497 	 * assume a missed interrupt. */
11498 	if (INTEL_INFO(dev)->gen >= 4)
11499 		addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
11500 	else
11501 		addr = I915_READ(DSPADDR(intel_crtc->plane));
11502 
11503 	/* There is a potential issue here with a false positive after a flip
11504 	 * to the same address. We could address this by checking for a
11505 	 * non-incrementing frame counter.
11506 	 */
11507 	return addr == work->gtt_offset;
11508 }
11509 
11510 void intel_check_page_flip(struct drm_device *dev, int pipe)
11511 {
11512 	struct drm_i915_private *dev_priv = dev->dev_private;
11513 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11514 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11515 	struct intel_unpin_work *work;
11516 
11517 //	WARN_ON(!in_interrupt());
11518 
11519 	if (crtc == NULL)
11520 		return;
11521 
11522 	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
11523 	work = intel_crtc->unpin_work;
11524 	if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
11525 		WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
11526 			 work->flip_queued_vblank, drm_vblank_count(dev, pipe));
11527 		page_flip_completed(intel_crtc);
11528 		work = NULL;
11529 	}
11530 	if (work != NULL &&
11531 	    drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
11532 		intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
11533 	lockmgr(&dev->event_lock, LK_RELEASE);
11534 }
11535 
11536 static int intel_crtc_page_flip(struct drm_crtc *crtc,
11537 				struct drm_framebuffer *fb,
11538 				struct drm_pending_vblank_event *event,
11539 				uint32_t page_flip_flags)
11540 {
11541 	struct drm_device *dev = crtc->dev;
11542 	struct drm_i915_private *dev_priv = dev->dev_private;
11543 	struct drm_framebuffer *old_fb = crtc->primary->fb;
11544 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11545 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11546 	struct drm_plane *primary = crtc->primary;
11547 	enum i915_pipe pipe = intel_crtc->pipe;
11548 	struct intel_unpin_work *work;
11549 	struct intel_engine_cs *engine;
11550 	bool mmio_flip;
11551 	struct drm_i915_gem_request *request = NULL;
11552 	int ret;
11553 
11554 	/*
11555 	 * drm_mode_page_flip_ioctl() should already catch this, but double
11556 	 * check to be safe.  In the future we may enable pageflipping from
11557 	 * a disabled primary plane.
11558 	 */
11559 	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
11560 		return -EBUSY;
11561 
11562 	/* Can't change pixel format via MI display flips. */
11563 	if (fb->pixel_format != crtc->primary->fb->pixel_format)
11564 		return -EINVAL;
11565 
11566 	/*
11567 	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
11568 	 * Note that pitch changes could also affect these registers.
11569 	 */
11570 	if (INTEL_INFO(dev)->gen > 3 &&
11571 	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
11572 	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
11573 		return -EINVAL;
11574 
11575 	if (i915_terminally_wedged(&dev_priv->gpu_error))
11576 		goto out_hang;
11577 
11578 	work = kzalloc(sizeof(*work), GFP_KERNEL);
11579 	if (work == NULL)
11580 		return -ENOMEM;
11581 
11582 	work->event = event;
11583 	work->crtc = crtc;
11584 	work->old_fb = old_fb;
11585 	INIT_WORK(&work->work, intel_unpin_work_fn);
11586 
11587 	ret = drm_crtc_vblank_get(crtc);
11588 	if (ret)
11589 		goto free_work;
11590 
11591 	/* We borrow the event spin lock for protecting unpin_work */
11592 	spin_lock_irq(&dev->event_lock);
11593 	if (intel_crtc->unpin_work) {
11594 		/* Before declaring the flip queue wedged, check if
11595 		 * the hardware completed the operation behind our backs.
11596 		 */
11597 		if (__intel_pageflip_stall_check(dev, crtc)) {
11598 			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
11599 			page_flip_completed(intel_crtc);
11600 		} else {
11601 			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
11602 			spin_unlock_irq(&dev->event_lock);
11603 
11604 			drm_crtc_vblank_put(crtc);
11605 			kfree(work);
11606 			return -EBUSY;
11607 		}
11608 	}
11609 	intel_crtc->unpin_work = work;
11610 	spin_unlock_irq(&dev->event_lock);
11611 
11612 	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
11613 		flush_workqueue(dev_priv->wq);
11614 
11615 	/* Reference the objects for the scheduled work. */
11616 	drm_framebuffer_reference(work->old_fb);
11617 	drm_gem_object_reference(&obj->base);
11618 
11619 	crtc->primary->fb = fb;
11620 	update_state_fb(crtc->primary);
11621 	intel_fbc_pre_update(intel_crtc);
11622 
11623 	work->pending_flip_obj = obj;
11624 
11625 	ret = i915_mutex_lock_interruptible(dev);
11626 	if (ret)
11627 		goto cleanup;
11628 
11629 	intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
11630 	if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) {
11631 		ret = -EIO;
11632 		goto cleanup;
11633 	}
11634 
11635 	atomic_inc(&intel_crtc->unpin_work_count);
11636 
11637 	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
11638 		work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
11639 
11640 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
11641 		engine = &dev_priv->engine[BCS];
11642 		if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
11643 			/* vlv: DISPLAY_FLIP fails to change tiling */
11644 			engine = NULL;
11645 	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
11646 		engine = &dev_priv->engine[BCS];
11647 	} else if (INTEL_INFO(dev)->gen >= 7) {
11648 		engine = i915_gem_request_get_engine(obj->last_write_req);
11649 		if (engine == NULL || engine->id != RCS)
11650 			engine = &dev_priv->engine[BCS];
11651 	} else {
11652 		engine = &dev_priv->engine[RCS];
11653 	}
11654 
11655 	mmio_flip = use_mmio_flip(engine, obj);
11656 
11657 	/* When using CS flips, we want to emit semaphores between rings.
11658 	 * However, when using mmio flips we will create a task to do the
11659 	 * synchronisation, so all we want here is to pin the framebuffer
11660 	 * into the display plane and skip any waits.
11661 	 */
11662 	if (!mmio_flip) {
11663 		ret = i915_gem_object_sync(obj, engine, &request);
11664 		if (ret)
11665 			goto cleanup_pending;
11666 	}
11667 
11668 	ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
11669 	if (ret)
11670 		goto cleanup_pending;
11671 
11672 	work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
11673 						  obj, 0);
11674 	work->gtt_offset += intel_crtc->dspaddr_offset;
11675 
11676 	if (mmio_flip) {
11677 		ret = intel_queue_mmio_flip(dev, crtc, obj);
11678 		if (ret)
11679 			goto cleanup_unpin;
11680 
11681 		i915_gem_request_assign(&work->flip_queued_req,
11682 					obj->last_write_req);
11683 	} else {
11684 		if (!request) {
11685 			request = i915_gem_request_alloc(engine, NULL);
11686 			if (IS_ERR(request)) {
11687 				ret = PTR_ERR(request);
11688 				goto cleanup_unpin;
11689 			}
11690 		}
11691 
11692 		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
11693 						   page_flip_flags);
11694 		if (ret)
11695 			goto cleanup_unpin;
11696 
11697 		i915_gem_request_assign(&work->flip_queued_req, request);
11698 	}
11699 
11700 	if (request)
11701 		i915_add_request_no_flush(request);
11702 
11703 	work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
11704 	work->enable_stall_check = true;
11705 
11706 	i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
11707 			  to_intel_plane(primary)->frontbuffer_bit);
11708 	mutex_unlock(&dev->struct_mutex);
11709 
11710 	intel_frontbuffer_flip_prepare(dev,
11711 				       to_intel_plane(primary)->frontbuffer_bit);
11712 
11713 	trace_i915_flip_request(intel_crtc->plane, obj);
11714 
11715 	return 0;
11716 
11717 cleanup_unpin:
11718 	intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
11719 cleanup_pending:
11720 	if (!IS_ERR_OR_NULL(request))
11721 		i915_add_request_no_flush(request);
11722 	atomic_dec(&intel_crtc->unpin_work_count);
11723 	mutex_unlock(&dev->struct_mutex);
11724 cleanup:
11725 	crtc->primary->fb = old_fb;
11726 	update_state_fb(crtc->primary);
11727 
11728 	drm_gem_object_unreference_unlocked(&obj->base);
11729 	drm_framebuffer_unreference(work->old_fb);
11730 
11731 	spin_lock_irq(&dev->event_lock);
11732 	intel_crtc->unpin_work = NULL;
11733 	spin_unlock_irq(&dev->event_lock);
11734 
11735 	drm_crtc_vblank_put(crtc);
11736 free_work:
11737 	kfree(work);
11738 
11739 	if (ret == -EIO) {
11740 		struct drm_atomic_state *state;
11741 		struct drm_plane_state *plane_state;
11742 
11743 out_hang:
11744 		state = drm_atomic_state_alloc(dev);
11745 		if (!state)
11746 			return -ENOMEM;
11747 		state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
11748 
11749 retry:
11750 		plane_state = drm_atomic_get_plane_state(state, primary);
11751 		ret = PTR_ERR_OR_ZERO(plane_state);
11752 		if (!ret) {
11753 			drm_atomic_set_fb_for_plane(plane_state, fb);
11754 
11755 			ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
11756 			if (!ret)
11757 				ret = drm_atomic_commit(state);
11758 		}
11759 
11760 		if (ret == -EDEADLK) {
11761 			drm_modeset_backoff(state->acquire_ctx);
11762 			drm_atomic_state_clear(state);
11763 			goto retry;
11764 		}
11765 
11766 		if (ret)
11767 			drm_atomic_state_free(state);
11768 
11769 		if (ret == 0 && event) {
11770 			spin_lock_irq(&dev->event_lock);
11771 			drm_crtc_send_vblank_event(crtc, event);
11772 			spin_unlock_irq(&dev->event_lock);
11773 		}
11774 	}
11775 	return ret;
11776 }
11777 
11778 
11779 /**
11780  * intel_wm_need_update - Check whether watermarks need updating
11781  * @plane: drm plane
11782  * @state: new plane state
11783  *
11784  * Check current plane state versus the new one to determine whether
11785  * watermarks need to be recalculated.
11786  *
11787  * Returns true if the watermarks need to be recalculated, false otherwise.
11788  */
11789 static bool intel_wm_need_update(struct drm_plane *plane,
11790 				 struct drm_plane_state *state)
11791 {
11792 	struct intel_plane_state *new = to_intel_plane_state(state);
11793 	struct intel_plane_state *cur = to_intel_plane_state(plane->state);
11794 
11795 	/* Update watermarks on visibility, tiling, rotation or size changes. */
11796 	if (new->visible != cur->visible)
11797 		return true;
11798 
11799 	if (!cur->base.fb || !new->base.fb)
11800 		return false;
11801 
11802 	if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
11803 	    cur->base.rotation != new->base.rotation ||
11804 	    drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
11805 	    drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
11806 	    drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
11807 	    drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
11808 		return true;
11809 
11810 	return false;
11811 }
11812 
11813 static bool needs_scaling(struct intel_plane_state *state)
11814 {
11815 	int src_w = drm_rect_width(&state->src) >> 16;
11816 	int src_h = drm_rect_height(&state->src) >> 16;
11817 	int dst_w = drm_rect_width(&state->dst);
11818 	int dst_h = drm_rect_height(&state->dst);
11819 
11820 	return (src_w != dst_w || src_h != dst_h);
11821 }
11822 
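/*
 * The >> 16 above strips the 16.16 fixed-point fraction from the
 * source rectangle. As a sketch: a 1920x1080 source (stored as
 * 1920 << 16 by 1080 << 16) scanned out to a 1280x720 destination
 * reports scaling, while equal source and destination sizes (a pure
 * pan) do not.
 */
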
11823 int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11824 				    struct drm_plane_state *plane_state)
11825 {
11826 	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
11827 	struct drm_crtc *crtc = crtc_state->crtc;
11828 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11829 	struct drm_plane *plane = plane_state->plane;
11830 	struct drm_device *dev = crtc->dev;
11831 	struct drm_i915_private *dev_priv = to_i915(dev);
11832 	struct intel_plane_state *old_plane_state =
11833 		to_intel_plane_state(plane->state);
11834 	int idx = intel_crtc->base.base.id, ret;
11835 	bool mode_changed = needs_modeset(crtc_state);
11836 	bool was_crtc_enabled = crtc->state->active;
11837 	bool is_crtc_enabled = crtc_state->active;
11838 	bool turn_off, turn_on, visible, was_visible;
11839 	struct drm_framebuffer *fb = plane_state->fb;
11840 
11841 	if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
11842 	    plane->type != DRM_PLANE_TYPE_CURSOR) {
11843 		ret = skl_update_scaler_plane(
11844 			to_intel_crtc_state(crtc_state),
11845 			to_intel_plane_state(plane_state));
11846 		if (ret)
11847 			return ret;
11848 	}
11849 
11850 	was_visible = old_plane_state->visible;
11851 	visible = to_intel_plane_state(plane_state)->visible;
11852 
11853 	if (!was_crtc_enabled && WARN_ON(was_visible))
11854 		was_visible = false;
11855 
11856 	/*
11857 	 * Visibility is calculated as if the crtc were on, but
11858 	 * after scaler setup everything depends on it being off
11859 	 * when the crtc isn't active.
11860 	 */
11861 	if (!is_crtc_enabled)
11862 		to_intel_plane_state(plane_state)->visible = visible = false;
11863 
11864 	if (!was_visible && !visible)
11865 		return 0;
11866 
11867 	if (fb != old_plane_state->base.fb)
11868 		pipe_config->fb_changed = true;
11869 
11870 	turn_off = was_visible && (!visible || mode_changed);
11871 	turn_on = visible && (!was_visible || mode_changed);
11872 
11873 	DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
11874 			 plane->base.id, fb ? fb->base.id : -1);
11875 
11876 	DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
11877 			 plane->base.id, was_visible, visible,
11878 			 turn_off, turn_on, mode_changed);
11879 
11880 	if (turn_on) {
11881 		pipe_config->update_wm_pre = true;
11882 
11883 		/* must disable cxsr around plane enable/disable */
11884 		if (plane->type != DRM_PLANE_TYPE_CURSOR)
11885 			pipe_config->disable_cxsr = true;
11886 	} else if (turn_off) {
11887 		pipe_config->update_wm_post = true;
11888 
11889 		/* must disable cxsr around plane enable/disable */
11890 		if (plane->type != DRM_PLANE_TYPE_CURSOR)
11891 			pipe_config->disable_cxsr = true;
11892 	} else if (intel_wm_need_update(plane, plane_state)) {
11893 		/* FIXME bollocks */
11894 		pipe_config->update_wm_pre = true;
11895 		pipe_config->update_wm_post = true;
11896 	}
11897 
11898 	/* Pre-gen9 platforms need two-step watermark updates */
11899 	if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) &&
11900 	    INTEL_INFO(dev)->gen < 9 && dev_priv->display.optimize_watermarks)
11901 		to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true;
11902 
11903 	if (visible || was_visible)
11904 		pipe_config->fb_bits |= to_intel_plane(plane)->frontbuffer_bit;
11905 
11906 	/*
11907 	 * WaCxSRDisabledForSpriteScaling:ivb
11908 	 *
11909 	 * cstate->update_wm was already set above, so this flag will
11910 	 * take effect when we commit and program watermarks.
11911 	 */
11912 	if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev) &&
11913 	    needs_scaling(to_intel_plane_state(plane_state)) &&
11914 	    !needs_scaling(old_plane_state))
11915 		pipe_config->disable_lp_wm = true;
11916 
11917 	return 0;
11918 }
11919 
11920 static bool encoders_cloneable(const struct intel_encoder *a,
11921 			       const struct intel_encoder *b)
11922 {
11923 	/* masks could be asymmetric, so check both ways */
11924 	return a == b || (a->cloneable & (1 << b->type) &&
11925 			  b->cloneable & (1 << a->type));
11926 }
11927 
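/*
 * As a sketch of the mask check above: if a DVO encoder advertises
 * cloneable == 1 << INTEL_OUTPUT_ANALOG and the analog encoder
 * advertises cloneable == 1 << INTEL_OUTPUT_DVO, the pair is
 * cloneable; if either mask lacks the other's type bit,
 * encoders_cloneable() returns false.
 */
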
11928 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11929 					 struct intel_crtc *crtc,
11930 					 struct intel_encoder *encoder)
11931 {
11932 	struct intel_encoder *source_encoder;
11933 	struct drm_connector *connector;
11934 	struct drm_connector_state *connector_state;
11935 	int i;
11936 
11937 	for_each_connector_in_state(state, connector, connector_state, i) {
11938 		if (connector_state->crtc != &crtc->base)
11939 			continue;
11940 
11941 		source_encoder =
11942 			to_intel_encoder(connector_state->best_encoder);
11943 		if (!encoders_cloneable(encoder, source_encoder))
11944 			return false;
11945 	}
11946 
11947 	return true;
11948 }
11949 
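/*
 * Check that all encoders that @state assigns to @crtc are mutually
 * cloneable.
 */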
11950 static bool check_encoder_cloning(struct drm_atomic_state *state,
11951 				  struct intel_crtc *crtc)
11952 {
11953 	struct intel_encoder *encoder;
11954 	struct drm_connector *connector;
11955 	struct drm_connector_state *connector_state;
11956 	int i;
11957 
11958 	for_each_connector_in_state(state, connector, connector_state, i) {
11959 		if (connector_state->crtc != &crtc->base)
11960 			continue;
11961 
11962 		encoder = to_intel_encoder(connector_state->best_encoder);
11963 		if (!check_single_encoder_cloning(state, crtc, encoder))
11964 			return false;
11965 	}
11966 
11967 	return true;
11968 }
11969 
11970 static int intel_crtc_atomic_check(struct drm_crtc *crtc,
11971 				   struct drm_crtc_state *crtc_state)
11972 {
11973 	struct drm_device *dev = crtc->dev;
11974 	struct drm_i915_private *dev_priv = dev->dev_private;
11975 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11976 	struct intel_crtc_state *pipe_config =
11977 		to_intel_crtc_state(crtc_state);
11978 	struct drm_atomic_state *state = crtc_state->state;
11979 	int ret;
11980 	bool mode_changed = needs_modeset(crtc_state);
11981 
11982 	if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
11983 		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
11984 		return -EINVAL;
11985 	}
11986 
11987 	if (mode_changed && !crtc_state->active)
11988 		pipe_config->update_wm_post = true;
11989 
11990 	if (mode_changed && crtc_state->enable &&
11991 	    dev_priv->display.crtc_compute_clock &&
11992 	    !WARN_ON(pipe_config->shared_dpll)) {
11993 		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
11994 							   pipe_config);
11995 		if (ret)
11996 			return ret;
11997 	}
11998 
11999 	if (crtc_state->color_mgmt_changed) {
12000 		ret = intel_color_check(crtc, crtc_state);
12001 		if (ret)
12002 			return ret;
12003 
12004 		/*
12005 		 * Changing color management on Intel hardware is
12006 		 * handled as part of planes update.
12007 		 */
12008 		crtc_state->planes_changed = true;
12009 	}
12010 
12011 	ret = 0;
12012 	if (dev_priv->display.compute_pipe_wm) {
12013 		ret = dev_priv->display.compute_pipe_wm(pipe_config);
12014 		if (ret) {
12015 			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
12016 			return ret;
12017 		}
12018 	}
12019 
12020 	if (dev_priv->display.compute_intermediate_wm &&
12021 	    !to_intel_atomic_state(state)->skip_intermediate_wm) {
12022 		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
12023 			return 0;
12024 
12025 		/*
12026 		 * Calculate 'intermediate' watermarks that satisfy both the
12027 		 * old state and the new state.  We can program these
12028 		 * immediately.
12029 		 */
12030 		ret = dev_priv->display.compute_intermediate_wm(crtc->dev,
12031 								intel_crtc,
12032 								pipe_config);
12033 		if (ret) {
12034 			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
12035 			return ret;
12036 		}
12037 	} else if (dev_priv->display.compute_intermediate_wm) {
12038 		if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
12039 			pipe_config->wm.intermediate = pipe_config->wm.optimal.ilk;
12040 	}
12041 
12042 	if (INTEL_INFO(dev)->gen >= 9) {
12043 		if (mode_changed)
12044 			ret = skl_update_scaler_crtc(pipe_config);
12045 
12046 		if (!ret)
12047 			ret = intel_atomic_setup_scalers(dev, intel_crtc,
12048 							 pipe_config);
12049 	}
12050 
12051 	return ret;
12052 }
12053 
12054 static const struct drm_crtc_helper_funcs intel_helper_funcs = {
12055 	.mode_set_base_atomic = intel_pipe_set_base_atomic,
12056 	.atomic_begin = intel_begin_crtc_commit,
12057 	.atomic_flush = intel_finish_crtc_commit,
12058 	.atomic_check = intel_crtc_atomic_check,
12059 };
12060 
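/*
 * Bring the connectors' atomic state (best_encoder, crtc and the
 * connector reference) back in sync with the legacy encoder pointers.
 */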
12061 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
12062 {
12063 	struct intel_connector *connector;
12064 
12065 	for_each_intel_connector(dev, connector) {
12066 		if (connector->base.state->crtc)
12067 			drm_connector_unreference(&connector->base);
12068 
12069 		if (connector->base.encoder) {
12070 			connector->base.state->best_encoder =
12071 				connector->base.encoder;
12072 			connector->base.state->crtc =
12073 				connector->base.encoder->crtc;
12074 
12075 			drm_connector_reference(&connector->base);
12076 		} else {
12077 			connector->base.state->best_encoder = NULL;
12078 			connector->base.state->crtc = NULL;
12079 		}
12080 	}
12081 }
12082 
12083 static void
12084 connected_sink_compute_bpp(struct intel_connector *connector,
12085 			   struct intel_crtc_state *pipe_config)
12086 {
12087 	int bpp = pipe_config->pipe_bpp;
12088 
12089 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constraints\n",
12090 		connector->base.base.id,
12091 		connector->base.name);
12092 
12093 	/* Don't use an invalid EDID bpc value */
12094 	if (connector->base.display_info.bpc &&
12095 	    connector->base.display_info.bpc * 3 < bpp) {
12096 		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
12097 			      bpp, connector->base.display_info.bpc*3);
12098 		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
12099 	}
12100 
12101 	/* Clamp bpp to 8 bpc (24 bpp) on screens without EDID 1.4 */
12102 	if (connector->base.display_info.bpc == 0 && bpp > 24) {
12103 		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
12104 			      bpp);
12105 		pipe_config->pipe_bpp = 24;
12106 	}
12107 }
12108 
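/*
 * Start from the platform's maximum pipe bpp (10 bpc on G4X/VLV/CHV,
 * 12 bpc on gen5+, 8 bpc otherwise) and clamp it against every sink
 * connected to this crtc. Returns the pre-clamp baseline bpp; the
 * clamped value is left in pipe_config->pipe_bpp.
 */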
12109 static int
12110 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
12111 			  struct intel_crtc_state *pipe_config)
12112 {
12113 	struct drm_device *dev = crtc->base.dev;
12114 	struct drm_atomic_state *state;
12115 	struct drm_connector *connector;
12116 	struct drm_connector_state *connector_state;
12117 	int bpp, i;
12118 
12119 	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
12120 		bpp = 10*3;
12121 	else if (INTEL_INFO(dev)->gen >= 5)
12122 		bpp = 12*3;
12123 	else
12124 		bpp = 8*3;
12125 
12126 
12127 	pipe_config->pipe_bpp = bpp;
12128 
12129 	state = pipe_config->base.state;
12130 
12131 	/* Clamp display bpp to EDID value */
12132 	for_each_connector_in_state(state, connector, connector_state, i) {
12133 		if (connector_state->crtc != &crtc->base)
12134 			continue;
12135 
12136 		connected_sink_compute_bpp(to_intel_connector(connector),
12137 					   pipe_config);
12138 	}
12139 
12140 	return bpp;
12141 }
12142 
12143 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
12144 {
12145 	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
12146 			"type: 0x%x flags: 0x%x\n",
12147 		mode->crtc_clock,
12148 		mode->crtc_hdisplay, mode->crtc_hsync_start,
12149 		mode->crtc_hsync_end, mode->crtc_htotal,
12150 		mode->crtc_vdisplay, mode->crtc_vsync_start,
12151 		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
12152 }
12153 
12154 static void intel_dump_pipe_config(struct intel_crtc *crtc,
12155 				   struct intel_crtc_state *pipe_config,
12156 				   const char *context)
12157 {
12158 	struct drm_device *dev = crtc->base.dev;
12159 	struct drm_plane *plane;
12160 	struct intel_plane *intel_plane;
12161 	struct intel_plane_state *state;
12162 	struct drm_framebuffer *fb;
12163 
12164 	DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
12165 		      context, pipe_config, pipe_name(crtc->pipe));
12166 
12167 	DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
12168 	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
12169 		      pipe_config->pipe_bpp, pipe_config->dither);
12170 	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
12171 		      pipe_config->has_pch_encoder,
12172 		      pipe_config->fdi_lanes,
12173 		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
12174 		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
12175 		      pipe_config->fdi_m_n.tu);
12176 	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
12177 		      pipe_config->has_dp_encoder,
12178 		      pipe_config->lane_count,
12179 		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
12180 		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
12181 		      pipe_config->dp_m_n.tu);
12182 
12183 	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
12184 		      pipe_config->has_dp_encoder,
12185 		      pipe_config->lane_count,
12186 		      pipe_config->dp_m2_n2.gmch_m,
12187 		      pipe_config->dp_m2_n2.gmch_n,
12188 		      pipe_config->dp_m2_n2.link_m,
12189 		      pipe_config->dp_m2_n2.link_n,
12190 		      pipe_config->dp_m2_n2.tu);
12191 
12192 	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
12193 		      pipe_config->has_audio,
12194 		      pipe_config->has_infoframe);
12195 
12196 	DRM_DEBUG_KMS("requested mode:\n");
12197 	drm_mode_debug_printmodeline(&pipe_config->base.mode);
12198 	DRM_DEBUG_KMS("adjusted mode:\n");
12199 	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
12200 	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
12201 	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
12202 	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
12203 		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
12204 	DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
12205 		      crtc->num_scalers,
12206 		      pipe_config->scaler_state.scaler_users,
12207 		      pipe_config->scaler_state.scaler_id);
12208 	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
12209 		      pipe_config->gmch_pfit.control,
12210 		      pipe_config->gmch_pfit.pgm_ratios,
12211 		      pipe_config->gmch_pfit.lvds_border_bits);
12212 	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
12213 		      pipe_config->pch_pfit.pos,
12214 		      pipe_config->pch_pfit.size,
12215 		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
12216 	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
12217 	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
12218 
12219 	if (IS_BROXTON(dev)) {
12220 		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
12221 			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
12222 			      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
12223 			      pipe_config->ddi_pll_sel,
12224 			      pipe_config->dpll_hw_state.ebb0,
12225 			      pipe_config->dpll_hw_state.ebb4,
12226 			      pipe_config->dpll_hw_state.pll0,
12227 			      pipe_config->dpll_hw_state.pll1,
12228 			      pipe_config->dpll_hw_state.pll2,
12229 			      pipe_config->dpll_hw_state.pll3,
12230 			      pipe_config->dpll_hw_state.pll6,
12231 			      pipe_config->dpll_hw_state.pll8,
12232 			      pipe_config->dpll_hw_state.pll9,
12233 			      pipe_config->dpll_hw_state.pll10,
12234 			      pipe_config->dpll_hw_state.pcsdw12);
12235 	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
12236 		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
12237 			      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
12238 			      pipe_config->ddi_pll_sel,
12239 			      pipe_config->dpll_hw_state.ctrl1,
12240 			      pipe_config->dpll_hw_state.cfgcr1,
12241 			      pipe_config->dpll_hw_state.cfgcr2);
12242 	} else if (HAS_DDI(dev)) {
12243 		DRM_DEBUG_KMS("ddi_pll_sel: 0x%x; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
12244 			      pipe_config->ddi_pll_sel,
12245 			      pipe_config->dpll_hw_state.wrpll,
12246 			      pipe_config->dpll_hw_state.spll);
12247 	} else {
12248 		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
12249 			      "fp0: 0x%x, fp1: 0x%x\n",
12250 			      pipe_config->dpll_hw_state.dpll,
12251 			      pipe_config->dpll_hw_state.dpll_md,
12252 			      pipe_config->dpll_hw_state.fp0,
12253 			      pipe_config->dpll_hw_state.fp1);
12254 	}
12255 
12256 	DRM_DEBUG_KMS("planes on this crtc\n");
12257 	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
12258 		intel_plane = to_intel_plane(plane);
12259 		if (intel_plane->pipe != crtc->pipe)
12260 			continue;
12261 
12262 		state = to_intel_plane_state(plane->state);
12263 		fb = state->base.fb;
12264 		if (!fb) {
12265 			DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
12266 				"disabled, scaler_id = %d\n",
12267 				plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
12268 				plane->base.id, intel_plane->pipe,
12269 				(crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
12270 				drm_plane_index(plane), state->scaler_id);
12271 			continue;
12272 		}
12273 
12274 		DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled\n",
12275 			plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
12276 			plane->base.id, intel_plane->pipe,
12277 			crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
12278 			drm_plane_index(plane));
12279 		DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x\n",
12280 			fb->base.id, fb->width, fb->height, fb->pixel_format);
12281 		DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
12282 			state->scaler_id,
12283 			state->src.x1 >> 16, state->src.y1 >> 16,
12284 			drm_rect_width(&state->src) >> 16,
12285 			drm_rect_height(&state->src) >> 16,
12286 			state->dst.x1, state->dst.y1,
12287 			drm_rect_width(&state->dst), drm_rect_height(&state->dst));
12288 	}
12289 }
12290 
12291 static bool check_digital_port_conflicts(struct drm_atomic_state *state)
12292 {
12293 	struct drm_device *dev = state->dev;
12294 	struct drm_connector *connector;
12295 	unsigned int used_ports = 0;
12296 
12297 	/*
12298 	 * Walk the connector list instead of the encoder
12299 	 * list to detect the problem on ddi platforms
12300 	 * where there's just one encoder per digital port.
12301 	 */
12302 	drm_for_each_connector(connector, dev) {
12303 		struct drm_connector_state *connector_state;
12304 		struct intel_encoder *encoder;
12305 
12306 		connector_state = drm_atomic_get_existing_connector_state(state, connector);
12307 		if (!connector_state)
12308 			connector_state = connector->state;
12309 
12310 		if (!connector_state->best_encoder)
12311 			continue;
12312 
12313 		encoder = to_intel_encoder(connector_state->best_encoder);
12314 
12315 		WARN_ON(!connector_state->crtc);
12316 
12317 		switch (encoder->type) {
12318 			unsigned int port_mask;
12319 		case INTEL_OUTPUT_UNKNOWN:
12320 			if (WARN_ON(!HAS_DDI(dev)))
12321 				break;
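			/* fall through: an UNKNOWN encoder still occupies a digital port */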
12322 		case INTEL_OUTPUT_DISPLAYPORT:
12323 		case INTEL_OUTPUT_HDMI:
12324 		case INTEL_OUTPUT_EDP:
12325 			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
12326 
12327 			/* the same port mustn't appear more than once */
12328 			if (used_ports & port_mask)
12329 				return false;
12330 
12331 			used_ports |= port_mask;
12332 		default:
12333 			break;
12334 		}
12335 	}
12336 
12337 	return true;
12338 }
12339 
12340 static void
12341 clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
12342 {
12343 	struct drm_crtc_state tmp_state;
12344 	struct intel_crtc_scaler_state scaler_state;
12345 	struct intel_dpll_hw_state dpll_hw_state;
12346 	struct intel_shared_dpll *shared_dpll;
12347 	uint32_t ddi_pll_sel;
12348 	bool force_thru;
12349 
12350 	/* FIXME: before the switch to atomic started, a new pipe_config was
12351 	 * kzalloc'd. Code that depends on any field being zero should be
12352 	 * fixed, so that the crtc_state can be safely duplicated. For now,
12353 	 * only fields that are known to not cause problems are preserved. */
12354 
12355 	tmp_state = crtc_state->base;
12356 	scaler_state = crtc_state->scaler_state;
12357 	shared_dpll = crtc_state->shared_dpll;
12358 	dpll_hw_state = crtc_state->dpll_hw_state;
12359 	ddi_pll_sel = crtc_state->ddi_pll_sel;
12360 	force_thru = crtc_state->pch_pfit.force_thru;
12361 
12362 	memset(crtc_state, 0, sizeof(*crtc_state));
12363 
12364 	crtc_state->base = tmp_state;
12365 	crtc_state->scaler_state = scaler_state;
12366 	crtc_state->shared_dpll = shared_dpll;
12367 	crtc_state->dpll_hw_state = dpll_hw_state;
12368 	crtc_state->ddi_pll_sel = ddi_pll_sel;
12369 	crtc_state->pch_pfit.force_thru = force_thru;
12370 }
12371 
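/*
 * Compute the complete pipe configuration for @crtc: sanitize the sync
 * polarity flags, derive the baseline bpp and the pipe source size,
 * then let each encoder and finally the crtc itself adjust (or reject)
 * the mode. One RETRY round trip is allowed for bandwidth-constrained
 * configurations.
 */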
12372 static int
12373 intel_modeset_pipe_config(struct drm_crtc *crtc,
12374 			  struct intel_crtc_state *pipe_config)
12375 {
12376 	struct drm_atomic_state *state = pipe_config->base.state;
12377 	struct intel_encoder *encoder;
12378 	struct drm_connector *connector;
12379 	struct drm_connector_state *connector_state;
12380 	int base_bpp, ret = -EINVAL;
12381 	int i;
12382 	bool retry = true;
12383 
12384 	clear_intel_crtc_state(pipe_config);
12385 
12386 	pipe_config->cpu_transcoder =
12387 		(enum transcoder) to_intel_crtc(crtc)->pipe;
12388 
12389 	/*
12390 	 * Sanitize sync polarity flags based on requested ones. If neither
12391 	 * positive nor negative polarity is requested, treat this as meaning
12392 	 * negative polarity.
12393 	 */
12394 	if (!(pipe_config->base.adjusted_mode.flags &
12395 	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
12396 		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
12397 
12398 	if (!(pipe_config->base.adjusted_mode.flags &
12399 	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
12400 		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
12401 
12402 	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
12403 					     pipe_config);
12404 	if (base_bpp < 0)
12405 		goto fail;
12406 
12407 	/*
12408 	 * Determine the real pipe dimensions. Note that stereo modes can
12409 	 * increase the actual pipe size due to the frame doubling and
12410 	 * insertion of additional space for blanks between the frame. This
12411 	 * is stored in the crtc timings. We use the requested mode to do this
12412 	 * computation to clearly distinguish it from the adjusted mode, which
12413 	 * can be changed by the connectors in the below retry loop.
12414 	 */
12415 	drm_crtc_get_hv_timing(&pipe_config->base.mode,
12416 			       &pipe_config->pipe_src_w,
12417 			       &pipe_config->pipe_src_h);
12418 
12419 encoder_retry:
12420 	/* Ensure the port clock defaults are reset when retrying. */
12421 	pipe_config->port_clock = 0;
12422 	pipe_config->pixel_multiplier = 1;
12423 
12424 	/* Fill in default crtc timings, allow encoders to overwrite them. */
12425 	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
12426 			      CRTC_STEREO_DOUBLE);
12427 
12428 	/* Pass our mode to the connectors and the CRTC to give them a chance to
12429 	 * adjust it according to limitations or connector properties, and also
12430 	 * a chance to reject the mode entirely.
12431 	 */
12432 	for_each_connector_in_state(state, connector, connector_state, i) {
12433 		if (connector_state->crtc != crtc)
12434 			continue;
12435 
12436 		encoder = to_intel_encoder(connector_state->best_encoder);
12437 
12438 		if (!encoder->compute_config(encoder, pipe_config)) {
12439 			DRM_DEBUG_KMS("Encoder config failure\n");
12440 			goto fail;
12441 		}
12442 	}
12443 
12444 	/* Set default port clock if not overwritten by the encoder. Needs to be
12445 	 * done afterwards in case the encoder adjusts the mode. */
12446 	if (!pipe_config->port_clock)
12447 		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
12448 			* pipe_config->pixel_multiplier;
12449 
12450 	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
12451 	if (ret < 0) {
12452 		DRM_DEBUG_KMS("CRTC fixup failed\n");
12453 		goto fail;
12454 	}
12455 
12456 	if (ret == RETRY) {
12457 		if (WARN(!retry, "loop in pipe configuration computation\n")) {
12458 			ret = -EINVAL;
12459 			goto fail;
12460 		}
12461 
12462 		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
12463 		retry = false;
12464 		goto encoder_retry;
12465 	}
12466 
12467 	/* Dithering seems to not pass bits through correctly when it should, so
12468 	 * only enable it on 6bpc panels. */
12469 	pipe_config->dither = pipe_config->pipe_bpp == 6*3;
12470 	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
12471 		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
12472 
12473 fail:
12474 	return ret;
12475 }
12476 
12477 static void
12478 intel_modeset_update_crtc_state(struct drm_atomic_state *state)
12479 {
12480 	struct drm_crtc *crtc;
12481 	struct drm_crtc_state *crtc_state;
12482 	int i;
12483 
12484 	/* Double check state. */
12485 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
12486 		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
12487 
12488 		/* Update hwmode for vblank functions */
12489 		if (crtc->state->active)
12490 			crtc->hwmode = crtc->state->adjusted_mode;
12491 		else
12492 			crtc->hwmode.crtc_clock = 0;
12493 
12494 		/*
12495 		 * Update legacy state to satisfy fbc code. This can
12496 		 * be removed when fbc uses the atomic state.
12497 		 */
12498 		if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
12499 			struct drm_plane_state *plane_state = crtc->primary->state;
12500 
12501 			crtc->primary->fb = plane_state->fb;
12502 			crtc->x = plane_state->src_x >> 16;
12503 			crtc->y = plane_state->src_y >> 16;
12504 		}
12505 	}
12506 }
12507 
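/*
 * Consider two clocks equal when they differ by less than 5% of their
 * combined value; the "+ clock1 + clock2" term folds the 100% baseline
 * into the "< 105" comparison.
 */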
12508 static bool intel_fuzzy_clock_check(int clock1, int clock2)
12509 {
12510 	int diff;
12511 
12512 	if (clock1 == clock2)
12513 		return true;
12514 
12515 	if (!clock1 || !clock2)
12516 		return false;
12517 
12518 	diff = abs(clock1 - clock2);
12519 
12520 	if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
12521 		return true;
12522 
12523 	return false;
12524 }
12525 
12526 #define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
12527 	list_for_each_entry((intel_crtc), \
12528 			    &(dev)->mode_config.crtc_list, \
12529 			    base.head) \
12530 		for_each_if (mask & (1 << (intel_crtc)->pipe))
12531 
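/*
 * Compare two M/N ratios. Unless an exact match is requested, the pair
 * with the smaller N is scaled up by powers of two until the Ns meet,
 * after which the Ms are compared fuzzily.
 */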
12532 static bool
12533 intel_compare_m_n(unsigned int m, unsigned int n,
12534 		  unsigned int m2, unsigned int n2,
12535 		  bool exact)
12536 {
12537 	if (m == m2 && n == n2)
12538 		return true;
12539 
12540 	if (exact || !m || !n || !m2 || !n2)
12541 		return false;
12542 
12543 	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12544 
12545 	if (n > n2) {
12546 		while (n > n2) {
12547 			m2 <<= 1;
12548 			n2 <<= 1;
12549 		}
12550 	} else if (n < n2) {
12551 		while (n < n2) {
12552 			m <<= 1;
12553 			n <<= 1;
12554 		}
12555 	}
12556 
12557 	if (n != n2)
12558 		return false;
12559 
12560 	return intel_fuzzy_clock_check(m, m2);
12561 }
12562 
12563 static bool
12564 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12565 		       struct intel_link_m_n *m2_n2,
12566 		       bool adjust)
12567 {
12568 	if (m_n->tu == m2_n2->tu &&
12569 	    intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12570 			      m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
12571 	    intel_compare_m_n(m_n->link_m, m_n->link_n,
12572 			      m2_n2->link_m, m2_n2->link_n, !adjust)) {
12573 		if (adjust)
12574 			*m2_n2 = *m_n;
12575 
12576 		return true;
12577 	}
12578 
12579 	return false;
12580 }
12581 
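/*
 * Field-by-field comparison of two pipe configs, typically a
 * software-computed state against one read back from the hardware.
 * With @adjust set, mismatches are only logged at debug level and
 * ambiguous M/N values in @pipe_config are rewritten to match;
 * otherwise every mismatch is reported as an error.
 */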
12582 static bool
12583 intel_pipe_config_compare(struct drm_device *dev,
12584 			  struct intel_crtc_state *current_config,
12585 			  struct intel_crtc_state *pipe_config,
12586 			  bool adjust)
12587 {
12588 	bool ret = true;
12589 
12590 #define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
12591 	do { \
12592 		if (!adjust) \
12593 			DRM_ERROR(fmt, ##__VA_ARGS__); \
12594 		else \
12595 			DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
12596 	} while (0)
12597 
12598 #define PIPE_CONF_CHECK_X(name)	\
12599 	if (current_config->name != pipe_config->name) { \
12600 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12601 			  "(expected 0x%08x, found 0x%08x)\n", \
12602 			  current_config->name, \
12603 			  pipe_config->name); \
12604 		ret = false; \
12605 	}
12606 
12607 #define PIPE_CONF_CHECK_I(name)	\
12608 	if (current_config->name != pipe_config->name) { \
12609 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12610 			  "(expected %i, found %i)\n", \
12611 			  current_config->name, \
12612 			  pipe_config->name); \
12613 		ret = false; \
12614 	}
12615 
12616 #define PIPE_CONF_CHECK_P(name)	\
12617 	if (current_config->name != pipe_config->name) { \
12618 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12619 			  "(expected %p, found %p)\n", \
12620 			  current_config->name, \
12621 			  pipe_config->name); \
12622 		ret = false; \
12623 	}
12624 
12625 #define PIPE_CONF_CHECK_M_N(name) \
12626 	if (!intel_compare_link_m_n(&current_config->name, \
12627 				    &pipe_config->name,\
12628 				    adjust)) { \
12629 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12630 			  "(expected tu %i gmch %i/%i link %i/%i, " \
12631 			  "found tu %i, gmch %i/%i link %i/%i)\n", \
12632 			  current_config->name.tu, \
12633 			  current_config->name.gmch_m, \
12634 			  current_config->name.gmch_n, \
12635 			  current_config->name.link_m, \
12636 			  current_config->name.link_n, \
12637 			  pipe_config->name.tu, \
12638 			  pipe_config->name.gmch_m, \
12639 			  pipe_config->name.gmch_n, \
12640 			  pipe_config->name.link_m, \
12641 			  pipe_config->name.link_n); \
12642 		ret = false; \
12643 	}
12644 
12645 /* This is required for BDW+ where there is only one set of registers for
12646  * switching between the high and low refresh rates (RR).
12647  * This macro can be used whenever a comparison has to be made between one
12648  * hw state and multiple sw state variables.
12649  */
12650 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
12651 	if (!intel_compare_link_m_n(&current_config->name, \
12652 				    &pipe_config->name, adjust) && \
12653 	    !intel_compare_link_m_n(&current_config->alt_name, \
12654 				    &pipe_config->name, adjust)) { \
12655 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12656 			  "(expected tu %i gmch %i/%i link %i/%i, " \
12657 			  "or tu %i gmch %i/%i link %i/%i, " \
12658 			  "found tu %i, gmch %i/%i link %i/%i)\n", \
12659 			  current_config->name.tu, \
12660 			  current_config->name.gmch_m, \
12661 			  current_config->name.gmch_n, \
12662 			  current_config->name.link_m, \
12663 			  current_config->name.link_n, \
12664 			  current_config->alt_name.tu, \
12665 			  current_config->alt_name.gmch_m, \
12666 			  current_config->alt_name.gmch_n, \
12667 			  current_config->alt_name.link_m, \
12668 			  current_config->alt_name.link_n, \
12669 			  pipe_config->name.tu, \
12670 			  pipe_config->name.gmch_m, \
12671 			  pipe_config->name.gmch_n, \
12672 			  pipe_config->name.link_m, \
12673 			  pipe_config->name.link_n); \
12674 		ret = false; \
12675 	}
12676 
12677 #define PIPE_CONF_CHECK_FLAGS(name, mask)	\
12678 	if ((current_config->name ^ pipe_config->name) & (mask)) { \
12679 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
12680 			  "(expected %i, found %i)\n", \
12681 			  current_config->name & (mask), \
12682 			  pipe_config->name & (mask)); \
12683 		ret = false; \
12684 	}
12685 
12686 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
12687 	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
12688 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12689 			  "(expected %i, found %i)\n", \
12690 			  current_config->name, \
12691 			  pipe_config->name); \
12692 		ret = false; \
12693 	}
12694 
12695 #define PIPE_CONF_QUIRK(quirk)	\
12696 	((current_config->quirks | pipe_config->quirks) & (quirk))
12697 
12698 	PIPE_CONF_CHECK_I(cpu_transcoder);
12699 
12700 	PIPE_CONF_CHECK_I(has_pch_encoder);
12701 	PIPE_CONF_CHECK_I(fdi_lanes);
12702 	PIPE_CONF_CHECK_M_N(fdi_m_n);
12703 
12704 	PIPE_CONF_CHECK_I(has_dp_encoder);
12705 	PIPE_CONF_CHECK_I(lane_count);
12706 
12707 	if (INTEL_INFO(dev)->gen < 8) {
12708 		PIPE_CONF_CHECK_M_N(dp_m_n);
12709 
12710 		if (current_config->has_drrs)
12711 			PIPE_CONF_CHECK_M_N(dp_m2_n2);
12712 	} else {
12713 		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
	}
12714 
12715 	PIPE_CONF_CHECK_I(has_dsi_encoder);
12716 
12717 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12718 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12719 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12720 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12721 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12722 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
12723 
12724 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12725 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12726 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12727 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12728 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12729 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
12730 
12731 	PIPE_CONF_CHECK_I(pixel_multiplier);
12732 	PIPE_CONF_CHECK_I(has_hdmi_sink);
12733 	if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
12734 	    IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
12735 		PIPE_CONF_CHECK_I(limited_color_range);
12736 	PIPE_CONF_CHECK_I(has_infoframe);
12737 
12738 	PIPE_CONF_CHECK_I(has_audio);
12739 
12740 	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12741 			      DRM_MODE_FLAG_INTERLACE);
12742 
12743 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
12744 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12745 				      DRM_MODE_FLAG_PHSYNC);
12746 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12747 				      DRM_MODE_FLAG_NHSYNC);
12748 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12749 				      DRM_MODE_FLAG_PVSYNC);
12750 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12751 				      DRM_MODE_FLAG_NVSYNC);
12752 	}
12753 
12754 	PIPE_CONF_CHECK_X(gmch_pfit.control);
12755 	/* pfit ratios are autocomputed by the hw on gen4+ */
12756 	if (INTEL_INFO(dev)->gen < 4)
12757 		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
12758 	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
12759 
12760 	if (!adjust) {
12761 		PIPE_CONF_CHECK_I(pipe_src_w);
12762 		PIPE_CONF_CHECK_I(pipe_src_h);
12763 
12764 		PIPE_CONF_CHECK_I(pch_pfit.enabled);
12765 		if (current_config->pch_pfit.enabled) {
12766 			PIPE_CONF_CHECK_X(pch_pfit.pos);
12767 			PIPE_CONF_CHECK_X(pch_pfit.size);
12768 		}
12769 
12770 		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
12771 	}
12772 
12773 	/* BDW+ don't expose a synchronous way to read the IPS state */
12774 	if (IS_HASWELL(dev))
12775 		PIPE_CONF_CHECK_I(ips_enabled);
12776 
12777 	PIPE_CONF_CHECK_I(double_wide);
12778 
12779 	PIPE_CONF_CHECK_X(ddi_pll_sel);
12780 
12781 	PIPE_CONF_CHECK_P(shared_dpll);
12782 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
12783 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
12784 	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12785 	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
12786 	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
12787 	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
12788 	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12789 	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12790 	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
12791 
12792 	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
12793 	PIPE_CONF_CHECK_X(dsi_pll.div);
12794 
12795 	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
12796 		PIPE_CONF_CHECK_I(pipe_bpp);
12797 
12798 	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
12799 	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
12800 
12801 #undef PIPE_CONF_CHECK_X
12802 #undef PIPE_CONF_CHECK_I
12803 #undef PIPE_CONF_CHECK_P
12804 #undef PIPE_CONF_CHECK_FLAGS
12805 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
12806 #undef PIPE_CONF_QUIRK
12807 #undef INTEL_ERR_OR_DBG_KMS
12808 
12809 	return ret;
12810 }
12811 
12812 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
12813 					   const struct intel_crtc_state *pipe_config)
12814 {
12815 	if (pipe_config->has_pch_encoder) {
12816 		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
12817 							    &pipe_config->fdi_m_n);
12818 		int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
12819 
12820 		/*
12821 		 * FDI already provided one idea for the dotclock.
12822 		 * Yell if the encoder disagrees.
12823 		 */
12824 		WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
12825 		     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
12826 		     fdi_dotclock, dotclock);
12827 	}
12828 }
12829 
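/*
 * Gen9+ only: compare the DDB allocation read back from the hardware
 * against the software tracking, for every plane plus the cursor.
 */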
12830 static void verify_wm_state(struct drm_crtc *crtc,
12831 			    struct drm_crtc_state *new_state)
12832 {
12833 	struct drm_device *dev = crtc->dev;
12834 	struct drm_i915_private *dev_priv = dev->dev_private;
12835 	struct skl_ddb_allocation hw_ddb, *sw_ddb;
12836 	struct skl_ddb_entry *hw_entry, *sw_entry;
12837 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12838 	const enum i915_pipe pipe = intel_crtc->pipe;
12839 	int plane;
12840 
12841 	if (INTEL_INFO(dev)->gen < 9 || !new_state->active)
12842 		return;
12843 
12844 	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
12845 	sw_ddb = &dev_priv->wm.skl_hw.ddb;
12846 
12847 	/* planes */
12848 	for_each_plane(dev_priv, pipe, plane) {
12849 		hw_entry = &hw_ddb.plane[pipe][plane];
12850 		sw_entry = &sw_ddb->plane[pipe][plane];
12851 
12852 		if (skl_ddb_entry_equal(hw_entry, sw_entry))
12853 			continue;
12854 
12855 		DRM_ERROR("mismatch in DDB state pipe %c plane %d "
12856 			  "(expected (%u,%u), found (%u,%u))\n",
12857 			  pipe_name(pipe), plane + 1,
12858 			  sw_entry->start, sw_entry->end,
12859 			  hw_entry->start, hw_entry->end);
12860 	}
12861 
12862 	/* cursor */
12863 	hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
12864 	sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
12865 
12866 	if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
12867 		DRM_ERROR("mismatch in DDB state pipe %c cursor "
12868 			  "(expected (%u,%u), found (%u,%u))\n",
12869 			  pipe_name(pipe),
12870 			  sw_entry->start, sw_entry->end,
12871 			  hw_entry->start, hw_entry->end);
12872 	}
12873 }
12874 
12875 static void
12876 verify_connector_state(struct drm_device *dev, struct drm_crtc *crtc)
12877 {
12878 	struct drm_connector *connector;
12879 
12880 	drm_for_each_connector(connector, dev) {
12881 		struct drm_encoder *encoder = connector->encoder;
12882 		struct drm_connector_state *state = connector->state;
12883 
12884 		if (state->crtc != crtc)
12885 			continue;
12886 
12887 		intel_connector_verify_state(to_intel_connector(connector));
12888 
12889 		I915_STATE_WARN(state->best_encoder != encoder,
12890 		     "connector's atomic encoder doesn't match legacy encoder\n");
12891 	}
12892 }
12893 
12894 static void
12895 verify_encoder_state(struct drm_device *dev)
12896 {
12897 	struct intel_encoder *encoder;
12898 	struct intel_connector *connector;
12899 
12900 	for_each_intel_encoder(dev, encoder) {
12901 		bool enabled = false;
12902 		enum i915_pipe pipe;
12903 
12904 		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
12905 			      encoder->base.base.id,
12906 			      encoder->base.name);
12907 
12908 		for_each_intel_connector(dev, connector) {
12909 			if (connector->base.state->best_encoder != &encoder->base)
12910 				continue;
12911 			enabled = true;
12912 
12913 			I915_STATE_WARN(connector->base.state->crtc !=
12914 					encoder->base.crtc,
12915 			     "connector's crtc doesn't match encoder crtc\n");
12916 		}
12917 
12918 		I915_STATE_WARN(!!encoder->base.crtc != enabled,
12919 		     "encoder's enabled state mismatch "
12920 		     "(expected %i, found %i)\n",
12921 		     !!encoder->base.crtc, enabled);
12922 
12923 		if (!encoder->base.crtc) {
12924 			bool active;
12925 
12926 			active = encoder->get_hw_state(encoder, &pipe);
12927 			I915_STATE_WARN(active,
12928 			     "encoder detached but still enabled on pipe %c.\n",
12929 			     pipe_name(pipe));
12930 		}
12931 	}
12932 }
12933 
12934 static void
12935 verify_crtc_state(struct drm_crtc *crtc,
12936 		  struct drm_crtc_state *old_crtc_state,
12937 		  struct drm_crtc_state *new_crtc_state)
12938 {
12939 	struct drm_device *dev = crtc->dev;
12940 	struct drm_i915_private *dev_priv = dev->dev_private;
12941 	struct intel_encoder *encoder;
12942 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12943 	struct intel_crtc_state *pipe_config, *sw_config;
12944 	struct drm_atomic_state *old_state;
12945 	bool active;
12946 
12947 	old_state = old_crtc_state->state;
12948 	__drm_atomic_helper_crtc_destroy_state(old_crtc_state);
12949 	pipe_config = to_intel_crtc_state(old_crtc_state);
12950 	memset(pipe_config, 0, sizeof(*pipe_config));
12951 	pipe_config->base.crtc = crtc;
12952 	pipe_config->base.state = old_state;
12953 
12954 	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
12955 
12956 	active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
12957 
12958 	/* hw state is inconsistent with the pipe quirk */
12959 	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
12960 	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
12961 		active = new_crtc_state->active;
12962 
12963 	I915_STATE_WARN(new_crtc_state->active != active,
12964 	     "crtc active state doesn't match with hw state "
12965 	     "(expected %i, found %i)\n", new_crtc_state->active, active);
12966 
12967 	I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
12968 	     "transitional active state does not match atomic hw state "
12969 	     "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);
12970 
12971 	for_each_encoder_on_crtc(dev, crtc, encoder) {
12972 		enum i915_pipe pipe;
12973 
12974 		active = encoder->get_hw_state(encoder, &pipe);
12975 		I915_STATE_WARN(active != new_crtc_state->active,
12976 			"[ENCODER:%i] active %i with crtc active %i\n",
12977 			encoder->base.base.id, active, new_crtc_state->active);
12978 
12979 		I915_STATE_WARN(active && intel_crtc->pipe != pipe,
12980 				"Encoder connected to wrong pipe %c\n",
12981 				pipe_name(pipe));
12982 
12983 		if (active)
12984 			encoder->get_config(encoder, pipe_config);
12985 	}
12986 
12987 	if (!new_crtc_state->active)
12988 		return;
12989 
12990 	intel_pipe_config_sanity_check(dev_priv, pipe_config);
12991 
12992 	sw_config = to_intel_crtc_state(crtc->state);
12993 	if (!intel_pipe_config_compare(dev, sw_config,
12994 				       pipe_config, false)) {
12995 		I915_STATE_WARN(1, "pipe state doesn't match!\n");
12996 		intel_dump_pipe_config(intel_crtc, pipe_config,
12997 				       "[hw state]");
12998 		intel_dump_pipe_config(intel_crtc, sw_config,
12999 				       "[sw state]");
13000 	}
13001 }
13002 
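/*
 * Cross-check a shared DPLL's software bookkeeping (on/off state,
 * active and enabled crtc masks, cached hw state) against the
 * hardware. With a NULL @crtc only the global masks are verified.
 */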
13003 static void
13004 verify_single_dpll_state(struct drm_i915_private *dev_priv,
13005 			 struct intel_shared_dpll *pll,
13006 			 struct drm_crtc *crtc,
13007 			 struct drm_crtc_state *new_state)
13008 {
13009 	struct intel_dpll_hw_state dpll_hw_state;
13010 	unsigned crtc_mask;
13011 	bool active;
13012 
13013 	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
13014 
13015 	DRM_DEBUG_KMS("%s\n", pll->name);
13016 
13017 	active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state);
13018 
13019 	if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) {
13020 		I915_STATE_WARN(!pll->on && pll->active_mask,
13021 		     "pll in active use but not on in sw tracking\n");
13022 		I915_STATE_WARN(pll->on && !pll->active_mask,
13023 		     "pll is on but not used by any active crtc\n");
13024 		I915_STATE_WARN(pll->on != active,
13025 		     "pll on state mismatch (expected %i, found %i)\n",
13026 		     pll->on, active);
13027 	}
13028 
13029 	if (!crtc) {
13030 		I915_STATE_WARN(pll->active_mask & ~pll->config.crtc_mask,
13031 				"more active pll users than references: %x vs %x\n",
13032 				pll->active_mask, pll->config.crtc_mask);
13033 
13034 		return;
13035 	}
13036 
13037 	crtc_mask = 1 << drm_crtc_index(crtc);
13038 
13039 	if (new_state->active)
13040 		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
13041 				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
13042 				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
13043 	else
13044 		I915_STATE_WARN(pll->active_mask & crtc_mask,
13045 				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
13046 				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
13047 
13048 	I915_STATE_WARN(!(pll->config.crtc_mask & crtc_mask),
13049 			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
13050 			crtc_mask, pll->config.crtc_mask);
13051 
13052 	I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state,
13053 					  &dpll_hw_state,
13054 					  sizeof(dpll_hw_state)),
13055 			"pll hw state mismatch\n");
13056 }
13057 
13058 static void
13059 verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
13060 			 struct drm_crtc_state *old_crtc_state,
13061 			 struct drm_crtc_state *new_crtc_state)
13062 {
13063 	struct drm_i915_private *dev_priv = dev->dev_private;
13064 	struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
13065 	struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
13066 
13067 	if (new_state->shared_dpll)
13068 		verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
13069 
13070 	if (old_state->shared_dpll &&
13071 	    old_state->shared_dpll != new_state->shared_dpll) {
13072 		unsigned crtc_mask = 1 << drm_crtc_index(crtc);
13073 		struct intel_shared_dpll *pll = old_state->shared_dpll;
13074 
13075 		I915_STATE_WARN(pll->active_mask & crtc_mask,
13076 				"pll active mismatch (didn't expect pipe %c in active mask)\n",
13077 				pipe_name(drm_crtc_index(crtc)));
13078 		I915_STATE_WARN(pll->config.crtc_mask & crtc_mask,
13079 				"pll enabled crtcs mismatch (found %x in enabled mask)\n",
13080 				pipe_name(drm_crtc_index(crtc)));
13081 	}
13082 }
13083 
13084 static void
13085 intel_modeset_verify_crtc(struct drm_crtc *crtc,
13086 			 struct drm_crtc_state *old_state,
13087 			 struct drm_crtc_state *new_state)
13088 {
13089 	if (!needs_modeset(new_state) &&
13090 	    !to_intel_crtc_state(new_state)->update_pipe)
13091 		return;
13092 
13093 	verify_wm_state(crtc, new_state);
13094 	verify_connector_state(crtc->dev, crtc);
13095 	verify_crtc_state(crtc, old_state, new_state);
13096 	verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
13097 }
13098 
13099 static void
13100 verify_disabled_dpll_state(struct drm_device *dev)
13101 {
13102 	struct drm_i915_private *dev_priv = dev->dev_private;
13103 	int i;
13104 
13105 	for (i = 0; i < dev_priv->num_shared_dpll; i++)
13106 		verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
13107 }
13108 
13109 static void
13110 intel_modeset_verify_disabled(struct drm_device *dev)
13111 {
13112 	verify_encoder_state(dev);
13113 	verify_connector_state(dev, NULL);
13114 	verify_disabled_dpll_state(dev);
13115 }
13116 
13117 static void update_scanline_offset(struct intel_crtc *crtc)
13118 {
13119 	struct drm_device *dev = crtc->base.dev;
13120 
13121 	/*
13122 	 * The scanline counter increments at the leading edge of hsync.
13123 	 *
13124 	 * On most platforms it starts counting from vtotal-1 on the
13125 	 * first active line. That means the scanline counter value is
13126 	 * always one less than what we would expect. I.e. just after the
13127 	 * start of vblank, which also occurs at start of hsync (on the
13128 	 * last active line), the scanline counter will read vblank_start-1.
13129 	 *
13130 	 * On gen2 the scanline counter starts counting from 1 instead
13131 	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
13132 	 * to keep the value positive), instead of adding one.
13133 	 *
13134 	 * On HSW+ the behaviour of the scanline counter depends on the output
13135 	 * type. For DP ports it behaves like most other platforms, but on HDMI
13136 	 * there's an extra 1 line difference. So we need to add two instead of
13137 	 * one to the value.
13138 	 */
13139 	if (IS_GEN2(dev)) {
13140 		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
13141 		int vtotal;
13142 
13143 		vtotal = adjusted_mode->crtc_vtotal;
13144 		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
13145 			vtotal /= 2;
13146 
13147 		crtc->scanline_offset = vtotal - 1;
13148 	} else if (HAS_DDI(dev) &&
13149 		   intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
13150 		crtc->scanline_offset = 2;
13151 	} else {
13152 		crtc->scanline_offset = 1;
	}
13153 }
13154 
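/*
 * Release the shared DPLL reference of every crtc undergoing a full
 * modeset, so that clock computation can pick a PLL from scratch.
 */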
13155 static void intel_modeset_clear_plls(struct drm_atomic_state *state)
13156 {
13157 	struct drm_device *dev = state->dev;
13158 	struct drm_i915_private *dev_priv = to_i915(dev);
13159 	struct intel_shared_dpll_config *shared_dpll = NULL;
13160 	struct drm_crtc *crtc;
13161 	struct drm_crtc_state *crtc_state;
13162 	int i;
13163 
13164 	if (!dev_priv->display.crtc_compute_clock)
13165 		return;
13166 
13167 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13168 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13169 		struct intel_shared_dpll *old_dpll =
13170 			to_intel_crtc_state(crtc->state)->shared_dpll;
13171 
13172 		if (!needs_modeset(crtc_state))
13173 			continue;
13174 
13175 		to_intel_crtc_state(crtc_state)->shared_dpll = NULL;
13176 
13177 		if (!old_dpll)
13178 			continue;
13179 
13180 		if (!shared_dpll)
13181 			shared_dpll = intel_atomic_get_shared_dpll_state(state);
13182 
13183 		intel_shared_dpll_config_put(shared_dpll, old_dpll, intel_crtc);
13184 	}
13185 }
13186 
13187 /*
13188  * This implements the workaround described in the "notes" section of the mode
13189  * set sequence documentation. When going from no pipes or single pipe to
13190  * multiple pipes, and planes are enabled after the pipe, we need to wait at
13191  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
13192  */
13193 static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
13194 {
13195 	struct drm_crtc_state *crtc_state;
13196 	struct intel_crtc *intel_crtc;
13197 	struct drm_crtc *crtc;
13198 	struct intel_crtc_state *first_crtc_state = NULL;
13199 	struct intel_crtc_state *other_crtc_state = NULL;
13200 	enum i915_pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
13201 	int i;
13202 
13203 	/* look at all crtcs that are going to be enabled during the modeset */
13204 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13205 		intel_crtc = to_intel_crtc(crtc);
13206 
13207 		if (!crtc_state->active || !needs_modeset(crtc_state))
13208 			continue;
13209 
13210 		if (first_crtc_state) {
13211 			other_crtc_state = to_intel_crtc_state(crtc_state);
13212 			break;
13213 		} else {
13214 			first_crtc_state = to_intel_crtc_state(crtc_state);
13215 			first_pipe = intel_crtc->pipe;
13216 		}
13217 	}
13218 
13219 	/* No workaround needed? */
13220 	if (!first_crtc_state)
13221 		return 0;
13222 
13223 	/* w/a possibly needed, check how many crtcs are already enabled. */
13224 	for_each_intel_crtc(state->dev, intel_crtc) {
13225 		struct intel_crtc_state *pipe_config;
13226 
13227 		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
13228 		if (IS_ERR(pipe_config))
13229 			return PTR_ERR(pipe_config);
13230 
13231 		pipe_config->hsw_workaround_pipe = INVALID_PIPE;
13232 
13233 		if (!pipe_config->base.active ||
13234 		    needs_modeset(&pipe_config->base))
13235 			continue;
13236 
13237 		/* 2 or more enabled crtcs means no need for w/a */
13238 		if (enabled_pipe != INVALID_PIPE)
13239 			return 0;
13240 
13241 		enabled_pipe = intel_crtc->pipe;
13242 	}
13243 
13244 	if (enabled_pipe != INVALID_PIPE)
13245 		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
13246 	else if (other_crtc_state)
13247 		other_crtc_state->hsw_workaround_pipe = first_pipe;
13248 
13249 	return 0;
13250 }
13251 
13252 static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13253 {
13254 	struct drm_crtc *crtc;
13255 	struct drm_crtc_state *crtc_state;
13256 	int ret = 0;
13257 
13258 	/* add all active pipes to the state */
13259 	for_each_crtc(state->dev, crtc) {
13260 		crtc_state = drm_atomic_get_crtc_state(state, crtc);
13261 		if (IS_ERR(crtc_state))
13262 			return PTR_ERR(crtc_state);
13263 
13264 		if (!crtc_state->active || needs_modeset(crtc_state))
13265 			continue;
13266 
13267 		crtc_state->mode_changed = true;
13268 
13269 		ret = drm_atomic_add_affected_connectors(state, crtc);
13270 		if (ret)
13271 			break;
13272 
13273 		ret = drm_atomic_add_affected_planes(state, crtc);
13274 		if (ret)
13275 			break;
13276 	}
13277 
13278 	return ret;
13279 }
13280 
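/*
 * Additional checks for a full modeset: reject conflicting digital
 * port configurations, track the new active-crtc mask, recompute the
 * cdclk (forcing a modeset on all pipes when the device cdclk has to
 * change), drop stale shared-DPLL references and apply the Haswell
 * planes workaround.
 */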
13281 static int intel_modeset_checks(struct drm_atomic_state *state)
13282 {
13283 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13284 	struct drm_i915_private *dev_priv = state->dev->dev_private;
13285 	struct drm_crtc *crtc;
13286 	struct drm_crtc_state *crtc_state;
13287 	int ret = 0, i;
13288 
13289 	if (!check_digital_port_conflicts(state)) {
13290 		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
13291 		return -EINVAL;
13292 	}
13293 
13294 	intel_state->modeset = true;
13295 	intel_state->active_crtcs = dev_priv->active_crtcs;
13296 
13297 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13298 		if (crtc_state->active)
13299 			intel_state->active_crtcs |= 1 << i;
13300 		else
13301 			intel_state->active_crtcs &= ~(1 << i);
13302 	}
13303 
13304 	/*
13305 	 * See if the config requires any additional preparation, e.g.
13306 	 * to adjust global state with pipes off.  We need to do this
13307 	 * here so we can get the modeset_pipe updated config for the new
13308 	 * mode set on this crtc.  For other crtcs we need to use the
13309 	 * adjusted_mode bits in the crtc directly.
13310 	 */
13311 	if (dev_priv->display.modeset_calc_cdclk) {
13312 		ret = dev_priv->display.modeset_calc_cdclk(state);
13313 
13314 		if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq)
13315 			ret = intel_modeset_all_pipes(state);
13316 
13317 		if (ret < 0)
13318 			return ret;
13319 
13320 		DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
13321 			      intel_state->cdclk, intel_state->dev_cdclk);
13322 	} else {
13323 		to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;
	}
13324 
13325 	intel_modeset_clear_plls(state);
13326 
13327 	if (IS_HASWELL(dev_priv))
13328 		return haswell_mode_set_planes_workaround(state);
13329 
13330 	return 0;
13331 }
13332 
13333 /*
13334  * Handle calculation of various watermark data at the end of the atomic check
13335  * phase.  The code here should be run after the per-crtc and per-plane 'check'
13336  * handlers to ensure that all derived state has been updated.
13337  */
13338 static void calc_watermark_data(struct drm_atomic_state *state)
13339 {
13340 	struct drm_device *dev = state->dev;
13341 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13342 	struct drm_crtc *crtc;
13343 	struct drm_crtc_state *cstate;
13344 	struct drm_plane *plane;
13345 	struct drm_plane_state *pstate;
13346 
13347 	/*
13348 	 * Calculate watermark configuration details now that derived
13349 	 * plane/crtc state is all properly updated.
13350 	 */
13351 	drm_for_each_crtc(crtc, dev) {
13352 		cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?:
13353 			crtc->state;
13354 
13355 		if (cstate->active)
13356 			intel_state->wm_config.num_pipes_active++;
13357 	}
13358 	drm_for_each_legacy_plane(plane, dev) {
13359 		pstate = drm_atomic_get_existing_plane_state(state, plane) ?:
13360 			plane->state;
13361 
13362 		if (!to_intel_plane_state(pstate)->visible)
13363 			continue;
13364 
13365 		intel_state->wm_config.sprites_enabled = true;
13366 		if (pstate->crtc_w != pstate->src_w >> 16 ||
13367 		    pstate->crtc_h != pstate->src_h >> 16)
13368 			intel_state->wm_config.sprites_scaled = true;
13369 	}
13370 }
13371 
13372 /**
13373  * intel_atomic_check - validate state object
13374  * @dev: drm device
13375  * @state: state to validate
13376  */
13377 static int intel_atomic_check(struct drm_device *dev,
13378 			      struct drm_atomic_state *state)
13379 {
13380 	struct drm_i915_private *dev_priv = to_i915(dev);
13381 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13382 	struct drm_crtc *crtc;
13383 	struct drm_crtc_state *crtc_state;
13384 	int ret, i;
13385 	bool any_ms = false;
13386 
13387 	ret = drm_atomic_helper_check_modeset(dev, state);
13388 	if (ret)
13389 		return ret;
13390 
13391 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13392 		struct intel_crtc_state *pipe_config =
13393 			to_intel_crtc_state(crtc_state);
13394 
13395 		/* Catch I915_MODE_FLAG_INHERITED */
13396 		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
13397 			crtc_state->mode_changed = true;
13398 
13399 		if (!crtc_state->enable) {
13400 			if (needs_modeset(crtc_state))
13401 				any_ms = true;
13402 			continue;
13403 		}
13404 
13405 		if (!needs_modeset(crtc_state))
13406 			continue;
13407 
13408 		/* FIXME: when only active_changed is set we shouldn't need to
13409 		 * do any state recomputation at all. */
13410 
13411 		ret = drm_atomic_add_affected_connectors(state, crtc);
13412 		if (ret)
13413 			return ret;
13414 
13415 		ret = intel_modeset_pipe_config(crtc, pipe_config);
13416 		if (ret)
13417 			return ret;
13418 
13419 		if (i915.fastboot &&
13420 		    intel_pipe_config_compare(dev,
13421 					to_intel_crtc_state(crtc->state),
13422 					pipe_config, true)) {
13423 			crtc_state->mode_changed = false;
13424 			to_intel_crtc_state(crtc_state)->update_pipe = true;
13425 		}
13426 
13427 		if (needs_modeset(crtc_state)) {
13428 			any_ms = true;
13429 
13430 			ret = drm_atomic_add_affected_planes(state, crtc);
13431 			if (ret)
13432 				return ret;
13433 		}
13434 
13435 		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
13436 				       needs_modeset(crtc_state) ?
13437 				       "[modeset]" : "[fastset]");
13438 	}
13439 
13440 	if (any_ms) {
13441 		ret = intel_modeset_checks(state);
13442 
13443 		if (ret)
13444 			return ret;
13445 	} else {
13446 		intel_state->cdclk = dev_priv->cdclk_freq;
	}
13447 
13448 	ret = drm_atomic_helper_check_planes(dev, state);
13449 	if (ret)
13450 		return ret;
13451 
13452 	intel_fbc_choose_crtc(dev_priv, state);
13453 	calc_watermark_data(state);
13454 
13455 	return 0;
13456 }
13457 
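/*
 * Wait for pending page flips, pin the new framebuffers, and for
 * blocking commits also wait for the GPU requests the planes depend
 * on.
 */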
13458 static int intel_atomic_prepare_commit(struct drm_device *dev,
13459 				       struct drm_atomic_state *state,
13460 				       bool nonblock)
13461 {
13462 	struct drm_i915_private *dev_priv = dev->dev_private;
13463 	struct drm_plane_state *plane_state;
13464 	struct drm_crtc_state *crtc_state;
13465 	struct drm_plane *plane;
13466 	struct drm_crtc *crtc;
13467 	int i, ret;
13468 
13469 	if (nonblock) {
13470 		DRM_DEBUG_KMS("i915 does not yet support nonblocking commit\n");
13471 		return -EINVAL;
13472 	}
13473 
13474 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13475 		if (state->legacy_cursor_update)
13476 			continue;
13477 
13478 		ret = intel_crtc_wait_for_pending_flips(crtc);
13479 		if (ret)
13480 			return ret;
13481 
13482 		if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
13483 			flush_workqueue(dev_priv->wq);
13484 	}
13485 
13486 	ret = mutex_lock_interruptible(&dev->struct_mutex);
13487 	if (ret)
13488 		return ret;
13489 
13490 	ret = drm_atomic_helper_prepare_planes(dev, state);
13491 	mutex_unlock(&dev->struct_mutex);
13492 
13493 	if (!ret && !nonblock) {
13494 		for_each_plane_in_state(state, plane, plane_state, i) {
13495 			struct intel_plane_state *intel_plane_state =
13496 				to_intel_plane_state(plane_state);
13497 
13498 			if (!intel_plane_state->wait_req)
13499 				continue;
13500 
13501 			ret = __i915_wait_request(intel_plane_state->wait_req,
13502 						  true, NULL, NULL);
13503 			if (ret) {
13504 				/* Any hang should be swallowed by the wait */
13505 				WARN_ON(ret == -EIO);
13506 				mutex_lock(&dev->struct_mutex);
13507 				drm_atomic_helper_cleanup_planes(dev, state);
13508 				mutex_unlock(&dev->struct_mutex);
13509 				break;
13510 			}
13511 		}
13512 	}
13513 
13514 	return ret;
13515 }
13516 
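/*
 * Sample the vblank counter of every pipe in @crtc_mask, then wait up
 * to 50ms per pipe for each counter to advance past the sampled value.
 */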
13517 static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
13518 					  struct drm_i915_private *dev_priv,
13519 					  unsigned crtc_mask)
13520 {
13521 	unsigned last_vblank_count[I915_MAX_PIPES];
13522 	enum i915_pipe pipe;
13523 	int ret;
13524 
13525 	if (!crtc_mask)
13526 		return;
13527 
13528 	for_each_pipe(dev_priv, pipe) {
13529 		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
13530 
13531 		if (!((1 << pipe) & crtc_mask))
13532 			continue;
13533 
13534 		ret = drm_crtc_vblank_get(crtc);
13535 		if (WARN_ON(ret != 0)) {
13536 			crtc_mask &= ~(1 << pipe);
13537 			continue;
13538 		}
13539 
13540 		last_vblank_count[pipe] = drm_crtc_vblank_count(crtc);
13541 	}
13542 
13543 	for_each_pipe(dev_priv, pipe) {
13544 		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
13545 		long lret;
13546 
13547 		if (!((1 << pipe) & crtc_mask))
13548 			continue;
13549 
13550 		lret = wait_event_timeout(dev->vblank[pipe].queue,
13551 				last_vblank_count[pipe] !=
13552 					drm_crtc_vblank_count(crtc),
13553 				msecs_to_jiffies(50));
13554 
13555 		WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));
13556 
13557 		drm_crtc_vblank_put(crtc);
13558 	}
13559 }
13560 
13561 static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
13562 {
13563 	/* fb updated, need to unpin old fb */
13564 	if (crtc_state->fb_changed)
13565 		return true;
13566 
13567 	/* wm changes, need vblank before final wm's */
13568 	if (crtc_state->update_wm_post)
13569 		return true;
13570 
13571 	/*
13572 	 * cxsr is re-enabled after vblank.
13573 	 * This is already handled by crtc_state->update_wm_post,
13574 	 * but added for clarity.
13575 	 */
13576 	if (crtc_state->disable_cxsr)
13577 		return true;
13578 
13579 	return false;
13580 }
13581 
13582 /**
13583  * intel_atomic_commit - commit validated state object
13584  * @dev: DRM device
13585  * @state: the top-level driver state object
13586  * @nonblock: nonblocking commit
13587  *
13588  * This function commits a top-level state object that has been validated
13589  * with drm_atomic_helper_check().
13590  *
13591  * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
13592  * we can only handle plane-related operations and do not yet support
13593  * nonblocking commit.
13594  *
13595  * RETURNS
13596  * Zero for success or -errno.
13597  */
13598 static int intel_atomic_commit(struct drm_device *dev,
13599 			       struct drm_atomic_state *state,
13600 			       bool nonblock)
13601 {
13602 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13603 	struct drm_i915_private *dev_priv = dev->dev_private;
13604 	struct drm_crtc_state *old_crtc_state;
13605 	struct drm_crtc *crtc;
13606 	struct intel_crtc_state *intel_cstate;
13607 	int ret = 0, i;
13608 	bool hw_check = intel_state->modeset;
13609 	unsigned long put_domains[I915_MAX_PIPES] = {};
13610 	unsigned crtc_vblank_mask = 0;
13611 
13612 	ret = intel_atomic_prepare_commit(dev, state, nonblock);
13613 	if (ret) {
13614 		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
13615 		return ret;
13616 	}
13617 
13618 	drm_atomic_helper_swap_state(dev, state);
13619 	dev_priv->wm.config = intel_state->wm_config;
13620 	intel_shared_dpll_commit(state);
13621 
13622 	if (intel_state->modeset) {
13623 		memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
13624 		       sizeof(intel_state->min_pixclk));
13625 		dev_priv->active_crtcs = intel_state->active_crtcs;
13626 		dev_priv->atomic_cdclk_freq = intel_state->cdclk;
13627 
13628 		intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
13629 	}
13630 
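	/*
	 * Disable pass: grab the power domains for every updated pipe and
	 * shut down those that need a full modeset.
	 */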
13631 	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
13632 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13633 
13634 		if (needs_modeset(crtc->state) ||
13635 		    to_intel_crtc_state(crtc->state)->update_pipe) {
13636 			hw_check = true;
13637 
13638 			put_domains[to_intel_crtc(crtc)->pipe] =
13639 				modeset_get_crtc_power_domains(crtc,
13640 					to_intel_crtc_state(crtc->state));
13641 		}
13642 
13643 		if (!needs_modeset(crtc->state))
13644 			continue;
13645 
13646 		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
13647 
13648 		if (old_crtc_state->active) {
13649 			intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
13650 			dev_priv->display.crtc_disable(crtc);
13651 			intel_crtc->active = false;
13652 			intel_fbc_disable(intel_crtc);
13653 			intel_disable_shared_dpll(intel_crtc);
13654 
13655 			/*
13656 			 * Underruns don't always raise
13657 			 * interrupts, so check manually.
13658 			 */
13659 			intel_check_cpu_fifo_underruns(dev_priv);
13660 			intel_check_pch_fifo_underruns(dev_priv);
13661 
13662 			if (!crtc->state->active)
13663 				intel_update_watermarks(crtc);
13664 		}
13665 	}
13666 
13667 	/* Only after disabling all output pipelines that will be changed can we
13668 	 * update the output configuration. */
13669 	intel_modeset_update_crtc_state(state);
13670 
13671 	if (intel_state->modeset) {
13672 		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
13673 
13674 		if (dev_priv->display.modeset_commit_cdclk &&
13675 		    intel_state->dev_cdclk != dev_priv->cdclk_freq)
13676 			dev_priv->display.modeset_commit_cdclk(state);
13677 
13678 		intel_modeset_verify_disabled(dev);
13679 	}
13680 
13681 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
13682 	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
13683 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13684 		bool modeset = needs_modeset(crtc->state);
13685 		struct intel_crtc_state *pipe_config =
13686 			to_intel_crtc_state(crtc->state);
13687 		bool update_pipe = !modeset && pipe_config->update_pipe;
13688 
13689 		if (modeset && crtc->state->active) {
13690 			update_scanline_offset(to_intel_crtc(crtc));
13691 			dev_priv->display.crtc_enable(crtc);
13692 		}
13693 
13694 		if (!modeset)
13695 			intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
13696 
13697 		if (crtc->state->active &&
13698 		    drm_atomic_get_existing_plane_state(state, crtc->primary))
13699 			intel_fbc_enable(intel_crtc);
13700 
13701 		if (crtc->state->active &&
13702 		    (crtc->state->planes_changed || update_pipe))
13703 			drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
13704 
13705 		if (pipe_config->base.active && needs_vblank_wait(pipe_config))
13706 			crtc_vblank_mask |= 1 << i;
13707 	}
13708 
13709 	/* FIXME: add subpixel order */
13710 
13711 	if (!state->legacy_cursor_update)
13712 		intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
13713 
13714 	/*
13715 	 * Now that the vblank has passed, we can go ahead and program the
13716 	 * optimal watermarks on platforms that need two-step watermark
13717 	 * programming.
13718 	 *
13719 	 * TODO: Move this (and other cleanup) to an async worker eventually.
13720 	 */
13721 	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
13722 		intel_cstate = to_intel_crtc_state(crtc->state);
13723 
13724 		if (dev_priv->display.optimize_watermarks)
13725 			dev_priv->display.optimize_watermarks(intel_cstate);
13726 	}
13727 
13728 	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
13729 		intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
13730 
13731 		if (put_domains[i])
13732 			modeset_put_power_domains(dev_priv, put_domains[i]);
13733 
13734 		intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
13735 	}
13736 
13737 	if (intel_state->modeset)
13738 		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
13739 
13740 	mutex_lock(&dev->struct_mutex);
13741 	drm_atomic_helper_cleanup_planes(dev, state);
13742 	mutex_unlock(&dev->struct_mutex);
13743 
13744 	drm_atomic_state_free(state);
13745 
13746 	/* As one of the primary mmio accessors, KMS has a high likelihood
13747 	 * of triggering bugs in unclaimed access. After we finish
13748 	 * modesetting, see if an error has been flagged, and if so
13749 	 * enable debugging for the next modeset - and hope we catch
13750 	 * the culprit.
13751 	 *
13752 	 * XXX note that we assume display power is on at this point.
13753 	 * This might hold true now but we need to add a pm helper to check
13754 	 * unclaimed access only when the hardware is on, as atomic commits
13755 	 * can also happen when the device is completely off.
13756 	 */
13757 	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
13758 
13759 	return 0;
13760 }
13761 
13762 void intel_crtc_restore_mode(struct drm_crtc *crtc)
13763 {
13764 	struct drm_device *dev = crtc->dev;
13765 	struct drm_atomic_state *state;
13766 	struct drm_crtc_state *crtc_state;
13767 	int ret;
13768 
13769 	state = drm_atomic_state_alloc(dev);
13770 	if (!state) {
13771 		DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory",
13772 			      crtc->base.id);
13773 		return;
13774 	}
13775 
13776 	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
13777 
13778 retry:
13779 	crtc_state = drm_atomic_get_crtc_state(state, crtc);
13780 	ret = PTR_ERR_OR_ZERO(crtc_state);
13781 	if (!ret) {
13782 		if (!crtc_state->active)
13783 			goto out;
13784 
13785 		crtc_state->mode_changed = true;
13786 		ret = drm_atomic_commit(state);
13787 	}
13788 
13789 	if (ret == -EDEADLK) {
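	/*
	 * Standard atomic retry dance: on -EDEADLK drop the locks acquired
	 * so far, back off until the contending context is done, and retry
	 * with a cleared state.
	 */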
13790 		drm_atomic_state_clear(state);
13791 		drm_modeset_backoff(state->acquire_ctx);
13792 		goto retry;
13793 	}
13794 
13795 	if (ret)
13796 out:
13797 		drm_atomic_state_free(state);
13798 }
13799 
13800 #undef for_each_intel_crtc_masked
13801 
13802 static const struct drm_crtc_funcs intel_crtc_funcs = {
13803 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
13804 	.set_config = drm_atomic_helper_set_config,
13805 	.set_property = drm_atomic_helper_crtc_set_property,
13806 	.destroy = intel_crtc_destroy,
13807 	.page_flip = intel_crtc_page_flip,
13808 	.atomic_duplicate_state = intel_crtc_duplicate_state,
13809 	.atomic_destroy_state = intel_crtc_destroy_state,
13810 };
13811 
13812 /**
13813  * intel_prepare_plane_fb - Prepare fb for usage on plane
13814  * @plane: drm plane to prepare for
13815  * @new_state: the plane state being prepared
13816  *
13817  * Prepares a framebuffer for usage on a display plane.  Generally this
13818  * involves pinning the underlying object and updating the frontbuffer tracking
13819  * bits.  Some older platforms need special physical address handling for
13820  * cursor planes.
13821  *
13822  * Must be called with struct_mutex held.
13823  *
13824  * Returns 0 on success, negative error code on failure.
13825  */
13826 int
13827 intel_prepare_plane_fb(struct drm_plane *plane,
13828 		       struct drm_plane_state *new_state)
13829 {
13830 	struct drm_device *dev = plane->dev;
13831 	struct drm_framebuffer *fb = new_state->fb;
13832 	struct intel_plane *intel_plane = to_intel_plane(plane);
13833 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13834 	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
13835 	int ret = 0;
13836 
13837 	if (!obj && !old_obj)
13838 		return 0;
13839 
13840 	if (old_obj) {
13841 		struct drm_crtc_state *crtc_state =
13842 			drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
13843 
13844 		/* Big Hammer, we also need to ensure that any pending
13845 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
13846 		 * current scanout is retired before unpinning the old
13847 		 * framebuffer. Note that we rely on userspace rendering
13848 		 * into the buffer attached to the pipe they are waiting
13849 		 * on. If not, userspace generates a GPU hang with IPEHR
13850 		 * pointing to the MI_WAIT_FOR_EVENT.
13851 		 *
13852 		 * This should only fail upon a hung GPU, in which case we
13853 		 * can safely continue.
13854 		 */
13855 		if (needs_modeset(crtc_state))
13856 			ret = i915_gem_object_wait_rendering(old_obj, true);
13857 		if (ret) {
13858 			/* GPU hangs should have been swallowed by the wait */
13859 			WARN_ON(ret == -EIO);
13860 			return ret;
13861 		}
13862 	}
13863 
13864 	/* For framebuffers backed by a dmabuf, wait for the fence */
13865 #if 0
13866 	if (obj && obj->base.dma_buf) {
13867 		long lret;
13868 
13869 		lret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
13870 							   false, true,
13871 							   MAX_SCHEDULE_TIMEOUT);
13872 		if (lret == -ERESTARTSYS)
13873 			return lret;
13874 
13875 		WARN(lret < 0, "wait returned %li\n", lret);
13876 	}
13877 #endif
13878 
13879 	if (!obj) {
13880 		ret = 0;
13881 	} else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
13882 	    INTEL_INFO(dev)->cursor_needs_physical) {
13883 		int align = IS_I830(dev) ? 16 * 1024 : 256;
13884 		ret = i915_gem_object_attach_phys(obj, align);
13885 		if (ret)
13886 			DRM_DEBUG_KMS("failed to attach phys object\n");
13887 	} else {
13888 		ret = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
13889 	}
13890 
13891 	if (ret == 0) {
13892 		if (obj) {
13893 			struct intel_plane_state *plane_state =
13894 				to_intel_plane_state(new_state);
13895 
13896 			i915_gem_request_assign(&plane_state->wait_req,
13897 						obj->last_write_req);
13898 		}
13899 
13900 		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13901 	}
13902 
13903 	return ret;
13904 }
13905 
13906 /**
13907  * intel_cleanup_plane_fb - Cleans up an fb after plane use
13908  * @plane: drm plane to clean up for
13909  * @old_state: the plane state from before the update
13910  *
13911  * Cleans up a framebuffer that has just been removed from a plane.
13912  *
13913  * Must be called with struct_mutex held.
13914  */
13915 void
13916 intel_cleanup_plane_fb(struct drm_plane *plane,
13917 		       struct drm_plane_state *old_state)
13918 {
13919 	struct drm_device *dev = plane->dev;
13920 	struct intel_plane *intel_plane = to_intel_plane(plane);
13921 	struct intel_plane_state *old_intel_state;
13922 	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
13923 	struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
13924 
13925 	old_intel_state = to_intel_plane_state(old_state);
13926 
13927 	if (!obj && !old_obj)
13928 		return;
13929 
13930 	if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
13931 	    !INTEL_INFO(dev)->cursor_needs_physical))
13932 		intel_unpin_fb_obj(old_state->fb, old_state->rotation);
13933 
13934 	/* prepare_fb aborted? */
13935 	if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) ||
13936 	    (obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit)))
13937 		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13938 
13939 	i915_gem_request_assign(&old_intel_state->wait_req, NULL);
13940 }
13941 
13942 int
13943 skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
13944 {
13945 	int max_scale;
13946 	struct drm_device *dev;
13947 	struct drm_i915_private *dev_priv;
13948 	int crtc_clock, cdclk;
13949 
13950 	if (!intel_crtc || !crtc_state->base.enable)
13951 		return DRM_PLANE_HELPER_NO_SCALING;
13952 
13953 	dev = intel_crtc->base.dev;
13954 	dev_priv = dev->dev_private;
13955 	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13956 	cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
13957 
13958 	if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
13959 		return DRM_PLANE_HELPER_NO_SCALING;
13960 
13961 	/*
13962 	 * The skl max scale factor is the lower of:
13963 	 *    just under 3x (the -1 keeps it strictly below 3.0), or
13964 	 *    cdclk / crtc_clock,
13965 	 * both expressed in 16.16 fixed point.
13966 	 */
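	/*
	 * Worked example (illustrative numbers): with cdclk = 337500 and
	 * crtc_clock = 148500, (cdclk << 8) / crtc_clock == 581, so the
	 * clock-limited term is 581 << 8 == 148736 (~2.27x in 16.16 fixed
	 * point); that is below the ~3x cap of 196607, so min() returns it.
	 */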
13967 	max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));
13968 
13969 	return max_scale;
13970 }
13971 
13972 static int
13973 intel_check_primary_plane(struct drm_plane *plane,
13974 			  struct intel_crtc_state *crtc_state,
13975 			  struct intel_plane_state *state)
13976 {
13977 	struct drm_crtc *crtc = state->base.crtc;
13978 	struct drm_framebuffer *fb = state->base.fb;
13979 	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
13980 	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
13981 	bool can_position = false;
13982 
13983 	if (INTEL_INFO(plane->dev)->gen >= 9) {
13984 		/* use scaler when colorkey is not required */
13985 		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
13986 			min_scale = 1;
13987 			max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
13988 		}
13989 		can_position = true;
13990 	}
13991 
13992 	return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
13993 					     &state->dst, &state->clip,
13994 					     min_scale, max_scale,
13995 					     can_position, true,
13996 					     &state->visible);
13997 }
13998 
13999 static void intel_begin_crtc_commit(struct drm_crtc *crtc,
14000 				    struct drm_crtc_state *old_crtc_state)
14001 {
14002 	struct drm_device *dev = crtc->dev;
14003 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14004 	struct intel_crtc_state *old_intel_state =
14005 		to_intel_crtc_state(old_crtc_state);
14006 	bool modeset = needs_modeset(crtc->state);
14007 
14008 	/* Perform vblank evasion around commit operation */
14009 	intel_pipe_update_start(intel_crtc);
14010 
14011 	if (modeset)
14012 		return;
14013 
14014 	if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
14015 		intel_color_set_csc(crtc->state);
14016 		intel_color_load_luts(crtc->state);
14017 	}
14018 
14019 	if (to_intel_crtc_state(crtc->state)->update_pipe)
14020 		intel_update_pipe_config(intel_crtc, old_intel_state);
14021 	else if (INTEL_INFO(dev)->gen >= 9)
14022 		skl_detach_scalers(intel_crtc);
14023 }
14024 
14025 static void intel_finish_crtc_commit(struct drm_crtc *crtc,
14026 				     struct drm_crtc_state *old_crtc_state)
14027 {
14028 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14029 
14030 	intel_pipe_update_end(intel_crtc);
14031 }
14032 
14033 /**
14034  * intel_plane_destroy - destroy a plane
14035  * @plane: plane to destroy
14036  *
14037  * Common destruction function for all types of planes (primary, cursor,
14038  * sprite).
14039  */
14040 void intel_plane_destroy(struct drm_plane *plane)
14041 {
14042 	struct intel_plane *intel_plane = to_intel_plane(plane);
14043 	drm_plane_cleanup(plane);
14044 	kfree(intel_plane);
14045 }
14046 
14047 const struct drm_plane_funcs intel_plane_funcs = {
14048 	.update_plane = drm_atomic_helper_update_plane,
14049 	.disable_plane = drm_atomic_helper_disable_plane,
14050 	.destroy = intel_plane_destroy,
14051 	.set_property = drm_atomic_helper_plane_set_property,
14052 	.atomic_get_property = intel_plane_atomic_get_property,
14053 	.atomic_set_property = intel_plane_atomic_set_property,
14054 	.atomic_duplicate_state = intel_plane_duplicate_state,
14055 	.atomic_destroy_state = intel_plane_destroy_state,
14056 
14057 };
14058 
14059 static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
14060 						    int pipe)
14061 {
14062 	struct intel_plane *primary = NULL;
14063 	struct intel_plane_state *state = NULL;
14064 	const uint32_t *intel_primary_formats;
14065 	unsigned int num_formats;
14066 	int ret;
14067 
14068 	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
14069 	if (!primary)
14070 		goto fail;
14071 
14072 	state = intel_create_plane_state(&primary->base);
14073 	if (!state)
14074 		goto fail;
14075 	primary->base.state = &state->base;
14076 
14077 	primary->can_scale = false;
14078 	primary->max_downscale = 1;
14079 	if (INTEL_INFO(dev)->gen >= 9) {
14080 		primary->can_scale = true;
14081 		state->scaler_id = -1;
14082 	}
14083 	primary->pipe = pipe;
14084 	primary->plane = pipe;
14085 	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
14086 	primary->check_plane = intel_check_primary_plane;
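	/* On gen2/3 FBC only works on plane A; see intel_crtc_init(). */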
14087 	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
14088 		primary->plane = !pipe;
14089 
14090 	if (INTEL_INFO(dev)->gen >= 9) {
14091 		intel_primary_formats = skl_primary_formats;
14092 		num_formats = ARRAY_SIZE(skl_primary_formats);
14093 
14094 		primary->update_plane = skylake_update_primary_plane;
14095 		primary->disable_plane = skylake_disable_primary_plane;
14096 	} else if (HAS_PCH_SPLIT(dev)) {
14097 		intel_primary_formats = i965_primary_formats;
14098 		num_formats = ARRAY_SIZE(i965_primary_formats);
14099 
14100 		primary->update_plane = ironlake_update_primary_plane;
14101 		primary->disable_plane = i9xx_disable_primary_plane;
14102 	} else if (INTEL_INFO(dev)->gen >= 4) {
14103 		intel_primary_formats = i965_primary_formats;
14104 		num_formats = ARRAY_SIZE(i965_primary_formats);
14105 
14106 		primary->update_plane = i9xx_update_primary_plane;
14107 		primary->disable_plane = i9xx_disable_primary_plane;
14108 	} else {
14109 		intel_primary_formats = i8xx_primary_formats;
14110 		num_formats = ARRAY_SIZE(i8xx_primary_formats);
14111 
14112 		primary->update_plane = i9xx_update_primary_plane;
14113 		primary->disable_plane = i9xx_disable_primary_plane;
14114 	}
14115 
14116 	ret = drm_universal_plane_init(dev, &primary->base, 0,
14117 				       &intel_plane_funcs,
14118 				       intel_primary_formats, num_formats,
14119 				       DRM_PLANE_TYPE_PRIMARY, NULL);
14120 	if (ret)
14121 		goto fail;
14122 
14123 	if (INTEL_INFO(dev)->gen >= 4)
14124 		intel_create_rotation_property(dev, primary);
14125 
14126 	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
14127 
14128 	return &primary->base;
14129 
14130 fail:
14131 	kfree(state);
14132 	kfree(primary);
14133 
14134 	return NULL;
14135 }
14136 
14137 void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
14138 {
14139 	if (!dev->mode_config.rotation_property) {
14140 		unsigned long flags = BIT(DRM_ROTATE_0) |
14141 			BIT(DRM_ROTATE_180);
14142 
14143 		if (INTEL_INFO(dev)->gen >= 9)
14144 			flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);
14145 
14146 		dev->mode_config.rotation_property =
14147 			drm_mode_create_rotation_property(dev, flags);
14148 	}
14149 	if (dev->mode_config.rotation_property)
14150 		drm_object_attach_property(&plane->base.base,
14151 				dev->mode_config.rotation_property,
14152 				plane->base.state->rotation);
14153 }
14154 
14155 static int
14156 intel_check_cursor_plane(struct drm_plane *plane,
14157 			 struct intel_crtc_state *crtc_state,
14158 			 struct intel_plane_state *state)
14159 {
14160 	struct drm_crtc *crtc = crtc_state->base.crtc;
14161 	struct drm_framebuffer *fb = state->base.fb;
14162 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14163 	enum i915_pipe pipe = to_intel_plane(plane)->pipe;
14164 	unsigned stride;
14165 	int ret;
14166 
14167 	ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
14168 					    &state->dst, &state->clip,
14169 					    DRM_PLANE_HELPER_NO_SCALING,
14170 					    DRM_PLANE_HELPER_NO_SCALING,
14171 					    true, true, &state->visible);
14172 	if (ret)
14173 		return ret;
14174 
14175 	/* If we want to turn off the cursor, ignore width and height. */
14176 	if (!obj)
14177 		return 0;
14178 
14179 	/* Check for which cursor types we support */
14180 	if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
14181 		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
14182 			  state->base.crtc_w, state->base.crtc_h);
14183 		return -EINVAL;
14184 	}
14185 
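	/*
	 * Example: a 64x64 ARGB cursor needs a 64 * 4 = 256 byte stride,
	 * so the backing object must be at least 256 * 64 = 16 KiB.
	 */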
14186 	stride = roundup_pow_of_two(state->base.crtc_w) * 4;
14187 	if (obj->base.size < stride * state->base.crtc_h) {
14188 		DRM_DEBUG_KMS("buffer is too small\n");
14189 		return -ENOMEM;
14190 	}
14191 
14192 	if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
14193 		DRM_DEBUG_KMS("cursor cannot be tiled\n");
14194 		return -EINVAL;
14195 	}
14196 
14197 	/*
14198 	 * There's something wrong with the cursor on CHV pipe C.
14199 	 * If it straddles the left edge of the screen then
14200 	 * moving it away from the edge or disabling it often
14201 	 * results in a pipe underrun, and often that can lead to a
14202 	 * dead pipe (constant underrun reported, and it scans
14203 	 * out just a solid color). To recover from that, the
14204 	 * display power well must be turned off and on again.
14205 	 * Refuse to put the cursor into that compromised position.
14206 	 */
14207 	if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
14208 	    state->visible && state->base.crtc_x < 0) {
14209 		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
14210 		return -EINVAL;
14211 	}
14212 
14213 	return 0;
14214 }
14215 
14216 static void
14217 intel_disable_cursor_plane(struct drm_plane *plane,
14218 			   struct drm_crtc *crtc)
14219 {
14220 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14221 
14222 	intel_crtc->cursor_addr = 0;
14223 	intel_crtc_update_cursor(crtc, NULL);
14224 }
14225 
14226 static void
14227 intel_update_cursor_plane(struct drm_plane *plane,
14228 			  const struct intel_crtc_state *crtc_state,
14229 			  const struct intel_plane_state *state)
14230 {
14231 	struct drm_crtc *crtc = crtc_state->base.crtc;
14232 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14233 	struct drm_device *dev = plane->dev;
14234 	struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
14235 	uint32_t addr;
14236 
14237 	if (!obj)
14238 		addr = 0;
14239 	else if (!INTEL_INFO(dev)->cursor_needs_physical)
14240 		addr = i915_gem_obj_ggtt_offset(obj);
14241 	else
14242 		addr = obj->phys_handle->busaddr;
14243 
14244 	intel_crtc->cursor_addr = addr;
14245 	intel_crtc_update_cursor(crtc, state);
14246 }
14247 
14248 static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
14249 						   int pipe)
14250 {
14251 	struct intel_plane *cursor = NULL;
14252 	struct intel_plane_state *state = NULL;
14253 	int ret;
14254 
14255 	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
14256 	if (!cursor)
14257 		goto fail;
14258 
14259 	state = intel_create_plane_state(&cursor->base);
14260 	if (!state)
14261 		goto fail;
14262 	cursor->base.state = &state->base;
14263 
14264 	cursor->can_scale = false;
14265 	cursor->max_downscale = 1;
14266 	cursor->pipe = pipe;
14267 	cursor->plane = pipe;
14268 	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
14269 	cursor->check_plane = intel_check_cursor_plane;
14270 	cursor->update_plane = intel_update_cursor_plane;
14271 	cursor->disable_plane = intel_disable_cursor_plane;
14272 
14273 	ret = drm_universal_plane_init(dev, &cursor->base, 0,
14274 				       &intel_plane_funcs,
14275 				       intel_cursor_formats,
14276 				       ARRAY_SIZE(intel_cursor_formats),
14277 				       DRM_PLANE_TYPE_CURSOR, NULL);
14278 	if (ret)
14279 		goto fail;
14280 
14281 	if (INTEL_INFO(dev)->gen >= 4) {
14282 		if (!dev->mode_config.rotation_property)
14283 			dev->mode_config.rotation_property =
14284 				drm_mode_create_rotation_property(dev,
14285 							BIT(DRM_ROTATE_0) |
14286 							BIT(DRM_ROTATE_180));
14287 		if (dev->mode_config.rotation_property)
14288 			drm_object_attach_property(&cursor->base.base,
14289 				dev->mode_config.rotation_property,
14290 				state->base.rotation);
14291 	}
14292 
14293 	if (INTEL_INFO(dev)->gen >= 9)
14294 		state->scaler_id = -1;
14295 
14296 	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
14297 
14298 	return &cursor->base;
14299 
14300 fail:
14301 	kfree(state);
14302 	kfree(cursor);
14303 
14304 	return NULL;
14305 }
14306 
14307 static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
14308 	struct intel_crtc_state *crtc_state)
14309 {
14310 	int i;
14311 	struct intel_scaler *intel_scaler;
14312 	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
14313 
14314 	for (i = 0; i < intel_crtc->num_scalers; i++) {
14315 		intel_scaler = &scaler_state->scalers[i];
14316 		intel_scaler->in_use = 0;
14317 		intel_scaler->mode = PS_SCALER_MODE_DYN;
14318 	}
14319 
14320 	scaler_state->scaler_id = -1;
14321 }
14322 
14323 static void intel_crtc_init(struct drm_device *dev, int pipe)
14324 {
14325 	struct drm_i915_private *dev_priv = dev->dev_private;
14326 	struct intel_crtc *intel_crtc;
14327 	struct intel_crtc_state *crtc_state = NULL;
14328 	struct drm_plane *primary = NULL;
14329 	struct drm_plane *cursor = NULL;
14330 	int ret;
14331 
14332 	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
14333 	if (intel_crtc == NULL)
14334 		return;
14335 
14336 	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
14337 	if (!crtc_state)
14338 		goto fail;
14339 	intel_crtc->config = crtc_state;
14340 	intel_crtc->base.state = &crtc_state->base;
14341 	crtc_state->base.crtc = &intel_crtc->base;
14342 
14343 	/* initialize shared scalers */
14344 	if (INTEL_INFO(dev)->gen >= 9) {
14345 		if (pipe == PIPE_C)
14346 			intel_crtc->num_scalers = 1;
14347 		else
14348 			intel_crtc->num_scalers = SKL_NUM_SCALERS;
14349 
14350 		skl_init_scalers(dev, intel_crtc, crtc_state);
14351 	}
14352 
14353 	primary = intel_primary_plane_create(dev, pipe);
14354 	if (!primary)
14355 		goto fail;
14356 
14357 	cursor = intel_cursor_plane_create(dev, pipe);
14358 	if (!cursor)
14359 		goto fail;
14360 
14361 	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
14362 					cursor, &intel_crtc_funcs, NULL);
14363 	if (ret)
14364 		goto fail;
14365 
14366 	/*
14367 	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
14368 	 * are hooked to pipe B. Hence we want plane A feeding pipe B.
14369 	 */
14370 	intel_crtc->pipe = pipe;
14371 	intel_crtc->plane = pipe;
14372 	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
14373 		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
14374 		intel_crtc->plane = !pipe;
14375 	}
14376 
14377 	intel_crtc->cursor_base = ~0;
14378 	intel_crtc->cursor_cntl = ~0;
14379 	intel_crtc->cursor_size = ~0;
14380 
14381 	intel_crtc->wm.cxsr_allowed = true;
14382 
14383 	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
14384 	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
14385 	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
14386 	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
14387 
14388 	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
14389 
14390 	intel_color_init(&intel_crtc->base);
14391 
14392 	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
14393 	return;
14394 
14395 fail:
14396 	if (primary)
14397 		drm_plane_cleanup(primary);
14398 	if (cursor)
14399 		drm_plane_cleanup(cursor);
14400 	kfree(crtc_state);
14401 	kfree(intel_crtc);
14402 }
14403 
14404 enum i915_pipe intel_get_pipe_from_connector(struct intel_connector *connector)
14405 {
14406 	struct drm_encoder *encoder = connector->base.encoder;
14407 	struct drm_device *dev = connector->base.dev;
14408 
14409 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
14410 
14411 	if (!encoder || WARN_ON(!encoder->crtc))
14412 		return INVALID_PIPE;
14413 
14414 	return to_intel_crtc(encoder->crtc)->pipe;
14415 }
14416 
14417 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
14418 				struct drm_file *file)
14419 {
14420 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
14421 	struct drm_crtc *drmmode_crtc;
14422 	struct intel_crtc *crtc;
14423 
14424 	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
14425 
14426 	if (!drmmode_crtc) {
14427 		DRM_ERROR("no such CRTC id\n");
14428 		return -ENOENT;
14429 	}
14430 
14431 	crtc = to_intel_crtc(drmmode_crtc);
14432 	pipe_from_crtc_id->pipe = crtc->pipe;
14433 
14434 	return 0;
14435 }
14436 
14437 static int intel_encoder_clones(struct intel_encoder *encoder)
14438 {
14439 	struct drm_device *dev = encoder->base.dev;
14440 	struct intel_encoder *source_encoder;
14441 	int index_mask = 0;
14442 	int entry = 0;
14443 
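	/* Build a bitmask of the encoders this one can be cloned with. */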
14444 	for_each_intel_encoder(dev, source_encoder) {
14445 		if (encoders_cloneable(encoder, source_encoder))
14446 			index_mask |= (1 << entry);
14447 
14448 		entry++;
14449 	}
14450 
14451 	return index_mask;
14452 }
14453 
14454 static bool has_edp_a(struct drm_device *dev)
14455 {
14456 	struct drm_i915_private *dev_priv = dev->dev_private;
14457 
14458 	if (!IS_MOBILE(dev))
14459 		return false;
14460 
14461 	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14462 		return false;
14463 
14464 	if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14465 		return false;
14466 
14467 	return true;
14468 }
14469 
14470 static bool intel_crt_present(struct drm_device *dev)
14471 {
14472 	struct drm_i915_private *dev_priv = dev->dev_private;
14473 
14474 	if (INTEL_INFO(dev)->gen >= 9)
14475 		return false;
14476 
14477 	if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
14478 		return false;
14479 
14480 	if (IS_CHERRYVIEW(dev))
14481 		return false;
14482 
14483 	if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
14484 		return false;
14485 
14486 	/* DDI E can't be used if DDI A requires 4 lanes */
14487 	if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
14488 		return false;
14489 
14490 	if (!dev_priv->vbt.int_crt_support)
14491 		return false;
14492 
14493 	return true;
14494 }
14495 
14496 static void intel_setup_outputs(struct drm_device *dev)
14497 {
14498 	struct drm_i915_private *dev_priv = dev->dev_private;
14499 	struct intel_encoder *encoder;
14500 	bool dpd_is_edp = false;
14501 
14502 	intel_lvds_init(dev);
14503 
14504 	if (intel_crt_present(dev))
14505 		intel_crt_init(dev);
14506 
14507 	if (IS_BROXTON(dev)) {
14508 		/*
14509 		 * FIXME: Broxton doesn't support port detection via the
14510 		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
14511 		 * detect the ports.
14512 		 */
14513 		intel_ddi_init(dev, PORT_A);
14514 		intel_ddi_init(dev, PORT_B);
14515 		intel_ddi_init(dev, PORT_C);
14516 
14517 		intel_dsi_init(dev);
14518 	} else if (HAS_DDI(dev)) {
14519 		int found;
14520 
14521 		/*
14522 		 * Haswell uses DDI functions to detect digital outputs.
14523 		 * On SKL pre-D0 the strap isn't connected, so we assume
14524 		 * it's there.
14525 		 */
14526 		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
14527 		/* WaIgnoreDDIAStrap: skl */
14528 		if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
14529 			intel_ddi_init(dev, PORT_A);
14530 
14531 		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
14532 		 * register */
14533 		found = I915_READ(SFUSE_STRAP);
14534 
14535 		if (found & SFUSE_STRAP_DDIB_DETECTED)
14536 			intel_ddi_init(dev, PORT_B);
14537 		if (found & SFUSE_STRAP_DDIC_DETECTED)
14538 			intel_ddi_init(dev, PORT_C);
14539 		if (found & SFUSE_STRAP_DDID_DETECTED)
14540 			intel_ddi_init(dev, PORT_D);
14541 		/*
14542 		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
14543 		 */
14544 		if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
14545 		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
14546 		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
14547 		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
14548 			intel_ddi_init(dev, PORT_E);
14549 
14550 	} else if (HAS_PCH_SPLIT(dev)) {
14551 		int found;
14552 		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
14553 
14554 		if (has_edp_a(dev))
14555 			intel_dp_init(dev, DP_A, PORT_A);
14556 
14557 		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
14558 			/* PCH SDVOB multiplex with HDMIB */
14559 			found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
14560 			if (!found)
14561 				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
14562 			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
14563 				intel_dp_init(dev, PCH_DP_B, PORT_B);
14564 		}
14565 
14566 		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
14567 			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
14568 
14569 		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
14570 			intel_hdmi_init(dev, PCH_HDMID, PORT_D);
14571 
14572 		if (I915_READ(PCH_DP_C) & DP_DETECTED)
14573 			intel_dp_init(dev, PCH_DP_C, PORT_C);
14574 
14575 		if (I915_READ(PCH_DP_D) & DP_DETECTED)
14576 			intel_dp_init(dev, PCH_DP_D, PORT_D);
14577 	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
14578 		bool has_edp, has_port;
14579 
14580 		/*
14581 		 * The DP_DETECTED bit is the latched state of the DDC
14582 		 * SDA pin at boot. However since eDP doesn't require DDC
14583 		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
14584 		 * eDP ports may have been muxed to an alternate function.
14585 		 * Thus we can't rely on the DP_DETECTED bit alone to detect
14586 		 * eDP ports. Consult the VBT as well as DP_DETECTED to
14587 		 * detect eDP ports.
14588 		 *
14589 		 * Sadly the straps seem to be missing sometimes even for HDMI
14590 		 * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both strap
14591 		 * and VBT for the presence of the port. Additionally we can't
14592 		 * trust the port type the VBT declares as we've seen at least
14593 		 * HDMI ports that the VBT claim are DP or eDP.
14594 		 */
14595 		has_edp = intel_dp_is_edp(dev, PORT_B);
14596 		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
14597 		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
14598 			has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
14599 		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
14600 			intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
14601 
14602 		has_edp = intel_dp_is_edp(dev, PORT_C);
14603 		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
14604 		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
14605 			has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
14606 		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
14607 			intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
14608 
14609 		if (IS_CHERRYVIEW(dev)) {
14610 			/*
14611 			 * eDP not supported on port D,
14612 			 * so no need to worry about it
14613 			 */
14614 			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
14615 			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
14616 				intel_dp_init(dev, CHV_DP_D, PORT_D);
14617 			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
14618 				intel_hdmi_init(dev, CHV_HDMID, PORT_D);
14619 		}
14620 
14621 		intel_dsi_init(dev);
14622 	} else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
14623 		bool found = false;
14624 
14625 		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14626 			DRM_DEBUG_KMS("probing SDVOB\n");
14627 			found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
14628 			if (!found && IS_G4X(dev)) {
14629 				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
14630 				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
14631 			}
14632 
14633 			if (!found && IS_G4X(dev))
14634 				intel_dp_init(dev, DP_B, PORT_B);
14635 		}
14636 
14637 		/* Before G4X, SDVOC doesn't have its own detect register */
14638 
14639 		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14640 			DRM_DEBUG_KMS("probing SDVOC\n");
14641 			found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
14642 		}
14643 
14644 		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
14645 
14646 			if (IS_G4X(dev)) {
14647 				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
14648 				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
14649 			}
14650 			if (IS_G4X(dev))
14651 				intel_dp_init(dev, DP_C, PORT_C);
14652 		}
14653 
14654 		if (IS_G4X(dev) &&
14655 		    (I915_READ(DP_D) & DP_DETECTED))
14656 			intel_dp_init(dev, DP_D, PORT_D);
14657 	} else if (IS_GEN2(dev))
14658 		intel_dvo_init(dev);
14659 
14660 	if (SUPPORTS_TV(dev))
14661 		intel_tv_init(dev);
14662 
14663 	intel_psr_init(dev);
14664 
14665 	for_each_intel_encoder(dev, encoder) {
14666 		encoder->base.possible_crtcs = encoder->crtc_mask;
14667 		encoder->base.possible_clones =
14668 			intel_encoder_clones(encoder);
14669 	}
14670 
14671 	intel_init_pch_refclk(dev);
14672 
14673 	drm_helper_move_panel_connectors_to_head(dev);
14674 }
14675 
14676 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
14677 {
14678 	struct drm_device *dev = fb->dev;
14679 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14680 
14681 	drm_framebuffer_cleanup(fb);
14682 	mutex_lock(&dev->struct_mutex);
14683 	WARN_ON(!intel_fb->obj->framebuffer_references--);
14684 	drm_gem_object_unreference(&intel_fb->obj->base);
14685 	mutex_unlock(&dev->struct_mutex);
14686 	kfree(intel_fb);
14687 }
14688 
14689 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14690 						struct drm_file *file,
14691 						unsigned int *handle)
14692 {
14693 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14694 	struct drm_i915_gem_object *obj = intel_fb->obj;
14695 
14696 	if (obj->userptr.mm) {
14697 		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14698 		return -EINVAL;
14699 	}
14700 
14701 	return drm_gem_handle_create(file, &obj->base, handle);
14702 }
14703 
14704 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
14705 					struct drm_file *file,
14706 					unsigned flags, unsigned color,
14707 					struct drm_clip_rect *clips,
14708 					unsigned num_clips)
14709 {
14710 	struct drm_device *dev = fb->dev;
14711 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14712 	struct drm_i915_gem_object *obj = intel_fb->obj;
14713 
14714 	mutex_lock(&dev->struct_mutex);
14715 	intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
14716 	mutex_unlock(&dev->struct_mutex);
14717 
14718 	return 0;
14719 }
14720 
14721 static const struct drm_framebuffer_funcs intel_fb_funcs = {
14722 	.destroy = intel_user_framebuffer_destroy,
14723 	.create_handle = intel_user_framebuffer_create_handle,
14724 	.dirty = intel_user_framebuffer_dirty,
14725 };
14726 
14727 static
14728 u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
14729 			 uint32_t pixel_format)
14730 {
14731 	u32 gen = INTEL_INFO(dev)->gen;
14732 
14733 	if (gen >= 9) {
14734 		int cpp = drm_format_plane_cpp(pixel_format, 0);
14735 
14736 		/* "The stride in bytes must not exceed the of the size of 8K
14737 		 *  pixels and 32K bytes."
14738 		 */
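		/* e.g. 32bpp: min(8192 * 4, 32768) == 32768; 16bpp: 16384 bytes. */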
14739 		return min(8192 * cpp, 32768);
14740 	} else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
14741 		return 32*1024;
14742 	} else if (gen >= 4) {
14743 		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14744 			return 16*1024;
14745 		else
14746 			return 32*1024;
14747 	} else if (gen >= 3) {
14748 		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14749 			return 8*1024;
14750 		else
14751 			return 16*1024;
14752 	} else {
14753 		/* XXX DSPC is limited to 4k tiled */
14754 		return 8*1024;
14755 	}
14756 }
14757 
14758 static int intel_framebuffer_init(struct drm_device *dev,
14759 				  struct intel_framebuffer *intel_fb,
14760 				  struct drm_mode_fb_cmd2 *mode_cmd,
14761 				  struct drm_i915_gem_object *obj)
14762 {
14763 	struct drm_i915_private *dev_priv = to_i915(dev);
14764 	unsigned int aligned_height;
14765 	int ret;
14766 	u32 pitch_limit, stride_alignment;
14767 
14768 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
14769 
14770 	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
14771 		/* Enforce that fb modifier and tiling mode match, but only for
14772 		 * X-tiled. This is needed for FBC. */
14773 		if (!!(obj->tiling_mode == I915_TILING_X) !=
14774 		    !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
14775 			DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
14776 			return -EINVAL;
14777 		}
14778 	} else {
14779 		if (obj->tiling_mode == I915_TILING_X)
14780 			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
14781 		else if (obj->tiling_mode == I915_TILING_Y) {
14782 			DRM_DEBUG("No Y tiling for legacy addfb\n");
14783 			return -EINVAL;
14784 		}
14785 	}
14786 
14787 	/* Sanity check the passed-in modifier. */
14788 	switch (mode_cmd->modifier[0]) {
14789 	case I915_FORMAT_MOD_Y_TILED:
14790 	case I915_FORMAT_MOD_Yf_TILED:
14791 		if (INTEL_INFO(dev)->gen < 9) {
14792 			DRM_DEBUG("Unsupported tiling 0x%llx!\n",
14793 				  mode_cmd->modifier[0]);
14794 			return -EINVAL;
14795 		}
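		/* fall through */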
14796 	case DRM_FORMAT_MOD_NONE:
14797 	case I915_FORMAT_MOD_X_TILED:
14798 		break;
14799 	default:
14800 		DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
14801 			  mode_cmd->modifier[0]);
14802 		return -EINVAL;
14803 	}
14804 
14805 	stride_alignment = intel_fb_stride_alignment(dev_priv,
14806 						     mode_cmd->modifier[0],
14807 						     mode_cmd->pixel_format);
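	/* stride_alignment is a power of two, so the mask tests alignment. */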
14808 	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
14809 		DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
14810 			  mode_cmd->pitches[0], stride_alignment);
14811 		return -EINVAL;
14812 	}
14813 
14814 	pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
14815 					   mode_cmd->pixel_format);
14816 	if (mode_cmd->pitches[0] > pitch_limit) {
14817 		DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
14818 			  mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
14819 			  "tiled" : "linear",
14820 			  mode_cmd->pitches[0], pitch_limit);
14821 		return -EINVAL;
14822 	}
14823 
14824 	if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
14825 	    mode_cmd->pitches[0] != obj->stride) {
14826 		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
14827 			  mode_cmd->pitches[0], obj->stride);
14828 		return -EINVAL;
14829 	}
14830 
14831 	/* Reject formats not supported by any plane early. */
14832 	switch (mode_cmd->pixel_format) {
14833 	case DRM_FORMAT_C8:
14834 	case DRM_FORMAT_RGB565:
14835 	case DRM_FORMAT_XRGB8888:
14836 	case DRM_FORMAT_ARGB8888:
14837 		break;
14838 	case DRM_FORMAT_XRGB1555:
14839 		if (INTEL_INFO(dev)->gen > 3) {
14840 			DRM_DEBUG("unsupported pixel format: %s\n",
14841 				  drm_get_format_name(mode_cmd->pixel_format));
14842 			return -EINVAL;
14843 		}
14844 		break;
14845 	case DRM_FORMAT_ABGR8888:
14846 		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
14847 		    INTEL_INFO(dev)->gen < 9) {
14848 			DRM_DEBUG("unsupported pixel format: %s\n",
14849 				  drm_get_format_name(mode_cmd->pixel_format));
14850 			return -EINVAL;
14851 		}
14852 		break;
14853 	case DRM_FORMAT_XBGR8888:
14854 	case DRM_FORMAT_XRGB2101010:
14855 	case DRM_FORMAT_XBGR2101010:
14856 		if (INTEL_INFO(dev)->gen < 4) {
14857 			DRM_DEBUG("unsupported pixel format: %s\n",
14858 				  drm_get_format_name(mode_cmd->pixel_format));
14859 			return -EINVAL;
14860 		}
14861 		break;
14862 	case DRM_FORMAT_ABGR2101010:
14863 		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
14864 			DRM_DEBUG("unsupported pixel format: %s\n",
14865 				  drm_get_format_name(mode_cmd->pixel_format));
14866 			return -EINVAL;
14867 		}
14868 		break;
14869 	case DRM_FORMAT_YUYV:
14870 	case DRM_FORMAT_UYVY:
14871 	case DRM_FORMAT_YVYU:
14872 	case DRM_FORMAT_VYUY:
14873 		if (INTEL_INFO(dev)->gen < 5) {
14874 			DRM_DEBUG("unsupported pixel format: %s\n",
14875 				  drm_get_format_name(mode_cmd->pixel_format));
14876 			return -EINVAL;
14877 		}
14878 		break;
14879 	default:
14880 		DRM_DEBUG("unsupported pixel format: %s\n",
14881 			  drm_get_format_name(mode_cmd->pixel_format));
14882 		return -EINVAL;
14883 	}
14884 
14885 	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
14886 	if (mode_cmd->offsets[0] != 0)
14887 		return -EINVAL;
14888 
14889 	aligned_height = intel_fb_align_height(dev, mode_cmd->height,
14890 					       mode_cmd->pixel_format,
14891 					       mode_cmd->modifier[0]);
14892 	/* FIXME drm helper for size checks (especially planar formats)? */
14893 	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
14894 		return -EINVAL;
14895 
14896 	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
14897 	intel_fb->obj = obj;
14898 
14899 	intel_fill_fb_info(dev_priv, &intel_fb->base);
14900 
14901 	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
14902 	if (ret) {
14903 		DRM_ERROR("framebuffer init failed %d\n", ret);
14904 		return ret;
14905 	}
14906 
14907 	intel_fb->obj->framebuffer_references++;
14908 
14909 	return 0;
14910 }
14911 
14912 static struct drm_framebuffer *
14913 intel_user_framebuffer_create(struct drm_device *dev,
14914 			      struct drm_file *filp,
14915 			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
14916 {
14917 	struct drm_framebuffer *fb;
14918 	struct drm_i915_gem_object *obj;
14919 	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
14920 
14921 	obj = to_intel_bo(drm_gem_object_lookup(filp, mode_cmd.handles[0]));
14922 	if (&obj->base == NULL)
14923 		return ERR_PTR(-ENOENT);
14924 
14925 	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
14926 	if (IS_ERR(fb))
14927 		drm_gem_object_unreference_unlocked(&obj->base);
14928 
14929 	return fb;
14930 }
14931 
14932 #ifndef CONFIG_DRM_FBDEV_EMULATION
14933 static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
14934 {
14935 }
14936 #endif
14937 
14938 static const struct drm_mode_config_funcs intel_mode_funcs = {
14939 	.fb_create = intel_user_framebuffer_create,
14940 	.output_poll_changed = intel_fbdev_output_poll_changed,
14941 	.atomic_check = intel_atomic_check,
14942 	.atomic_commit = intel_atomic_commit,
14943 	.atomic_state_alloc = intel_atomic_state_alloc,
14944 	.atomic_state_clear = intel_atomic_state_clear,
14945 };
14946 
14947 /**
14948  * intel_init_display_hooks - initialize the display modesetting hooks
14949  * @dev_priv: device private
14950  */
14951 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
14952 {
14953 	if (INTEL_INFO(dev_priv)->gen >= 9) {
14954 		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14955 		dev_priv->display.get_initial_plane_config =
14956 			skylake_get_initial_plane_config;
14957 		dev_priv->display.crtc_compute_clock =
14958 			haswell_crtc_compute_clock;
14959 		dev_priv->display.crtc_enable = haswell_crtc_enable;
14960 		dev_priv->display.crtc_disable = haswell_crtc_disable;
14961 	} else if (HAS_DDI(dev_priv)) {
14962 		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14963 		dev_priv->display.get_initial_plane_config =
14964 			ironlake_get_initial_plane_config;
14965 		dev_priv->display.crtc_compute_clock =
14966 			haswell_crtc_compute_clock;
14967 		dev_priv->display.crtc_enable = haswell_crtc_enable;
14968 		dev_priv->display.crtc_disable = haswell_crtc_disable;
14969 	} else if (HAS_PCH_SPLIT(dev_priv)) {
14970 		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
14971 		dev_priv->display.get_initial_plane_config =
14972 			ironlake_get_initial_plane_config;
14973 		dev_priv->display.crtc_compute_clock =
14974 			ironlake_crtc_compute_clock;
14975 		dev_priv->display.crtc_enable = ironlake_crtc_enable;
14976 		dev_priv->display.crtc_disable = ironlake_crtc_disable;
14977 	} else if (IS_CHERRYVIEW(dev_priv)) {
14978 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14979 		dev_priv->display.get_initial_plane_config =
14980 			i9xx_get_initial_plane_config;
14981 		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
14982 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
14983 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
14984 	} else if (IS_VALLEYVIEW(dev_priv)) {
14985 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14986 		dev_priv->display.get_initial_plane_config =
14987 			i9xx_get_initial_plane_config;
14988 		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
14989 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
14990 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
14991 	} else if (IS_G4X(dev_priv)) {
14992 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14993 		dev_priv->display.get_initial_plane_config =
14994 			i9xx_get_initial_plane_config;
14995 		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
14996 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
14997 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
14998 	} else if (IS_PINEVIEW(dev_priv)) {
14999 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15000 		dev_priv->display.get_initial_plane_config =
15001 			i9xx_get_initial_plane_config;
15002 		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
15003 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
15004 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
15005 	} else if (!IS_GEN2(dev_priv)) {
15006 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15007 		dev_priv->display.get_initial_plane_config =
15008 			i9xx_get_initial_plane_config;
15009 		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
15010 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
15011 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
15012 	} else {
15013 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15014 		dev_priv->display.get_initial_plane_config =
15015 			i9xx_get_initial_plane_config;
15016 		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
15017 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
15018 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
15019 	}
15020 
15021 	/* Returns the core display clock speed */
15022 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
15023 		dev_priv->display.get_display_clock_speed =
15024 			skylake_get_display_clock_speed;
15025 	else if (IS_BROXTON(dev_priv))
15026 		dev_priv->display.get_display_clock_speed =
15027 			broxton_get_display_clock_speed;
15028 	else if (IS_BROADWELL(dev_priv))
15029 		dev_priv->display.get_display_clock_speed =
15030 			broadwell_get_display_clock_speed;
15031 	else if (IS_HASWELL(dev_priv))
15032 		dev_priv->display.get_display_clock_speed =
15033 			haswell_get_display_clock_speed;
15034 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15035 		dev_priv->display.get_display_clock_speed =
15036 			valleyview_get_display_clock_speed;
15037 	else if (IS_GEN5(dev_priv))
15038 		dev_priv->display.get_display_clock_speed =
15039 			ilk_get_display_clock_speed;
15040 	else if (IS_I945G(dev_priv) || IS_BROADWATER(dev_priv) ||
15041 		 IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
15042 		dev_priv->display.get_display_clock_speed =
15043 			i945_get_display_clock_speed;
15044 	else if (IS_GM45(dev_priv))
15045 		dev_priv->display.get_display_clock_speed =
15046 			gm45_get_display_clock_speed;
15047 	else if (IS_CRESTLINE(dev_priv))
15048 		dev_priv->display.get_display_clock_speed =
15049 			i965gm_get_display_clock_speed;
15050 	else if (IS_PINEVIEW(dev_priv))
15051 		dev_priv->display.get_display_clock_speed =
15052 			pnv_get_display_clock_speed;
15053 	else if (IS_G33(dev_priv) || IS_G4X(dev_priv))
15054 		dev_priv->display.get_display_clock_speed =
15055 			g33_get_display_clock_speed;
15056 	else if (IS_I915G(dev_priv))
15057 		dev_priv->display.get_display_clock_speed =
15058 			i915_get_display_clock_speed;
15059 	else if (IS_I945GM(dev_priv) || IS_845G(dev_priv))
15060 		dev_priv->display.get_display_clock_speed =
15061 			i9xx_misc_get_display_clock_speed;
15062 	else if (IS_I915GM(dev_priv))
15063 		dev_priv->display.get_display_clock_speed =
15064 			i915gm_get_display_clock_speed;
15065 	else if (IS_I865G(dev_priv))
15066 		dev_priv->display.get_display_clock_speed =
15067 			i865_get_display_clock_speed;
15068 	else if (IS_I85X(dev_priv))
15069 		dev_priv->display.get_display_clock_speed =
15070 			i85x_get_display_clock_speed;
15071 	else { /* 830 */
15072 		WARN(!IS_I830(dev_priv), "Unknown platform. Assuming 133 MHz CDCLK\n");
15073 		dev_priv->display.get_display_clock_speed =
15074 			i830_get_display_clock_speed;
15075 	}
15076 
15077 	if (IS_GEN5(dev_priv)) {
15078 		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
15079 	} else if (IS_GEN6(dev_priv)) {
15080 		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
15081 	} else if (IS_IVYBRIDGE(dev_priv)) {
15082 		/* FIXME: detect B0+ stepping and use auto training */
15083 		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
15084 	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
15085 		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
15086 		if (IS_BROADWELL(dev_priv)) {
15087 			dev_priv->display.modeset_commit_cdclk =
15088 				broadwell_modeset_commit_cdclk;
15089 			dev_priv->display.modeset_calc_cdclk =
15090 				broadwell_modeset_calc_cdclk;
15091 		}
15092 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
15093 		dev_priv->display.modeset_commit_cdclk =
15094 			valleyview_modeset_commit_cdclk;
15095 		dev_priv->display.modeset_calc_cdclk =
15096 			valleyview_modeset_calc_cdclk;
15097 	} else if (IS_BROXTON(dev_priv)) {
15098 		dev_priv->display.modeset_commit_cdclk =
15099 			broxton_modeset_commit_cdclk;
15100 		dev_priv->display.modeset_calc_cdclk =
15101 			broxton_modeset_calc_cdclk;
15102 	}
15103 
15104 	switch (INTEL_INFO(dev_priv)->gen) {
15105 	case 2:
15106 		dev_priv->display.queue_flip = intel_gen2_queue_flip;
15107 		break;
15108 
15109 	case 3:
15110 		dev_priv->display.queue_flip = intel_gen3_queue_flip;
15111 		break;
15112 
15113 	case 4:
15114 	case 5:
15115 		dev_priv->display.queue_flip = intel_gen4_queue_flip;
15116 		break;
15117 
15118 	case 6:
15119 		dev_priv->display.queue_flip = intel_gen6_queue_flip;
15120 		break;
15121 	case 7:
15122 	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
15123 		dev_priv->display.queue_flip = intel_gen7_queue_flip;
15124 		break;
15125 	case 9:
15126 		/* Fall through - unsupported since execlist only. */
15127 	default:
15128 		/* Default just returns -ENODEV to indicate unsupported */
15129 		dev_priv->display.queue_flip = intel_default_queue_flip;
15130 	}
15131 }
15132 
15133 /*
15134  * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
15135  * resume, or other times.  This quirk makes sure that's the case for
15136  * affected systems.
15137  */
15138 static void quirk_pipea_force(struct drm_device *dev)
15139 {
15140 	struct drm_i915_private *dev_priv = dev->dev_private;
15141 
15142 	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
15143 	DRM_INFO("applying pipe a force quirk\n");
15144 }
15145 
15146 static void quirk_pipeb_force(struct drm_device *dev)
15147 {
15148 	struct drm_i915_private *dev_priv = dev->dev_private;
15149 
15150 	dev_priv->quirks |= QUIRK_PIPEB_FORCE;
15151 	DRM_INFO("applying pipe b force quirk\n");
15152 }
15153 
15154 /*
15155  * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
15156  */
15157 static void quirk_ssc_force_disable(struct drm_device *dev)
15158 {
15159 	struct drm_i915_private *dev_priv = dev->dev_private;
15160 	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
15161 	DRM_INFO("applying lvds SSC disable quirk\n");
15162 	DRM_INFO("applying LVDS SSC disable quirk\n");
15163 
15164 /*
15165  * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
15166  * brightness value
15167  */
15168 static void quirk_invert_brightness(struct drm_device *dev)
15169 {
15170 	struct drm_i915_private *dev_priv = dev->dev_private;
15171 	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
15172 	DRM_INFO("applying inverted panel brightness quirk\n");
15173 }
15174 
15175 /* Some VBT's incorrectly indicate no backlight is present */
15176 /* Some VBTs incorrectly indicate no backlight is present */
15177 {
15178 	struct drm_i915_private *dev_priv = dev->dev_private;
15179 	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
15180 	DRM_INFO("applying backlight present quirk\n");
15181 }
15182 
15183 struct intel_quirk {
15184 	int device;
15185 	int subsystem_vendor;
15186 	int subsystem_device;
15187 	void (*hook)(struct drm_device *dev);
15188 };
15189 
15190 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
15191 struct intel_dmi_quirk {
15192 	void (*hook)(struct drm_device *dev);
15193 	const struct dmi_system_id (*dmi_id_list)[];
15194 };
15195 
15196 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
15197 {
15198 	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
15199 	return 1;
15200 }
15201 
15202 static const struct intel_dmi_quirk intel_dmi_quirks[] = {
15203 	{
15204 		.dmi_id_list = &(const struct dmi_system_id[]) {
15205 			{
15206 				.callback = intel_dmi_reverse_brightness,
15207 				.ident = "NCR Corporation",
15208 				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
15209 					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
15210 				},
15211 			},
15212 			{ }  /* terminating entry */
15213 		},
15214 		.hook = quirk_invert_brightness,
15215 	},
15216 };
15217 
15218 static struct intel_quirk intel_quirks[] = {
15219 	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
15220 	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
15221 
15222 	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
15223 	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
15224 
15225 	/* 830 needs to leave pipe A & dpll A up */
15226 	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
15227 
15228 	/* 830 needs to leave pipe B & dpll B up */
15229 	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },
15230 
15231 	/* Lenovo U160 cannot use SSC on LVDS */
15232 	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
15233 
15234 	/* Sony Vaio Y cannot use SSC on LVDS */
15235 	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
15236 
15237 	/* Acer Aspire 5734Z must invert backlight brightness */
15238 	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
15239 
15240 	/* Acer/eMachines G725 */
15241 	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
15242 
15243 	/* Acer/eMachines e725 */
15244 	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
15245 
15246 	/* Acer/Packard Bell NCL20 */
15247 	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
15248 
15249 	/* Acer Aspire 4736Z */
15250 	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
15251 
15252 	/* Acer Aspire 5336 */
15253 	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
15254 
15255 	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
15256 	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
15257 
15258 	/* Acer C720 Chromebook (Core i3 4005U) */
15259 	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
15260 
15261 	/* Apple Macbook 2,1 (Core 2 T7400) */
15262 	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
15263 
15264 	/* Apple Macbook 4,1 */
15265 	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
15266 
15267 	/* Toshiba CB35 Chromebook (Celeron 2955U) */
15268 	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
15269 
15270 	/* HP Chromebook 14 (Celeron 2955U) */
15271 	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
15272 
15273 	/* Dell Chromebook 11 */
15274 	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
15275 
15276 	/* Dell Chromebook 11 (2015 version) */
15277 	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
15278 };
15279 
15280 static void intel_init_quirks(struct drm_device *dev)
15281 {
15282 	struct pci_dev *d = dev->pdev;
15283 	int i;
15284 
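	/*
	 * An entry applies when the PCI device ID matches exactly and each
	 * subsystem ID either matches or is wildcarded with PCI_ANY_ID.
	 */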
15285 	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
15286 		struct intel_quirk *q = &intel_quirks[i];
15287 
15288 		if (d->device == q->device &&
15289 		    (d->subsystem_vendor == q->subsystem_vendor ||
15290 		     q->subsystem_vendor == PCI_ANY_ID) &&
15291 		    (d->subsystem_device == q->subsystem_device ||
15292 		     q->subsystem_device == PCI_ANY_ID))
15293 			q->hook(dev);
15294 	}
15295 	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
15296 		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
15297 			intel_dmi_quirks[i].hook(dev);
15298 	}
15299 }
15300 
15301 /* Disable the VGA plane that we never use */
15302 static void i915_disable_vga(struct drm_device *dev)
15303 {
15304 	struct drm_i915_private *dev_priv = dev->dev_private;
15305 	u8 sr1;
15306 	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
15307 
15308 	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
15309 #if 0
15310 	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
15311 #endif
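	/*
	 * Set the "screen off" bit (bit 5) in sequencer register SR01
	 * before disabling the VGA display plane below.
	 */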
15312 	outb(VGA_SR_INDEX, SR01);
15313 	sr1 = inb(VGA_SR_DATA);
15314 	outb(VGA_SR_DATA, sr1 | 1 << 5);
15315 #if 0
15316 	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
15317 #endif
15318 	udelay(300);
15319 
15320 	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
15321 	POSTING_READ(vga_reg);
15322 }
15323 
15324 void intel_modeset_init_hw(struct drm_device *dev)
15325 {
15326 	struct drm_i915_private *dev_priv = dev->dev_private;
15327 
15328 	intel_update_cdclk(dev);
15329 
15330 	dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
15331 
15332 	intel_init_clock_gating(dev);
15333 	intel_enable_gt_powersave(dev);
15334 }
15335 
15336 /*
15337  * Calculate what we think the watermarks should be for the state we've read
15338  * out of the hardware and then immediately program those watermarks so that
15339  * we ensure the hardware settings match our internal state.
15340  *
15341  * We can calculate what we think WMs should be by creating a duplicate of the
15342  * current state (which was constructed during hardware readout) and running it
15343  * through the atomic check code to calculate new watermark values in the
15344  * state object.
15345  */
15346 static void sanitize_watermarks(struct drm_device *dev)
15347 {
15348 	struct drm_i915_private *dev_priv = to_i915(dev);
15349 	struct drm_atomic_state *state;
15350 	struct drm_crtc *crtc;
15351 	struct drm_crtc_state *cstate;
15352 	struct drm_modeset_acquire_ctx ctx;
15353 	int ret;
15354 	int i;
15355 
15356 	/* Only supported on platforms that use atomic watermark design */
15357 	if (!dev_priv->display.optimize_watermarks)
15358 		return;
15359 
15360 	/*
15361 	 * We need to hold connection_mutex before calling duplicate_state so
15362 	 * that the connector loop is protected.
15363 	 */
15364 	drm_modeset_acquire_init(&ctx, 0);
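	/*
	 * Usual w/w locking dance: on -EDEADLK, back off (dropping all
	 * held modeset locks) and retry the acquisition.
	 */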
15365 retry:
15366 	ret = drm_modeset_lock_all_ctx(dev, &ctx);
15367 	if (ret == -EDEADLK) {
15368 		drm_modeset_backoff(&ctx);
15369 		goto retry;
15370 	} else if (WARN_ON(ret)) {
15371 		goto fail;
15372 	}
15373 
15374 	state = drm_atomic_helper_duplicate_state(dev, &ctx);
15375 	if (WARN_ON(IS_ERR(state)))
15376 		goto fail;
15377 
15378 	/*
15379 	 * Hardware readout is the only time we don't want to calculate
15380 	 * intermediate watermarks (since we don't trust the current
15381 	 * watermarks).
15382 	 */
15383 	to_intel_atomic_state(state)->skip_intermediate_wm = true;
15384 
15385 	ret = intel_atomic_check(dev, state);
15386 	if (ret) {
15387 		/*
15388 		 * If we fail here, it means that the hardware appears to be
15389 		 * programmed in a way that shouldn't be possible, given our
15390 		 * understanding of watermark requirements.  This might mean a
15391 		 * mistake in the hardware readout code or a mistake in the
15392 		 * watermark calculations for a given platform.  Raise a WARN
15393 		 * so that this is noticeable.
15394 		 *
15395 		 * If this actually happens, we'll have to just leave the
15396 		 * BIOS-programmed watermarks untouched and hope for the best.
15397 		 */
15398 		WARN(true, "Could not determine valid watermarks for inherited state\n");
15399 		goto fail;
15400 	}
15401 
15402 	/* Write calculated watermark values back */
15403 	to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config;
15404 	for_each_crtc_in_state(state, crtc, cstate, i) {
15405 		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
15406 
15407 		cs->wm.need_postvbl_update = true;
15408 		dev_priv->display.optimize_watermarks(cs);
15409 	}
15410 
15411 	drm_atomic_state_free(state);
15412 fail:
15413 	drm_modeset_drop_locks(&ctx);
15414 	drm_modeset_acquire_fini(&ctx);
15415 }
15416 
15417 void intel_modeset_init(struct drm_device *dev)
15418 {
15419 	struct drm_i915_private *dev_priv = to_i915(dev);
15420 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
15421 	int sprite, ret;
15422 	enum i915_pipe pipe;
15423 	struct intel_crtc *crtc;
15424 
15425 	drm_mode_config_init(dev);
15426 
15427 	dev->mode_config.min_width = 0;
15428 	dev->mode_config.min_height = 0;
15429 
15430 	dev->mode_config.preferred_depth = 24;
15431 	dev->mode_config.prefer_shadow = 1;
15432 
15433 	dev->mode_config.allow_fb_modifiers = true;
15434 
15435 	dev->mode_config.funcs = &intel_mode_funcs;
15436 
15437 	intel_init_quirks(dev);
15438 
15439 	intel_init_pm(dev);
15440 
15441 	if (INTEL_INFO(dev)->num_pipes == 0)
15442 		return;
15443 
15444 	/*
15445 	 * There may be no VBT; and if the BIOS enabled SSC we can
15446 	 * just keep using it to avoid unnecessary flicker.  Whereas if the
15447 	 * BIOS isn't using it, don't assume it will work even if the VBT
15448 	 * indicates as much.
15449 	 */
15450 	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
15451 		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
15452 					    DREF_SSC1_ENABLE);
15453 
15454 		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
15455 			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
15456 				     bios_lvds_use_ssc ? "en" : "dis",
15457 				     dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
15458 			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
15459 		}
15460 	}
15461 
15462 	if (IS_GEN2(dev)) {
15463 		dev->mode_config.max_width = 2048;
15464 		dev->mode_config.max_height = 2048;
15465 	} else if (IS_GEN3(dev)) {
15466 		dev->mode_config.max_width = 4096;
15467 		dev->mode_config.max_height = 4096;
15468 	} else {
15469 		dev->mode_config.max_width = 8192;
15470 		dev->mode_config.max_height = 8192;
15471 	}
15472 
15473 	if (IS_845G(dev) || IS_I865G(dev)) {
15474 		dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
15475 		dev->mode_config.cursor_height = 1023;
15476 	} else if (IS_GEN2(dev)) {
15477 		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
15478 		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
15479 	} else {
15480 		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
15481 		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
15482 	}
15483 
15484 	dev->mode_config.fb_base = ggtt->mappable_base;
15485 
15486 	DRM_DEBUG_KMS("%d display pipe%s available.\n",
15487 		      INTEL_INFO(dev)->num_pipes,
15488 		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
15489 
15490 	for_each_pipe(dev_priv, pipe) {
15491 		intel_crtc_init(dev, pipe);
15492 		for_each_sprite(dev_priv, pipe, sprite) {
15493 			ret = intel_plane_init(dev, pipe, sprite);
15494 			if (ret)
15495 				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
15496 					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
15497 		}
15498 	}
15499 
15500 	intel_update_czclk(dev_priv);
15501 	intel_update_rawclk(dev_priv);
15502 	intel_update_cdclk(dev);
15503 
15504 	intel_shared_dpll_init(dev);
15505 
15506 	/* Just disable it once at startup */
15507 	i915_disable_vga(dev);
15508 	intel_setup_outputs(dev);
15509 
15510 	drm_modeset_lock_all(dev);
15511 	intel_modeset_setup_hw_state(dev);
15512 	drm_modeset_unlock_all(dev);
15513 
15514 	for_each_intel_crtc(dev, crtc) {
15515 		struct intel_initial_plane_config plane_config = {};
15516 
15517 		if (!crtc->active)
15518 			continue;
15519 
15520 		/*
15521 		 * Note that reserving the BIOS fb up front prevents us
15522 		 * from stuffing other stolen allocations like the ring
15523 		 * on top.  This prevents some ugliness at boot time, and
15524 		 * can even allow for smooth boot transitions if the BIOS
15525 		 * fb is large enough for the active pipe configuration.
15526 		 */
15527 		dev_priv->display.get_initial_plane_config(crtc,
15528 							   &plane_config);
15529 
15530 		/*
15531 		 * If the fb is shared between multiple heads, we'll
15532 		 * just get the first one.
15533 		 */
15534 		intel_find_initial_plane_obj(crtc, &plane_config);
15535 	}
15536 
15537 	/*
15538 	 * Make sure hardware watermarks really match the state we read out.
15539 	 * Note that we need to do this after reconstructing the BIOS fb's
15540 	 * since the watermark calculation done here will use pstate->fb.
15541 	 */
15542 	sanitize_watermarks(dev);
15543 }
15544 
15545 static void intel_enable_pipe_a(struct drm_device *dev)
15546 {
15547 	struct intel_connector *connector;
15548 	struct drm_connector *crt = NULL;
15549 	struct intel_load_detect_pipe load_detect_temp;
15550 	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
15551 
15552 	/* We can't just switch on pipe A; we need to set things up with a
15553 	 * proper mode and output configuration. As a gross hack, enable pipe A
15554 	 * by enabling the load detect pipe once. */
15555 	for_each_intel_connector(dev, connector) {
15556 		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
15557 			crt = &connector->base;
15558 			break;
15559 		}
15560 	}
15561 
15562 	if (!crt)
15563 		return;
15564 
15565 	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
15566 		intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
15567 }
15568 
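/*
 * On gen2/3 a display plane may be assigned to either pipe; report a bad
 * mapping when the other plane is enabled but selects this crtc's pipe.
 */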
15569 static bool
15570 intel_check_plane_mapping(struct intel_crtc *crtc)
15571 {
15572 	struct drm_device *dev = crtc->base.dev;
15573 	struct drm_i915_private *dev_priv = dev->dev_private;
15574 	u32 val;
15575 
15576 	if (INTEL_INFO(dev)->num_pipes == 1)
15577 		return true;
15578 
15579 	val = I915_READ(DSPCNTR(!crtc->plane));
15580 
15581 	if ((val & DISPLAY_PLANE_ENABLE) &&
15582 	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
15583 		return false;
15584 
15585 	return true;
15586 }
15587 
15588 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15589 {
15590 	struct drm_device *dev = crtc->base.dev;
15591 	struct intel_encoder *encoder;
15592 
15593 	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15594 		return true;
15595 
15596 	return false;
15597 }
15598 
15599 static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
15600 {
15601 	struct drm_device *dev = encoder->base.dev;
15602 	struct intel_connector *connector;
15603 
15604 	for_each_connector_on_encoder(dev, &encoder->base, connector)
15605 		return true;
15606 
15607 	return false;
15608 }
15609 
15610 static void intel_sanitize_crtc(struct intel_crtc *crtc)
15611 {
15612 	struct drm_device *dev = crtc->base.dev;
15613 	struct drm_i915_private *dev_priv = dev->dev_private;
15614 	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
15615 
15616 	/* Clear any frame start delays used for debugging left by the BIOS */
15617 	/* Clear any frame start delays (used for debugging) left by the BIOS */
15618 		i915_reg_t reg = PIPECONF(cpu_transcoder);
15619 
15620 		I915_WRITE(reg,
15621 			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
15622 	}
15623 
15624 	/* restore vblank interrupts to correct state */
15625 	drm_crtc_vblank_reset(&crtc->base);
15626 	if (crtc->active) {
15627 		struct intel_plane *plane;
15628 
15629 		drm_crtc_vblank_on(&crtc->base);
15630 
15631 		/* Disable everything but the primary plane */
15632 		for_each_intel_plane_on_crtc(dev, crtc, plane) {
15633 			if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
15634 				continue;
15635 
15636 			plane->disable_plane(&plane->base, &crtc->base);
15637 		}
15638 	}
15639 
15640 	/* We need to sanitize the plane -> pipe mapping first because this will
15641 	 * disable the crtc (and hence change the state) if it is wrong. Note
15642 	 * that gen4+ has a fixed plane -> pipe mapping.  */
15643 	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
15644 		bool plane;
15645 
15646 		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
15647 			      crtc->base.base.id);
15648 
15649 		/* The pipe has the wrong plane attached and the plane is active.
15650 		 * Temporarily swap the plane mapping, disable everything, then
15651 		 * restore the original mapping. */
15652 		plane = crtc->plane;
15653 		to_intel_plane_state(crtc->base.primary->state)->visible = true;
15654 		crtc->plane = !plane;
15655 		intel_crtc_disable_noatomic(&crtc->base);
15656 		crtc->plane = plane;
15657 	}
15658 
15659 	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
15660 	    crtc->pipe == PIPE_A && !crtc->active) {
15661 		/* The BIOS forgot to enable pipe A; this mostly happens after
15662 		 * resume. Force-enable the pipe to fix this; the update_dpms
15663 		 * call below restores the pipe to the right state, but leaves
15664 		 * the required bits on. */
15665 		intel_enable_pipe_a(dev);
15666 	}
15667 
15668 	/* Adjust the state of the output pipe according to whether we
15669 	 * have active connectors/encoders. */
15670 	if (crtc->active && !intel_crtc_has_encoders(crtc))
15671 		intel_crtc_disable_noatomic(&crtc->base);
15672 
15673 	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
15674 		/*
15675 		 * We start out with underrun reporting disabled to avoid races.
15676 		 * For correct bookkeeping mark this on active crtcs.
15677 		 *
15678 		 * Also, on gmch platforms we don't have any hardware bits to
15679 		 * disable the underrun reporting, which means we need to start
15680 		 * out with underrun reporting disabled also on inactive pipes,
15681 		 * since otherwise we'll complain about the garbage we read when
15682 		 * e.g. coming up after runtime pm.
15683 		 *
15684 		 * No protection against concurrent access is required - at
15685 		 * worst a fifo underrun happens which also sets this to false.
15686 		 */
15687 		crtc->cpu_fifo_underrun_disabled = true;
15688 		crtc->pch_fifo_underrun_disabled = true;
15689 	}
15690 }
15691 
15692 static void intel_sanitize_encoder(struct intel_encoder *encoder)
15693 {
15694 	struct intel_connector *connector;
15695 	struct drm_device *dev = encoder->base.dev;
15696 
15697 	/* We need to check both for a crtc link (meaning that the
15698 	 * encoder is active and trying to read from a pipe) and the
15699 	 * pipe itself being active. */
15700 	bool has_active_crtc = encoder->base.crtc &&
15701 		to_intel_crtc(encoder->base.crtc)->active;
15702 
15703 	if (intel_encoder_has_connectors(encoder) && !has_active_crtc) {
15704 		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
15705 			      encoder->base.base.id,
15706 			      encoder->base.name);
15707 
15708 		/* Connector is active, but has no active pipe. This is
15709 		 * fallout from restoring registers on resume. Disable
15710 		 * the encoder manually again. */
15711 		if (encoder->base.crtc) {
15712 			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
15713 				      encoder->base.base.id,
15714 				      encoder->base.name);
15715 			encoder->disable(encoder);
15716 			if (encoder->post_disable)
15717 				encoder->post_disable(encoder);
15718 		}
15719 		encoder->base.crtc = NULL;
15720 
15721 		/* Inconsistent output/port/pipe state happens presumably due to
15722 		 * a bug in one of the get_hw_state functions, or someplace else
15723 		 * in our code such as the register restore mess on resume. Clamp
15724 		 * things to off as a safer default. */
15725 		for_each_intel_connector(dev, connector) {
15726 			if (connector->encoder != encoder)
15727 				continue;
15728 			connector->base.dpms = DRM_MODE_DPMS_OFF;
15729 			connector->base.encoder = NULL;
15730 		}
15731 	}
15732 	/* Enabled encoders without active connectors will be fixed in
15733 	 * the crtc fixup. */
15734 }
15735 
15736 void i915_redisable_vga_power_on(struct drm_device *dev)
15737 {
15738 	struct drm_i915_private *dev_priv = dev->dev_private;
15739 	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
15740 
15741 	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15742 		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15743 		i915_disable_vga(dev);
15744 	}
15745 }
15746 
15747 void i915_redisable_vga(struct drm_device *dev)
15748 {
15749 	struct drm_i915_private *dev_priv = dev->dev_private;
15750 
15751 	/* This function can be called either from intel_modeset_setup_hw_state or
15752 	 * at a very early point in our resume sequence, where the power well
15753 	 * structures are not yet restored. Since this function is at a very
15754 	 * paranoid "someone might have enabled VGA while we were not looking"
15755 	 * level, just check if the power well is enabled instead of trying to
15756 	 * follow the "don't touch the power well if we don't need it" policy
15757 	 * the rest of the driver uses. */
15758 	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
15759 		return;
15760 
15761 	i915_redisable_vga_power_on(dev);
15762 
15763 	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
15764 }
15765 
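/* The primary plane is active iff the enable bit is set in its DSPCNTR. */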
15766 static bool primary_get_hw_state(struct intel_plane *plane)
15767 {
15768 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15769 
15770 	return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
15771 }
15772 
15773 /* FIXME read out full plane state for all planes */
15774 static void readout_plane_state(struct intel_crtc *crtc)
15775 {
15776 	struct drm_plane *primary = crtc->base.primary;
15777 	struct intel_plane_state *plane_state =
15778 		to_intel_plane_state(primary->state);
15779 
15780 	plane_state->visible = crtc->active &&
15781 		primary_get_hw_state(to_intel_plane(primary));
15782 
15783 	if (plane_state->visible)
15784 		crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
15785 }
15786 
15787 static void intel_modeset_readout_hw_state(struct drm_device *dev)
15788 {
15789 	struct drm_i915_private *dev_priv = dev->dev_private;
15790 	enum i915_pipe pipe;
15791 	struct intel_crtc *crtc;
15792 	struct intel_encoder *encoder;
15793 	struct intel_connector *connector;
15794 	int i;
15795 
15796 	dev_priv->active_crtcs = 0;
15797 
15798 	for_each_intel_crtc(dev, crtc) {
15799 		struct intel_crtc_state *crtc_state = crtc->config;
15800 		int pixclk = 0;
15801 
15802 		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
15803 		memset(crtc_state, 0, sizeof(*crtc_state));
15804 		crtc_state->base.crtc = &crtc->base;
15805 
15806 		crtc_state->base.active = crtc_state->base.enable =
15807 			dev_priv->display.get_pipe_config(crtc, crtc_state);
15808 
15809 		crtc->base.enabled = crtc_state->base.enable;
15810 		crtc->active = crtc_state->base.active;
15811 
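		/*
		 * Track each active pipe's pixel rate so a minimum cdclk can
		 * be maintained; BDW needs ~5% headroom when IPS is enabled.
		 */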
15812 		if (crtc_state->base.active) {
15813 			dev_priv->active_crtcs |= 1 << crtc->pipe;
15814 
15815 			if (IS_BROADWELL(dev_priv)) {
15816 				pixclk = ilk_pipe_pixel_rate(crtc_state);
15817 
15818 				/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
15819 				if (crtc_state->ips_enabled)
15820 					pixclk = DIV_ROUND_UP(pixclk * 100, 95);
15821 			} else if (IS_VALLEYVIEW(dev_priv) ||
15822 				   IS_CHERRYVIEW(dev_priv) ||
15823 				   IS_BROXTON(dev_priv))
15824 				pixclk = crtc_state->base.adjusted_mode.crtc_clock;
15825 			else
15826 				WARN_ON(dev_priv->display.modeset_calc_cdclk);
15827 		}
15828 
15829 		dev_priv->min_pixclk[crtc->pipe] = pixclk;
15830 
15831 		readout_plane_state(crtc);
15832 
15833 		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
15834 			      crtc->base.base.id,
15835 			      crtc->active ? "enabled" : "disabled");
15836 	}
15837 
15838 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
15839 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
15840 
15841 		pll->on = pll->funcs.get_hw_state(dev_priv, pll,
15842 						  &pll->config.hw_state);
15843 		pll->config.crtc_mask = 0;
15844 		for_each_intel_crtc(dev, crtc) {
15845 			if (crtc->active && crtc->config->shared_dpll == pll)
15846 				pll->config.crtc_mask |= 1 << crtc->pipe;
15847 		}
15848 		pll->active_mask = pll->config.crtc_mask;
15849 
15850 		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
15851 			      pll->name, pll->config.crtc_mask, pll->on);
15852 	}
15853 
15854 	for_each_intel_encoder(dev, encoder) {
15855 		pipe = 0;
15856 
15857 		if (encoder->get_hw_state(encoder, &pipe)) {
15858 			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
15859 			encoder->base.crtc = &crtc->base;
15860 			encoder->get_config(encoder, crtc->config);
15861 		} else {
15862 			encoder->base.crtc = NULL;
15863 		}
15864 
15865 		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
15866 			      encoder->base.base.id,
15867 			      encoder->base.name,
15868 			      encoder->base.crtc ? "enabled" : "disabled",
15869 			      pipe_name(pipe));
15870 	}
15871 
15872 	for_each_intel_connector(dev, connector) {
15873 		if (connector->get_hw_state(connector)) {
15874 			connector->base.dpms = DRM_MODE_DPMS_ON;
15875 
15876 			encoder = connector->encoder;
15877 			connector->base.encoder = &encoder->base;
15878 
15879 			if (encoder->base.crtc &&
15880 			    encoder->base.crtc->state->active) {
15881 				/*
15882 				 * This has to be done during hardware readout
15883 				 * because anything calling .crtc_disable may
15884 				 * rely on the connector_mask being accurate.
15885 				 */
15886 				encoder->base.crtc->state->connector_mask |=
15887 					1 << drm_connector_index(&connector->base);
15888 				encoder->base.crtc->state->encoder_mask |=
15889 					1 << drm_encoder_index(&encoder->base);
15890 			}
15891 
15892 		} else {
15893 			connector->base.dpms = DRM_MODE_DPMS_OFF;
15894 			connector->base.encoder = NULL;
15895 		}
15896 		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
15897 			      connector->base.base.id,
15898 			      connector->base.name,
15899 			      connector->base.encoder ? "enabled" : "disabled");
15900 	}
15901 
15902 	for_each_intel_crtc(dev, crtc) {
15903 		crtc->base.hwmode = crtc->config->base.adjusted_mode;
15904 
15905 		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
15906 		if (crtc->base.state->active) {
15907 			intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
15908 			intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
15909 			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
15910 
15911 			/*
15912 			 * The initial mode needs to be set in order to keep
15913 			 * the atomic core happy. It wants a valid mode if the
15914 			 * crtc's enabled, so we do the above call.
15915 			 *
15916 			 * At this point the ->detect() callbacks that update
15917 			 * connector state have not run yet, so no
15918 			 * recalculation can be done yet.
15919 			 *
15920 			 * Even if we could do a recalculation and modeset
15921 			 * right now it would cause a double modeset if
15922 			 * fbdev or userspace chooses a different initial mode.
15923 			 *
15924 			 * If that happens, someone indicated they wanted a
15925 			 * mode change, which means it's safe to do a full
15926 			 * recalculation.
15927 			 */
15928 			crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
15929 
15930 			drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
15931 			update_scanline_offset(crtc);
15932 		}
15933 
15934 		intel_pipe_config_sanity_check(dev_priv, crtc->config);
15935 	}
15936 }
15937 
15938 /* Scan out the current hw modeset state
15939  * and sanitize it to the current state.
15940  */
15941 static void
15942 intel_modeset_setup_hw_state(struct drm_device *dev)
15943 {
15944 	struct drm_i915_private *dev_priv = dev->dev_private;
15945 	enum i915_pipe pipe;
15946 	struct intel_crtc *crtc;
15947 	struct intel_encoder *encoder;
15948 	int i;
15949 
15950 	intel_modeset_readout_hw_state(dev);
15951 
15952 	/* HW state is read out, now we need to sanitize this mess. */
15953 	for_each_intel_encoder(dev, encoder) {
15954 		intel_sanitize_encoder(encoder);
15955 	}
15956 
15957 	for_each_pipe(dev_priv, pipe) {
15958 		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
15959 		intel_sanitize_crtc(crtc);
15960 		intel_dump_pipe_config(crtc, crtc->config,
15961 				       "[setup_hw_state]");
15962 	}
15963 
15964 	intel_modeset_update_connector_atomic_state(dev);
15965 
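	/* Switch off any shared DPLLs that are on but no longer have users. */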
15966 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
15967 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
15968 
15969 		if (!pll->on || pll->active_mask)
15970 			continue;
15971 
15972 		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
15973 
15974 		pll->funcs.disable(dev_priv, pll);
15975 		pll->on = false;
15976 	}
15977 
15978 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
15979 		vlv_wm_get_hw_state(dev);
15980 	else if (IS_GEN9(dev))
15981 		skl_wm_get_hw_state(dev);
15982 	else if (HAS_PCH_SPLIT(dev))
15983 		ilk_wm_get_hw_state(dev);
15984 
15985 	for_each_intel_crtc(dev, crtc) {
15986 		unsigned long put_domains;
15987 
15988 		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
15989 		if (WARN_ON(put_domains))
15990 			modeset_put_power_domains(dev_priv, put_domains);
15991 	}
15992 	intel_display_set_init_power(dev_priv, false);
15993 
15994 	intel_fbc_init_pipe_state(dev_priv);
15995 }
15996 
15997 void intel_display_resume(struct drm_device *dev)
15998 {
15999 	struct drm_i915_private *dev_priv = to_i915(dev);
16000 	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
16001 	struct drm_modeset_acquire_ctx ctx;
16002 	int ret;
16003 	bool setup = false;
16004 
16005 	dev_priv->modeset_restore_state = NULL;
16006 
16007 	/*
16008 	 * This is a kludge because with real atomic modeset mode_config.mutex
16009 	 * won't be taken. Unfortunately some probed state like
16010 	 * audio_codec_enable is still protected by mode_config.mutex, so lock
16011 	 * it here for now.
16012 	 */
16013 	mutex_lock(&dev->mode_config.mutex);
16014 	drm_modeset_acquire_init(&ctx, 0);
16015 
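	/*
	 * Under the acquired locks: take over the hardware state once, then
	 * commit the saved atomic state (if any) with a forced recalculation
	 * on every crtc so leftover BIOS state can't linger.
	 */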
16016 retry:
16017 	ret = drm_modeset_lock_all_ctx(dev, &ctx);
16018 
16019 	if (ret == 0 && !setup) {
16020 		setup = true;
16021 
16022 		intel_modeset_setup_hw_state(dev);
16023 		i915_redisable_vga(dev);
16024 	}
16025 
16026 	if (ret == 0 && state) {
16027 		struct drm_crtc_state *crtc_state;
16028 		struct drm_crtc *crtc;
16029 		int i;
16030 
16031 		state->acquire_ctx = &ctx;
16032 
16033 		/* ignore any reset values/BIOS leftovers in the WM registers */
16034 		to_intel_atomic_state(state)->skip_intermediate_wm = true;
16035 
16036 		for_each_crtc_in_state(state, crtc, crtc_state, i) {
16037 			/*
16038 			 * Force recalculation even if we restore
16039 			 * current state. With fast modeset this may not result
16040 			 * in a modeset when the state is compatible.
16041 			 */
16042 			crtc_state->mode_changed = true;
16043 		}
16044 
16045 		ret = drm_atomic_commit(state);
16046 	}
16047 
16048 	if (ret == -EDEADLK) {
16049 		drm_modeset_backoff(&ctx);
16050 		goto retry;
16051 	}
16052 
16053 	drm_modeset_drop_locks(&ctx);
16054 	drm_modeset_acquire_fini(&ctx);
16055 	mutex_unlock(&dev->mode_config.mutex);
16056 
16057 	if (ret) {
16058 		DRM_ERROR("Restoring old state failed with %i\n", ret);
16059 		drm_atomic_state_free(state);
16060 	}
16061 }
16062 
16063 void intel_modeset_gem_init(struct drm_device *dev)
16064 {
16065 	struct drm_crtc *c;
16066 	struct drm_i915_gem_object *obj;
16067 	int ret;
16068 
16069 	intel_init_gt_powersave(dev);
16070 
16071 	intel_modeset_init_hw(dev);
16072 
16073 	intel_setup_overlay(dev);
16074 
16075 	/*
16076 	 * Make sure any fbs we allocated at startup are properly
16077 	 * pinned & fenced.  When we do the allocation it's too early
16078 	 * for this.
16079 	 */
16080 	for_each_crtc(dev, c) {
16081 		obj = intel_fb_obj(c->primary->fb);
16082 		if (obj == NULL)
16083 			continue;
16084 
16085 		mutex_lock(&dev->struct_mutex);
16086 		ret = intel_pin_and_fence_fb_obj(c->primary->fb,
16087 						 c->primary->state->rotation);
16088 		mutex_unlock(&dev->struct_mutex);
16089 		if (ret) {
16090 			DRM_ERROR("failed to pin boot fb on pipe %d\n",
16091 				  to_intel_crtc(c)->pipe);
16092 			drm_framebuffer_unreference(c->primary->fb);
16093 			c->primary->fb = NULL;
16094 			c->primary->crtc = c->primary->state->crtc = NULL;
16095 			update_state_fb(c->primary);
16096 			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
16097 		}
16098 	}
16099 
16100 	intel_backlight_register(dev);
16101 }
16102 
16103 void intel_connector_unregister(struct intel_connector *intel_connector)
16104 {
16105 	struct drm_connector *connector = &intel_connector->base;
16106 
16107 	intel_panel_destroy_backlight(connector);
16108 	drm_connector_unregister(connector);
16109 }
16110 
16111 void intel_modeset_cleanup(struct drm_device *dev)
16112 {
16113 	struct drm_i915_private *dev_priv = dev->dev_private;
16114 	struct intel_connector *connector;
16115 
16116 	intel_disable_gt_powersave(dev);
16117 
16118 	intel_backlight_unregister(dev);
16119 
16120 	/*
16121 	 * Disable interrupts and polling first, to avoid creating havoc.
16122 	 * Too much stuff here (turning off connectors, ...) would
16123 	 * otherwise experience fancy races.
16124 	 */
16125 	intel_irq_uninstall(dev_priv);
16126 
16127 	/*
16128 	 * Due to the hpd irq storm handling the hotplug work can re-arm the
16129 	 * poll handlers. Hence disable polling after hpd handling is shut down.
16130 	 */
16131 	drm_kms_helper_poll_fini(dev);
16132 
16133 	intel_unregister_dsm_handler();
16134 
16135 	intel_fbc_global_disable(dev_priv);
16136 
16137 	/* flush any delayed tasks or pending work */
16138 	flush_scheduled_work();
16139 
16140 	/* destroy the backlight and sysfs files before encoders/connectors */
16141 	for_each_intel_connector(dev, connector)
16142 		connector->unregister(connector);
16143 
16144 	drm_mode_config_cleanup(dev);
16145 
16146 	intel_cleanup_overlay(dev);
16147 
16148 	intel_cleanup_gt_powersave(dev);
16149 
16150 	intel_teardown_gmbus(dev);
16151 }
16152 
16153 /*
16154  * Return which encoder is currently attached for connector.
16155  * Return which encoder is currently attached to the connector.
16156 struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
16157 {
16158 	return &intel_attached_encoder(connector)->base;
16159 }
16160 
16161 void intel_connector_attach_encoder(struct intel_connector *connector,
16162 				    struct intel_encoder *encoder)
16163 {
16164 	connector->encoder = encoder;
16165 	drm_mode_connector_attach_encoder(&connector->base,
16166 					  &encoder->base);
16167 }
16168 
16169 /*
16170  * Set VGA decode state - true == enable VGA decode
16171  */
16172 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
16173 {
16174 	struct drm_i915_private *dev_priv = dev->dev_private;
16175 	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
16176 	u16 gmch_ctrl;
16177 
16178 	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
16179 		DRM_ERROR("failed to read control word\n");
16180 		return -EIO;
16181 	}
16182 
16183 	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
16184 		return 0;
16185 
16186 	if (state)
16187 		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16188 	else
16189 		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
16190 
16191 	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16192 		DRM_ERROR("failed to write control word\n");
16193 		return -EIO;
16194 	}
16195 
16196 	return 0;
16197 }
16198 
16199 #if 0
16200 struct intel_display_error_state {
16201 
16202 	u32 power_well_driver;
16203 
16204 	int num_transcoders;
16205 
16206 	struct intel_cursor_error_state {
16207 		u32 control;
16208 		u32 position;
16209 		u32 base;
16210 		u32 size;
16211 	} cursor[I915_MAX_PIPES];
16212 
16213 	struct intel_pipe_error_state {
16214 		bool power_domain_on;
16215 		u32 source;
16216 		u32 stat;
16217 	} pipe[I915_MAX_PIPES];
16218 
16219 	struct intel_plane_error_state {
16220 		u32 control;
16221 		u32 stride;
16222 		u32 size;
16223 		u32 pos;
16224 		u32 addr;
16225 		u32 surface;
16226 		u32 tile_offset;
16227 	} plane[I915_MAX_PIPES];
16228 
16229 	struct intel_transcoder_error_state {
16230 		bool power_domain_on;
16231 		enum transcoder cpu_transcoder;
16232 
16233 		u32 conf;
16234 
16235 		u32 htotal;
16236 		u32 hblank;
16237 		u32 hsync;
16238 		u32 vtotal;
16239 		u32 vblank;
16240 		u32 vsync;
16241 	} transcoder[4];
16242 };
16243 
16244 struct intel_display_error_state *
16245 intel_display_capture_error_state(struct drm_device *dev)
16246 {
16247 	struct drm_i915_private *dev_priv = dev->dev_private;
16248 	struct intel_display_error_state *error;
16249 	int transcoders[] = {
16250 		TRANSCODER_A,
16251 		TRANSCODER_B,
16252 		TRANSCODER_C,
16253 		TRANSCODER_EDP,
16254 	};
16255 	int i;
16256 
16257 	if (INTEL_INFO(dev)->num_pipes == 0)
16258 		return NULL;
16259 
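	/* GFP_ATOMIC since error capture may run from atomic context. */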
16260 	error = kzalloc(sizeof(*error), GFP_ATOMIC);
16261 	if (error == NULL)
16262 		return NULL;
16263 
16264 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
16265 		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
16266 
16267 	for_each_pipe(dev_priv, i) {
16268 		error->pipe[i].power_domain_on =
16269 			__intel_display_power_is_enabled(dev_priv,
16270 							 POWER_DOMAIN_PIPE(i));
16271 		if (!error->pipe[i].power_domain_on)
16272 			continue;
16273 
16274 		error->cursor[i].control = I915_READ(CURCNTR(i));
16275 		error->cursor[i].position = I915_READ(CURPOS(i));
16276 		error->cursor[i].base = I915_READ(CURBASE(i));
16277 
16278 		error->plane[i].control = I915_READ(DSPCNTR(i));
16279 		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
16280 		if (INTEL_INFO(dev)->gen <= 3) {
16281 			error->plane[i].size = I915_READ(DSPSIZE(i));
16282 			error->plane[i].pos = I915_READ(DSPPOS(i));
16283 		}
16284 		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
16285 			error->plane[i].addr = I915_READ(DSPADDR(i));
16286 		if (INTEL_INFO(dev)->gen >= 4) {
16287 			error->plane[i].surface = I915_READ(DSPSURF(i));
16288 			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
16289 		}
16290 
16291 		error->pipe[i].source = I915_READ(PIPESRC(i));
16292 
16293 		if (HAS_GMCH_DISPLAY(dev))
16294 			error->pipe[i].stat = I915_READ(PIPESTAT(i));
16295 	}
16296 
16297 	/* Note: this does not include DSI transcoders. */
16298 	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
16299 	if (HAS_DDI(dev_priv))
16300 		error->num_transcoders++; /* Account for eDP. */
16301 
16302 	for (i = 0; i < error->num_transcoders; i++) {
16303 		enum transcoder cpu_transcoder = transcoders[i];
16304 
16305 		error->transcoder[i].power_domain_on =
16306 			__intel_display_power_is_enabled(dev_priv,
16307 				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
16308 		if (!error->transcoder[i].power_domain_on)
16309 			continue;
16310 
16311 		error->transcoder[i].cpu_transcoder = cpu_transcoder;
16312 
16313 		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
16314 		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
16315 		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
16316 		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
16317 		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
16318 		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
16319 		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
16320 	}
16321 
16322 	return error;
16323 }
16324 
16325 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
16326 
16327 void
16328 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
16329 				struct drm_device *dev,
16330 				struct intel_display_error_state *error)
16331 {
16332 	struct drm_i915_private *dev_priv = dev->dev_private;
16333 	int i;
16334 
16335 	if (!error)
16336 		return;
16337 
16338 	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
16339 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
16340 		err_printf(m, "PWR_WELL_CTL2: %08x\n",
16341 			   error->power_well_driver);
16342 	for_each_pipe(dev_priv, i) {
16343 		err_printf(m, "Pipe [%d]:\n", i);
16344 		err_printf(m, "  Power: %s\n",
16345 			   onoff(error->pipe[i].power_domain_on));
16346 		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
16347 		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
16348 
16349 		err_printf(m, "Plane [%d]:\n", i);
16350 		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
16351 		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
16352 		if (INTEL_INFO(dev)->gen <= 3) {
16353 			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
16354 			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
16355 		}
16356 		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
16357 			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
16358 		if (INTEL_INFO(dev)->gen >= 4) {
16359 			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
16360 			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
16361 		}
16362 
16363 		err_printf(m, "Cursor [%d]:\n", i);
16364 		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
16365 		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
16366 		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
16367 	}
16368 
16369 	for (i = 0; i < error->num_transcoders; i++) {
16370 		err_printf(m, "CPU transcoder: %s\n",
16371 			   transcoder_name(error->transcoder[i].cpu_transcoder));
16372 		err_printf(m, "  Power: %s\n",
16373 			   onoff(error->transcoder[i].power_domain_on));
16374 		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
16375 		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
16376 		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
16377 		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
16378 		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
16379 		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
16380 		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
16381 	}
16382 }
16383 #endif
16384