xref: /dragonfly/sys/dev/drm/i915/intel_display.c (revision 5062ee70)
/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_dsi.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <linux/dma_remapping.h>
#include <linux/reservation.h>
#include <linux/dma-buf.h>

/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const uint32_t i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

static const uint32_t skl_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *ifb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipemisc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
	struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);

typedef struct {
	int	min, max;
} intel_range_t;

typedef struct {
	int	dot_limit;
	int	p2_slow, p2_fast;
} intel_p2_t;

typedef struct intel_limit intel_limit_t;
struct intel_limit {
	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t	    p2;
};

/* returns HPLL frequency in kHz */
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	mutex_lock(&dev_priv->sb_lock);
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;
	mutex_unlock(&dev_priv->sb_lock);

	return vco_freq[hpll_freq] * 1000;
}
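
/*
 * Worked example (editor's illustration, fuse value chosen arbitrarily):
 * a CCK fuse field of 2 selects vco_freq[2] = 2000, so the function
 * reports 2000 * 1000 = 2,000,000 kHz, i.e. a 2 GHz HPLL.
 */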

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, reg);
	mutex_unlock(&dev_priv->sb_lock);

	divider = val & CCK_FREQUENCY_VALUES;

	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
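
/*
 * The CCK divider above yields rate = 2 * ref / (divider + 1). Worked
 * example (editor's illustration): with a 1,600,000 kHz HPLL reference
 * and a divider field of 7, the clock runs at 2 * 1600000 / 8 =
 * 400,000 kHz.
 */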

static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
				  const char *name, u32 reg)
{
	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = valleyview_get_vco(dev_priv);

	return vlv_get_cck_clock(dev_priv, name, reg,
				 dev_priv->hpll_freq);
}

static int
intel_pch_rawclk(struct drm_i915_private *dev_priv)
{
	return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
}

static int
intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
{
	return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
				      CCK_DISPLAY_REF_CLOCK_CONTROL);
}

static int
intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
{
	uint32_t clkcfg;

	/* hrawclock is 1/4 the FSB frequency */
	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100000;
	case CLKCFG_FSB_533:
		return 133333;
	case CLKCFG_FSB_667:
		return 166667;
	case CLKCFG_FSB_800:
		return 200000;
	case CLKCFG_FSB_1067:
		return 266667;
	case CLKCFG_FSB_1333:
		return 333333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400000;
	default:
		return 133333;
	}
}

static void intel_update_rawclk(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv))
		dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->rawclk_freq = intel_vlv_hrawclk(dev_priv);
	else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
		dev_priv->rawclk_freq = intel_g4x_hrawclk(dev_priv);
	else
		return; /* no rawclk on other platforms, or no need to know it */

	DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* units of kHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
		    const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else if (IS_GEN5(dev_priv))
		return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000;
	else
		return 270000;
}
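
/*
 * Worked example (editor's illustration, feedback value chosen
 * arbitrarily): on gen5, an FDI_PLL_BIOS_0 feedback field of 25 gives
 * (25 + 2) * 10000 = 270,000 kHz, i.e. the 270 MHz FDI symbol clock.
 */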

static const intel_limit_t intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};


static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
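/*
 * Worked example (editor's illustration): the .m1 range of 12..22 in the
 * tables below is therefore in register units, so the effective
 * multiplier seen by the VCO equation spans (12 + 2)..(22 + 2) = 14..24.
 */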
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100 MHz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const intel_limit_t intel_limits_vlv = {
	 /*
	  * These are the data rate limits (measured in fast clocks)
	  * since those are the strictest limits we have. The fast
	  * clock and actual rate limits are more relaxed, so checking
	  * them would make no difference.
	  */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const intel_limit_t intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have.  The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

static bool
needs_modeset(struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}

/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		if (encoder->type == type)
			return true;

	return false;
}

/**
 * Returns whether any output on the specified pipe will have the specified
 * type after a staged modeset is complete, i.e., the same as
 * intel_pipe_has_type() but looking at the staged connector states in the
 * atomic state instead of the current encoder->crtc links.
 */
static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
				      int type)
{
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct intel_encoder *encoder;
	int i, num_connectors = 0;

	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		num_connectors++;

		encoder = to_intel_encoder(connector_state->best_encoder);
		if (encoder->type == type)
			return true;
	}

	WARN_ON(num_connectors == 0);

	return false;
}

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
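
/*
 * Worked example (editor's illustration, assuming Pineview's typical
 * 96 MHz refclk): n = 3, m2 = 100, p1 = 2, p2 = 10 gives m = 102,
 * vco = 96000 * 102 / 3 = 3,264,000 kHz and
 * dot = 3264000 / 20 = 163,200 kHz, all within the Pineview SDVO limits
 * above.
 */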

static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
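
/*
 * Worked example (editor's illustration, assuming a 96 MHz refclk):
 * m1 = 12, m2 = 7, n = 3, p1 = 2, p2 = 10 gives
 * m = 5 * (12 + 2) + (7 + 2) = 79,
 * vco = 96000 * 79 / (3 + 2) = 1,516,800 kHz and
 * dot = 1516800 / 20 = 75,840 kHz.
 */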

static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
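
/*
 * Worked example (editor's illustration, assuming the usual 100 MHz VLV
 * refclk): n = 5, m1 = 3, m2 = 81, p1 = 3, p2 = 6 gives
 * vco = 100000 * 243 / 5 = 4,860,000 kHz, a fast dot clock of
 * 4860000 / 18 = 270,000 kHz and a 270000 / 5 = 54,000 kHz pipe clock.
 */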

int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
			clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
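
/*
 * On CHV m2 carries 22 fractional bits, hence the n << 22 above. Worked
 * example (editor's illustration, assuming the usual 100 MHz refclk):
 * n = 1, m1 = 2, m2 = 30 << 22, p1 = 4, p2 = 5 gives
 * vco = 100000 * 60 = 6,000,000 kHz, a fast dot clock of
 * 6000000 / 20 = 300,000 kHz and a 60,000 kHz pipe clock.
 */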

#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) &&
	    !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

static int
i9xx_select_p2_div(const intel_limit_t *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}
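
/*
 * Worked example (editor's illustration): with the i9xx SDVO limits above
 * (dot_limit = 200000, p2_slow = 10, p2_fast = 5), a 148,500 kHz target
 * selects p2 = 10 while a 270,000 kHz target selects p2 = 5.
 */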

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then the best_clock P divider must match
 * the P divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const intel_limit_t *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	intel_clock_t clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then the best_clock P divider must match
 * the P divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	intel_clock_t clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then the best_clock P divider must match
 * the P divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	intel_clock_t clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
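
/*
 * Worked example (editor's illustration): for a 270,000 kHz target the
 * initial error budget err_most is (270000 >> 8) + (270000 >> 9) =
 * 1054 + 527 = 1581 kHz, i.e. roughly 0.59% of the target.
 */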

/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. The calculated error is
 * returned in @error_ppm.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const intel_clock_t *calculated_clock,
			       const intel_clock_t *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(dev)) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (WARN_ON_ONCE(!target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
				abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}
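
/*
 * Worked example (editor's illustration): a 540,000 kHz target with a
 * computed dot clock of 539,946 kHz yields an error of
 * 1000000 * 54 / 540000 = 100 ppm, exactly the threshold below which a
 * bigger P value is preferred outright.
 */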

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
vlv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
chv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	intel_clock_t clock;
	uint64_t m2;
	bool found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on the hardware docs, n is always set to 1 and m1 is
	 * always set to 2.  If we ever need to support a 200 MHz refclk,
	 * we must revisit this because n may no longer be 1.
	 */
	clock.n = 1, clock.m1 = 2;
	target *= 5;	/* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
				clock.p2 >= limit->p2.p2_slow;
				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
					clock.n) << 22, refclk * clock.m1);

			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_PLL_is_valid(dev, limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}

bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
			intel_clock_t *best_clock)
{
	int refclk = 100000;
	const intel_limit_t *limit = &intel_limits_bxt;

	return chv_find_best_dpll(limit, crtc_state,
				  target_clock, refclk, NULL, best_clock);
}

bool intel_crtc_active(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return intel_crtc->active && crtc->primary->state->fb &&
		intel_crtc->config->base.adjusted_mode.crtc_clock;
}

enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum i915_pipe pipe)
{
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return intel_crtc->config->cpu_transcoder;
}

static bool pipe_dsl_stopped(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN2(dev))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 == line2;
}

/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @crtc: crtc whose pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum i915_pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 4) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		/* Wait for the display line to settle */
		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
			WARN(1, "pipe_off wait timed out\n");
	}
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum i915_pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	mutex_unlock(&dev_priv->sb_lock);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum i915_pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum i915_pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum i915_pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (INTEL_INFO(dev_priv)->gen == 5)
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum i915_pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

void assert_panel_unlocked(struct drm_i915_private *dev_priv,
			   enum i915_pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	i915_reg_t pp_reg;
	u32 val;
	enum i915_pipe panel_pipe = PIPE_A;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (HAS_PCH_SPLIT(dev)) {
		u32 port_sel;

		pp_reg = PCH_PP_CONTROL;
		port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;

		if (port_sel == PANEL_PORT_SELECT_LVDS &&
		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
		/* XXX: else fix for eDP */
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = VLV_PIPE_PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		pp_reg = PP_CONTROL;
		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}

static void assert_cursor(struct drm_i915_private *dev_priv,
			  enum i915_pipe pipe, bool state)
{
	struct drm_device *dev = dev_priv->dev;
	bool cur_state;

	if (IS_845G(dev) || IS_I865G(dev))
		cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
	else
		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	I915_STATE_WARN(cur_state != state,
	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum i915_pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;

	/* if we need the pipe quirk it must be always on */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}

static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DSPCNTR(plane));
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "plane %c assertion failure (expected %s, current %s)\n",
			plane_name(plane), onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)

static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum i915_pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int i;

	/* Primary planes are fixed to pipes on gen4+ */
	if (INTEL_INFO(dev)->gen >= 4) {
		u32 val = I915_READ(DSPCNTR(pipe));
		I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
		     "plane %c assertion failure, should be disabled but is not\n",
		     plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for_each_pipe(dev_priv, i) {
		u32 val = I915_READ(DSPCNTR(i));
		enum i915_pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}

static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
				    enum i915_pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int sprite;

	if (INTEL_INFO(dev)->gen >= 9) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(PLANE_CTL(pipe, sprite));
			I915_STATE_WARN(val & PLANE_CTL_ENABLE,
			     "plane %d assertion failure, should be off on pipe %c but is still active\n",
			     sprite, pipe_name(pipe));
		}
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(SPCNTR(pipe, sprite));
			I915_STATE_WARN(val & SP_ENABLE,
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
			     sprite_name(pipe, sprite), pipe_name(pipe));
		}
	} else if (INTEL_INFO(dev)->gen >= 7) {
		u32 val = I915_READ(SPRCTL(pipe));
		I915_STATE_WARN(val & SPRITE_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	} else if (INTEL_INFO(dev)->gen >= 5) {
		u32 val = I915_READ(DVSCNTR(pipe));
		I915_STATE_WARN(val & DVS_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	}
}

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum i915_pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}

static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum i915_pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
			return false;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
			return false;
	} else {
		if ((val & DP_PIPE_MASK) != (pipe << 30))
			return false;
	}
	return true;
}

static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum i915_pipe pipe, u32 val)
{
	if ((val & SDVO_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
			return false;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
			return false;
	} else {
		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
			return false;
	}
	return true;
}

static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum i915_pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
			return false;
	}
	return true;
}

static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum i915_pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;
	if (HAS_PCH_CPT(dev_priv)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
			return false;
	}
	return true;
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum i915_pipe pipe, i915_reg_t reg,
				   u32 port_sel)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     i915_mmio_reg_offset(reg), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
	     && (val & DP_PIPEB_SELECT),
	     "IBX PCH dp port still using transcoder B\n");
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum i915_pipe pipe, i915_reg_t reg)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     i915_mmio_reg_offset(reg), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
	     && (val & SDVO_PIPE_B_SELECT),
	     "IBX PCH hdmi port still using transcoder B\n");
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum i915_pipe pipe)
{
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	val = I915_READ(PCH_ADPA);
	I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	val = I915_READ(PCH_LVDS);
	I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}

static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum i915_pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}

static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum i915_pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}


static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum i915_pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}

static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum i915_pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C);
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}

static int intel_num_dvo_pipes(struct drm_device *dev)
{
	struct intel_crtc *crtc;
	int count = 0;

	for_each_intel_crtc(dev, crtc)
		count += crtc->base.state->active &&
			intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);

	return count;
}

static void i9xx_enable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev) && !IS_I830(dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneously.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc->config->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}

1705 /**
1706  * i9xx_disable_pll - disable a PLL
1707  * @crtc: crtc whose pipe PLL to disable
1708  *
1709  * Disable the PLL for @crtc's pipe, making sure the pipe is off first.
1711  *
1712  * Note!  This is for pre-ILK only.
1713  */
1714 static void i9xx_disable_pll(struct intel_crtc *crtc)
1715 {
1716 	struct drm_device *dev = crtc->base.dev;
1717 	struct drm_i915_private *dev_priv = dev->dev_private;
1718 	enum i915_pipe pipe = crtc->pipe;
1719 
1720 	/* Disable DVO 2x clock on both PLLs if necessary */
1721 	if (IS_I830(dev) &&
1722 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
1723 	    !intel_num_dvo_pipes(dev)) {
1724 		I915_WRITE(DPLL(PIPE_B),
1725 			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
1726 		I915_WRITE(DPLL(PIPE_A),
1727 			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
1728 	}
1729 
1730 	/* Don't disable the pipe or its PLL if a quirk needs them kept running */
1731 	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1732 	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1733 		return;
1734 
1735 	/* Make sure the pipe isn't still relying on us */
1736 	assert_pipe_disabled(dev_priv, pipe);
1737 
1738 	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
1739 	POSTING_READ(DPLL(pipe));
1740 }
1741 
1742 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
1743 {
1744 	u32 val;
1745 
1746 	/* Make sure the pipe isn't still relying on us */
1747 	assert_pipe_disabled(dev_priv, pipe);
1748 
1749 	val = DPLL_INTEGRATED_REF_CLK_VLV |
1750 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1751 	if (pipe != PIPE_A)
1752 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1753 
1754 	I915_WRITE(DPLL(pipe), val);
1755 	POSTING_READ(DPLL(pipe));
1756 }
1757 
1758 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
1759 {
1760 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1761 	u32 val;
1762 
1763 	/* Make sure the pipe isn't still relying on us */
1764 	assert_pipe_disabled(dev_priv, pipe);
1765 
1766 	val = DPLL_SSC_REF_CLK_CHV |
1767 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1768 	if (pipe != PIPE_A)
1769 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1770 
1771 	I915_WRITE(DPLL(pipe), val);
1772 	POSTING_READ(DPLL(pipe));
1773 
1774 	mutex_lock(&dev_priv->sb_lock);
1775 
1776 	/* Disable 10bit clock to display controller */
1777 	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1778 	val &= ~DPIO_DCLKP_EN;
1779 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1780 
1781 	mutex_unlock(&dev_priv->sb_lock);
1782 }
1783 
1784 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1785 			 struct intel_digital_port *dport,
1786 			 unsigned int expected_mask)
1787 {
1788 	u32 port_mask;
1789 	i915_reg_t dpll_reg;
1790 
1791 	switch (dport->port) {
1792 	case PORT_B:
1793 		port_mask = DPLL_PORTB_READY_MASK;
1794 		dpll_reg = DPLL(0);
1795 		break;
1796 	case PORT_C:
1797 		port_mask = DPLL_PORTC_READY_MASK;
1798 		dpll_reg = DPLL(0);
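		/*
		 * Port C's ready bits live one nibble above port B's in
		 * DPLL(0) (see DPLL_PORTC_READY_MASK), so shift the
		 * caller's expected mask to match.
		 */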
1799 		expected_mask <<= 4;
1800 		break;
1801 	case PORT_D:
1802 		port_mask = DPLL_PORTD_READY_MASK;
1803 		dpll_reg = DPIO_PHY_STATUS;
1804 		break;
1805 	default:
1806 		BUG();
1807 	}
1808 
1809 	if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
1810 		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
1811 		     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
1812 }
1813 
1814 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1815 					   enum i915_pipe pipe)
1816 {
1817 	struct drm_device *dev = dev_priv->dev;
1818 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1819 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1820 	i915_reg_t reg;
1821 	uint32_t val, pipeconf_val;
1822 
1823 	/* Make sure PCH DPLL is enabled */
1824 	assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
1825 
1826 	/* FDI must be feeding us bits for PCH ports */
1827 	assert_fdi_tx_enabled(dev_priv, pipe);
1828 	assert_fdi_rx_enabled(dev_priv, pipe);
1829 
1830 	if (HAS_PCH_CPT(dev)) {
1831 		/* Workaround: Set the timing override bit before enabling the
1832 		 * pch transcoder. */
1833 		reg = TRANS_CHICKEN2(pipe);
1834 		val = I915_READ(reg);
1835 		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1836 		I915_WRITE(reg, val);
1837 	}
1838 
1839 	reg = PCH_TRANSCONF(pipe);
1840 	val = I915_READ(reg);
1841 	pipeconf_val = I915_READ(PIPECONF(pipe));
1842 
1843 	if (HAS_PCH_IBX(dev_priv)) {
1844 		/*
1845 		 * Make the BPC in the transcoder consistent with that
1846 		 * in the pipeconf reg. For HDMI we must use 8bpc
1847 		 * here for both 8bpc and 12bpc.
1848 		 */
1849 		val &= ~PIPECONF_BPC_MASK;
1850 		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
1851 			val |= PIPECONF_8BPC;
1852 		else
1853 			val |= pipeconf_val & PIPECONF_BPC_MASK;
1854 	}
1855 
1856 	val &= ~TRANS_INTERLACE_MASK;
1857 	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
1858 		if (HAS_PCH_IBX(dev_priv) &&
1859 		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
1860 			val |= TRANS_LEGACY_INTERLACED_ILK;
1861 		else
1862 			val |= TRANS_INTERLACED;
1863 	} else
1864 		val |= TRANS_PROGRESSIVE;
1865 
1866 	I915_WRITE(reg, val | TRANS_ENABLE);
1867 	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1868 		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1869 }
1870 
1871 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1872 				      enum transcoder cpu_transcoder)
1873 {
1874 	u32 val, pipeconf_val;
1875 
1876 	/* FDI must be feeding us bits for PCH ports */
1877 	assert_fdi_tx_enabled(dev_priv, (enum i915_pipe) cpu_transcoder);
1878 	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
1879 
1880 	/* Workaround: set timing override bit. */
1881 	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1882 	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1883 	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1884 
1885 	val = TRANS_ENABLE;
1886 	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1887 
1888 	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1889 	    PIPECONF_INTERLACED_ILK)
1890 		val |= TRANS_INTERLACED;
1891 	else
1892 		val |= TRANS_PROGRESSIVE;
1893 
1894 	I915_WRITE(LPT_TRANSCONF, val);
1895 	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
1896 		DRM_ERROR("Failed to enable PCH transcoder\n");
1897 }
1898 
1899 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1900 					    enum i915_pipe pipe)
1901 {
1902 	struct drm_device *dev = dev_priv->dev;
1903 	i915_reg_t reg;
1904 	uint32_t val;
1905 
1906 	/* FDI relies on the transcoder */
1907 	assert_fdi_tx_disabled(dev_priv, pipe);
1908 	assert_fdi_rx_disabled(dev_priv, pipe);
1909 
1910 	/* Ports must be off as well */
1911 	assert_pch_ports_disabled(dev_priv, pipe);
1912 
1913 	reg = PCH_TRANSCONF(pipe);
1914 	val = I915_READ(reg);
1915 	val &= ~TRANS_ENABLE;
1916 	I915_WRITE(reg, val);
1917 	/* wait for PCH transcoder off, transcoder state */
1918 	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1919 		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1920 
1921 	if (HAS_PCH_CPT(dev)) {
1922 		/* Workaround: Clear the timing override chicken bit again. */
1923 		reg = TRANS_CHICKEN2(pipe);
1924 		val = I915_READ(reg);
1925 		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1926 		I915_WRITE(reg, val);
1927 	}
1928 }
1929 
1930 static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1931 {
1932 	u32 val;
1933 
1934 	val = I915_READ(LPT_TRANSCONF);
1935 	val &= ~TRANS_ENABLE;
1936 	I915_WRITE(LPT_TRANSCONF, val);
1937 	/* wait for PCH transcoder off, transcoder state */
1938 	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
1939 		DRM_ERROR("Failed to disable PCH transcoder\n");
1940 
1941 	/* Workaround: clear timing override bit. */
1942 	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1943 	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1944 	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1945 }
1946 
1947 /**
1948  * intel_enable_pipe - enable a pipe, asserting requirements
1949  * @crtc: crtc responsible for the pipe
1950  *
1951  * Enable @crtc's pipe, making sure that various hardware specific requirements
1952  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1953  */
1954 static void intel_enable_pipe(struct intel_crtc *crtc)
1955 {
1956 	struct drm_device *dev = crtc->base.dev;
1957 	struct drm_i915_private *dev_priv = dev->dev_private;
1958 	enum i915_pipe pipe = crtc->pipe;
1959 	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
1960 	enum i915_pipe pch_transcoder;
1961 	i915_reg_t reg;
1962 	u32 val;
1963 
1964 	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
1965 
1966 	assert_planes_disabled(dev_priv, pipe);
1967 	assert_cursor_disabled(dev_priv, pipe);
1968 	assert_sprites_disabled(dev_priv, pipe);
1969 
1970 	if (HAS_PCH_LPT(dev_priv))
1971 		pch_transcoder = TRANSCODER_A;
1972 	else
1973 		pch_transcoder = pipe;
1974 
1975 	/*
1976 	 * A pipe without a PLL won't actually be able to drive bits from
1977 	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
1978 	 * need the check.
1979 	 */
1980 	if (HAS_GMCH_DISPLAY(dev_priv)) {
1981 		if (crtc->config->has_dsi_encoder)
1982 			assert_dsi_pll_enabled(dev_priv);
1983 		else
1984 			assert_pll_enabled(dev_priv, pipe);
1985 	} else {
1986 		if (crtc->config->has_pch_encoder) {
1987 			/* if driving the PCH, we need FDI enabled */
1988 			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
1989 			assert_fdi_tx_pll_enabled(dev_priv,
1990 						  (enum i915_pipe) cpu_transcoder);
1991 		}
1992 		/* FIXME: assert CPU port conditions for SNB+ */
1993 	}
1994 
1995 	reg = PIPECONF(cpu_transcoder);
1996 	val = I915_READ(reg);
1997 	if (val & PIPECONF_ENABLE) {
1998 		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1999 			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
2000 		return;
2001 	}
2002 
2003 	I915_WRITE(reg, val | PIPECONF_ENABLE);
2004 	POSTING_READ(reg);
2005 
2006 	/*
2007 	 * Until the pipe starts, DSL will read as 0, which would cause
2008 	 * an apparent vblank timestamp jump and also mess up the frame
2009 	 * count when it's derived from the timestamps. So let's wait
2010 	 * for the pipe to start properly before we call
2011 	 * drm_crtc_vblank_on().
2012 	 */
2013 	if (dev->max_vblank_count == 0 &&
2014 	    wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
2015 		DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
2016 }
2017 
2018 /**
2019  * intel_disable_pipe - disable a pipe, asserting requirements
2020  * @crtc: crtc whose pipe is to be disabled
2021  *
2022  * Disable the pipe of @crtc, making sure that various hardware
2023  * specific requirements are met, if applicable, e.g. plane
2024  * disabled, panel fitter off, etc.
2025  *
2026  * Will wait until the pipe has shut down before returning.
2027  */
2028 static void intel_disable_pipe(struct intel_crtc *crtc)
2029 {
2030 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
2031 	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
2032 	enum i915_pipe pipe = crtc->pipe;
2033 	i915_reg_t reg;
2034 	u32 val;
2035 
2036 	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
2037 
2038 	/*
2039 	 * Make sure planes won't keep trying to pump pixels to us,
2040 	 * or we might hang the display.
2041 	 */
2042 	assert_planes_disabled(dev_priv, pipe);
2043 	assert_cursor_disabled(dev_priv, pipe);
2044 	assert_sprites_disabled(dev_priv, pipe);
2045 
2046 	reg = PIPECONF(cpu_transcoder);
2047 	val = I915_READ(reg);
2048 	if ((val & PIPECONF_ENABLE) == 0)
2049 		return;
2050 
2051 	/*
2052 	 * Double wide has implications for planes
2053 	 * so best keep it disabled when not needed.
2054 	 */
2055 	if (crtc->config->double_wide)
2056 		val &= ~PIPECONF_DOUBLE_WIDE;
2057 
2058 	/* Don't disable the pipe or its PLL if a quirk needs them kept running */
2059 	if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
2060 	    !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
2061 		val &= ~PIPECONF_ENABLE;
2062 
2063 	I915_WRITE(reg, val);
2064 	if ((val & PIPECONF_ENABLE) == 0)
2065 		intel_wait_for_pipe_off(crtc);
2066 }
2067 
2068 static bool need_vtd_wa(struct drm_device *dev)
2069 {
2070 #ifdef CONFIG_INTEL_IOMMU
2071 	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2072 		return true;
2073 #endif
2074 	return false;
2075 }
2076 
2077 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
2078 {
2079 	return IS_GEN2(dev_priv) ? 2048 : 4096;
2080 }
2081 
2082 static unsigned int intel_tile_width_bytes(const struct drm_i915_private *dev_priv,
2083 					   uint64_t fb_modifier, unsigned int cpp)
2084 {
2085 	switch (fb_modifier) {
2086 	case DRM_FORMAT_MOD_NONE:
2087 		return cpp;
2088 	case I915_FORMAT_MOD_X_TILED:
2089 		if (IS_GEN2(dev_priv))
2090 			return 128;
2091 		else
2092 			return 512;
2093 	case I915_FORMAT_MOD_Y_TILED:
2094 		if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
2095 			return 128;
2096 		else
2097 			return 512;
2098 	case I915_FORMAT_MOD_Yf_TILED:
2099 		switch (cpp) {
2100 		case 1:
2101 			return 64;
2102 		case 2:
2103 		case 4:
2104 			return 128;
2105 		case 8:
2106 		case 16:
2107 			return 256;
2108 		default:
2109 			MISSING_CASE(cpp);
2110 			return cpp;
2111 		}
2112 		break;
2113 	default:
2114 		MISSING_CASE(fb_modifier);
2115 		return cpp;
2116 	}
2117 }
2118 
2119 unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
2120 			       uint64_t fb_modifier, unsigned int cpp)
2121 {
2122 	if (fb_modifier == DRM_FORMAT_MOD_NONE)
2123 		return 1;
2124 	else
2125 		return intel_tile_size(dev_priv) /
2126 			intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
2127 }
2128 
2129 /* Return the tile dimensions in pixel units */
2130 static void intel_tile_dims(const struct drm_i915_private *dev_priv,
2131 			    unsigned int *tile_width,
2132 			    unsigned int *tile_height,
2133 			    uint64_t fb_modifier,
2134 			    unsigned int cpp)
2135 {
2136 	unsigned int tile_width_bytes =
2137 		intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
2138 
2139 	*tile_width = tile_width_bytes / cpp;
2140 	*tile_height = intel_tile_size(dev_priv) / tile_width_bytes;
2141 }
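/*
 * Worked example (illustrative numbers only): with cpp = 4, an X tile on
 * gen3+ is 512 bytes x 8 rows, so intel_tile_dims() yields a tile of
 * 512 / 4 = 128 pixels by 4096 / 512 = 8 rows; a 128-byte-row Y tile
 * instead yields 128 / 4 = 32 pixels by 4096 / 128 = 32 rows.
 */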
2142 
2143 unsigned int
2144 intel_fb_align_height(struct drm_device *dev, unsigned int height,
2145 		      uint32_t pixel_format, uint64_t fb_modifier)
2146 {
2147 	unsigned int cpp = drm_format_plane_cpp(pixel_format, 0);
2148 	unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp);
2149 
2150 	return ALIGN(height, tile_height);
2151 }
2152 
2153 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2154 {
2155 	unsigned int size = 0;
2156 	int i;
2157 
2158 	for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
2159 		size += rot_info->plane[i].width * rot_info->plane[i].height;
2160 
2161 	return size;
2162 }
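/*
 * Example (illustrative numbers only): a 10x5-tile primary plane plus a
 * 5x3-tile NV12 chroma plane gives 10*5 + 5*3 = 65 pages for the rotated
 * view, since each 4k tile occupies exactly one GGTT page there.
 */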
2163 
2164 static void
2165 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2166 			struct drm_framebuffer *fb,
2167 			unsigned int rotation)
2168 {
2169 	if (intel_rotation_90_or_270(rotation)) {
2170 		*view = i915_ggtt_view_rotated;
2171 		view->params.rotated = to_intel_framebuffer(fb)->rot_info;
2172 	} else {
2173 		*view = i915_ggtt_view_normal;
2174 	}
2175 }
2176 
2177 static void
2178 intel_fill_fb_info(struct drm_i915_private *dev_priv,
2179 		   struct drm_framebuffer *fb)
2180 {
2181 	struct intel_rotation_info *info = &to_intel_framebuffer(fb)->rot_info;
2182 	unsigned int tile_size, tile_width, tile_height, cpp;
2183 
2184 	tile_size = intel_tile_size(dev_priv);
2185 
2186 	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
2187 	intel_tile_dims(dev_priv, &tile_width, &tile_height,
2188 			fb->modifier[0], cpp);
2189 
2190 	info->plane[0].width = DIV_ROUND_UP(fb->pitches[0], tile_width * cpp);
2191 	info->plane[0].height = DIV_ROUND_UP(fb->height, tile_height);
2192 
2193 	if (fb->pixel_format == DRM_FORMAT_NV12) {
2194 		cpp = drm_format_plane_cpp(fb->pixel_format, 1);
2195 		intel_tile_dims(dev_priv, &tile_width, &tile_height,
2196 				fb->modifier[1], cpp);
2197 
2198 		info->uv_offset = fb->offsets[1];
2199 		info->plane[1].width = DIV_ROUND_UP(fb->pitches[1], tile_width * cpp);
2200 		info->plane[1].height = DIV_ROUND_UP(fb->height / 2, tile_height);
2201 	}
2202 }
2203 
2204 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
2205 {
2206 	if (INTEL_INFO(dev_priv)->gen >= 9)
2207 		return 256 * 1024;
2208 	else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
2209 		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2210 		return 128 * 1024;
2211 	else if (INTEL_INFO(dev_priv)->gen >= 4)
2212 		return 4 * 1024;
2213 	else
2214 		return 0;
2215 }
2216 
2217 static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
2218 					 uint64_t fb_modifier)
2219 {
2220 	switch (fb_modifier) {
2221 	case DRM_FORMAT_MOD_NONE:
2222 		return intel_linear_alignment(dev_priv);
2223 	case I915_FORMAT_MOD_X_TILED:
2224 		if (INTEL_INFO(dev_priv)->gen >= 9)
2225 			return 256 * 1024;
2226 		return 0;
2227 	case I915_FORMAT_MOD_Y_TILED:
2228 	case I915_FORMAT_MOD_Yf_TILED:
2229 		return 1 * 1024 * 1024;
2230 	default:
2231 		MISSING_CASE(fb_modifier);
2232 		return 0;
2233 	}
2234 }
2235 
2236 int
2237 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2238 			   unsigned int rotation)
2239 {
2240 	struct drm_device *dev = fb->dev;
2241 	struct drm_i915_private *dev_priv = dev->dev_private;
2242 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2243 	struct i915_ggtt_view view;
2244 	u32 alignment;
2245 	int ret;
2246 
2247 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2248 
2249 	alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
2250 
2251 	intel_fill_fb_ggtt_view(&view, fb, rotation);
2252 
2253 	/* Note that the w/a also requires 64 PTE of padding following the
2254 	 * bo. We currently fill all unused PTE with the shadow page and so
2255 	 * we should always have valid PTE following the scanout preventing
2256 	 * the VT-d warning.
2257 	 */
2258 	if (need_vtd_wa(dev) && alignment < 256 * 1024)
2259 		alignment = 256 * 1024;
2260 
2261 	/*
2262 	 * Global GTT PTE registers are special registers which actually forward
2263 	 * writes to a chunk of system memory, which means there is no risk
2264 	 * that the register values disappear as soon as we call
2265 	 * intel_runtime_pm_put(), so it is correct to wrap only the
2266 	 * pin/unpin/fence and not more.
2267 	 */
2268 	intel_runtime_pm_get(dev_priv);
2269 
2270 	ret = i915_gem_object_pin_to_display_plane(obj, alignment,
2271 						   &view);
2272 	if (ret)
2273 		goto err_pm;
2274 
2275 	/* Install a fence for tiled scan-out. Pre-i965 always needs a
2276 	 * fence, whereas 965+ only requires a fence if using
2277 	 * framebuffer compression.  For simplicity, we always install
2278 	 * a fence as the cost is not that onerous.
2279 	 */
2280 	if (view.type == I915_GGTT_VIEW_NORMAL) {
2281 		ret = i915_gem_object_get_fence(obj);
2282 		if (ret == -EDEADLK) {
2283 			/*
2284 			 * -EDEADLK means there are no free fences
2285 			 * and no pending flips.
2286 			 *
2287 			 * This is propagated to atomic, but it uses
2288 			 * -EDEADLK to force a locking recovery, so
2289 			 * change the returned error to -EBUSY.
2290 			 */
2291 			ret = -EBUSY;
2292 			goto err_unpin;
2293 		} else if (ret)
2294 			goto err_unpin;
2295 
2296 		i915_gem_object_pin_fence(obj);
2297 	}
2298 
2299 	intel_runtime_pm_put(dev_priv);
2300 	return 0;
2301 
2302 err_unpin:
2303 	i915_gem_object_unpin_from_display_plane(obj, &view);
2304 err_pm:
2305 	intel_runtime_pm_put(dev_priv);
2306 	return ret;
2307 }
2308 
2309 static void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
2310 {
2311 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2312 	struct i915_ggtt_view view;
2313 
2314 	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
2315 
2316 	intel_fill_fb_ggtt_view(&view, fb, rotation);
2317 
2318 	if (view.type == I915_GGTT_VIEW_NORMAL)
2319 		i915_gem_object_unpin_fence(obj);
2320 
2321 	i915_gem_object_unpin_from_display_plane(obj, &view);
2322 }
2323 
2324 /*
2325  * Adjust the tile offset by moving the difference into
2326  * the x/y offsets.
2327  *
2328  * Input tile dimensions and pitch must already be
2329  * rotated to match x and y, and in pixel units.
2330  */
2331 static u32 intel_adjust_tile_offset(int *x, int *y,
2332 				    unsigned int tile_width,
2333 				    unsigned int tile_height,
2334 				    unsigned int tile_size,
2335 				    unsigned int pitch_tiles,
2336 				    u32 old_offset,
2337 				    u32 new_offset)
2338 {
2339 	unsigned int tiles;
2340 
2341 	WARN_ON(old_offset & (tile_size - 1));
2342 	WARN_ON(new_offset & (tile_size - 1));
2343 	WARN_ON(new_offset > old_offset);
2344 
2345 	tiles = (old_offset - new_offset) / tile_size;
2346 
2347 	*y += tiles / pitch_tiles * tile_height;
2348 	*x += tiles % pitch_tiles * tile_width;
2349 
2350 	return new_offset;
2351 }
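/*
 * Worked example (illustrative numbers only): with 4k tiles of
 * 128x8 pixels (X-tiled, cpp = 4) and pitch_tiles = 10, lowering the
 * offset by old_offset - new_offset = 3 * 4096 bytes walks forward by
 * three tiles:
 *
 *	*y += 3 / 10 * 8   = 0 rows
 *	*x += 3 % 10 * 128 = 384 pixels
 */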
2352 
2353 /*
2354  * Computes the linear offset to the base tile and adjusts
2355  * x, y. bytes per pixel is assumed to be a power-of-two.
2356  * x, y. Bytes per pixel is assumed to be a power of two.
2357  * In the 90/270 rotated case, x and y are assumed
2358  * to be already rotated to match the rotated GTT view, and
2359  * pitch is the tile_height aligned framebuffer height.
2360  */
2361 u32 intel_compute_tile_offset(int *x, int *y,
2362 			      const struct drm_framebuffer *fb, int plane,
2363 			      unsigned int pitch,
2364 			      unsigned int rotation)
2365 {
2366 	const struct drm_i915_private *dev_priv = to_i915(fb->dev);
2367 	uint64_t fb_modifier = fb->modifier[plane];
2368 	unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
2369 	u32 offset, offset_aligned, alignment;
2370 
2371 	alignment = intel_surf_alignment(dev_priv, fb_modifier);
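	/* Turn the power-of-two alignment into a mask; 0 means no restriction. */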
2372 	if (alignment)
2373 		alignment--;
2374 
2375 	if (fb_modifier != DRM_FORMAT_MOD_NONE) {
2376 		unsigned int tile_size, tile_width, tile_height;
2377 		unsigned int tile_rows, tiles, pitch_tiles;
2378 
2379 		tile_size = intel_tile_size(dev_priv);
2380 		intel_tile_dims(dev_priv, &tile_width, &tile_height,
2381 				fb_modifier, cpp);
2382 
2383 		if (intel_rotation_90_or_270(rotation)) {
2384 			pitch_tiles = pitch / tile_height;
2385 			swap(tile_width, tile_height);
2386 		} else {
2387 			pitch_tiles = pitch / (tile_width * cpp);
2388 		}
2389 
2390 		tile_rows = *y / tile_height;
2391 		*y %= tile_height;
2392 
2393 		tiles = *x / tile_width;
2394 		*x %= tile_width;
2395 
2396 		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2397 		offset_aligned = offset & ~alignment;
2398 
2399 		intel_adjust_tile_offset(x, y, tile_width, tile_height,
2400 					 tile_size, pitch_tiles,
2401 					 offset, offset_aligned);
2402 	} else {
2403 		offset = *y * pitch + *x * cpp;
2404 		offset_aligned = offset & ~alignment;
2405 
2406 		*y = (offset & alignment) / pitch;
2407 		*x = ((offset & alignment) - *y * pitch) / cpp;
2408 	}
2409 
2410 	return offset_aligned;
2411 }
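/*
 * Worked example (illustrative numbers only): X-tiled, cpp = 4,
 * pitch = 5120 bytes, *x = 200, *y = 20, 256k surface alignment:
 *
 *	tile = 128x8 pixels, tile_size = 4096, pitch_tiles = 5120 / 512 = 10
 *	tile_rows = 20 / 8 = 2, *y -> 4; tiles = 200 / 128 = 1, *x -> 72
 *	offset = (2 * 10 + 1) * 4096 = 86016
 *	offset_aligned = 86016 & ~(256k - 1) = 0
 *
 * intel_adjust_tile_offset() then folds the remaining 21 tiles back into
 * the coordinates: *y -> 4 + 21 / 10 * 8 = 20, *x -> 72 + 21 % 10 * 128 = 200.
 */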
2412 
2413 static int i9xx_format_to_fourcc(int format)
2414 {
2415 	switch (format) {
2416 	case DISPPLANE_8BPP:
2417 		return DRM_FORMAT_C8;
2418 	case DISPPLANE_BGRX555:
2419 		return DRM_FORMAT_XRGB1555;
2420 	case DISPPLANE_BGRX565:
2421 		return DRM_FORMAT_RGB565;
2422 	default:
2423 	case DISPPLANE_BGRX888:
2424 		return DRM_FORMAT_XRGB8888;
2425 	case DISPPLANE_RGBX888:
2426 		return DRM_FORMAT_XBGR8888;
2427 	case DISPPLANE_BGRX101010:
2428 		return DRM_FORMAT_XRGB2101010;
2429 	case DISPPLANE_RGBX101010:
2430 		return DRM_FORMAT_XBGR2101010;
2431 	}
2432 }
2433 
2434 static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2435 {
2436 	switch (format) {
2437 	case PLANE_CTL_FORMAT_RGB_565:
2438 		return DRM_FORMAT_RGB565;
2439 	default:
2440 	case PLANE_CTL_FORMAT_XRGB_8888:
2441 		if (rgb_order) {
2442 			if (alpha)
2443 				return DRM_FORMAT_ABGR8888;
2444 			else
2445 				return DRM_FORMAT_XBGR8888;
2446 		} else {
2447 			if (alpha)
2448 				return DRM_FORMAT_ARGB8888;
2449 			else
2450 				return DRM_FORMAT_XRGB8888;
2451 		}
2452 	case PLANE_CTL_FORMAT_XRGB_2101010:
2453 		if (rgb_order)
2454 			return DRM_FORMAT_XBGR2101010;
2455 		else
2456 			return DRM_FORMAT_XRGB2101010;
2457 	}
2458 }
2459 
2460 static bool
2461 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2462 			      struct intel_initial_plane_config *plane_config)
2463 {
2464 	struct drm_device *dev = crtc->base.dev;
2465 	struct drm_i915_private *dev_priv = to_i915(dev);
2466 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
2467 	struct drm_i915_gem_object *obj = NULL;
2468 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2469 	struct drm_framebuffer *fb = &plane_config->fb->base;
2470 	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
2471 	u32 size_aligned = round_up(plane_config->base + plane_config->size,
2472 				    PAGE_SIZE);
2473 
2474 	size_aligned -= base_aligned;
2475 
2476 	if (plane_config->size == 0)
2477 		return false;
2478 
2479 	/* If the FB is too big, just don't use it since fbdev is not very
2480 	 * important and we should probably use that space with FBC or other
2481 	 * features. */
2482 	if (size_aligned * 2 > ggtt->stolen_usable_size)
2483 		return false;
2484 
2485 	mutex_lock(&dev->struct_mutex);
2486 
2487 	obj = i915_gem_object_create_stolen_for_preallocated(dev,
2488 							     base_aligned,
2489 							     base_aligned,
2490 							     size_aligned);
2491 	if (!obj) {
2492 		mutex_unlock(&dev->struct_mutex);
2493 		return false;
2494 	}
2495 
2496 	obj->tiling_mode = plane_config->tiling;
2497 	if (obj->tiling_mode == I915_TILING_X)
2498 		obj->stride = fb->pitches[0];
2499 
2500 	mode_cmd.pixel_format = fb->pixel_format;
2501 	mode_cmd.width = fb->width;
2502 	mode_cmd.height = fb->height;
2503 	mode_cmd.pitches[0] = fb->pitches[0];
2504 	mode_cmd.modifier[0] = fb->modifier[0];
2505 	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
2506 
2507 	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
2508 				   &mode_cmd, obj)) {
2509 		DRM_DEBUG_KMS("intel fb init failed\n");
2510 		goto out_unref_obj;
2511 	}
2512 
2513 	mutex_unlock(&dev->struct_mutex);
2514 
2515 	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
2516 	return true;
2517 
2518 out_unref_obj:
2519 	drm_gem_object_unreference(&obj->base);
2520 	mutex_unlock(&dev->struct_mutex);
2521 	return false;
2522 }
2523 
2524 /* Update plane->state->fb to match plane->fb after driver-internal updates */
2525 static void
2526 update_state_fb(struct drm_plane *plane)
2527 {
2528 	if (plane->fb == plane->state->fb)
2529 		return;
2530 
2531 	if (plane->state->fb)
2532 		drm_framebuffer_unreference(plane->state->fb);
2533 	plane->state->fb = plane->fb;
2534 	if (plane->state->fb)
2535 		drm_framebuffer_reference(plane->state->fb);
2536 }
2537 
2538 static void
2539 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2540 			     struct intel_initial_plane_config *plane_config)
2541 {
2542 	struct drm_device *dev = intel_crtc->base.dev;
2543 	struct drm_i915_private *dev_priv = dev->dev_private;
2544 	struct drm_crtc *c;
2545 	struct intel_crtc *i;
2546 	struct drm_i915_gem_object *obj;
2547 	struct drm_plane *primary = intel_crtc->base.primary;
2548 	struct drm_plane_state *plane_state = primary->state;
2549 	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
2550 	struct intel_plane *intel_plane = to_intel_plane(primary);
2551 	struct intel_plane_state *intel_state =
2552 		to_intel_plane_state(plane_state);
2553 	struct drm_framebuffer *fb;
2554 
2555 	if (!plane_config->fb)
2556 		return;
2557 
2558 	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
2559 		fb = &plane_config->fb->base;
2560 		goto valid_fb;
2561 	}
2562 
2563 	kfree(plane_config->fb);
2564 
2565 	/*
2566 	 * Failed to alloc the obj, check to see if we should share
2567 	 * an fb with another CRTC instead
2568 	 */
2569 	for_each_crtc(dev, c) {
2570 		i = to_intel_crtc(c);
2571 
2572 		if (c == &intel_crtc->base)
2573 			continue;
2574 
2575 		if (!i->active)
2576 			continue;
2577 
2578 		fb = c->primary->fb;
2579 		if (!fb)
2580 			continue;
2581 
2582 		obj = intel_fb_obj(fb);
2583 		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
2584 			drm_framebuffer_reference(fb);
2585 			goto valid_fb;
2586 		}
2587 	}
2588 
2589 	/*
2590 	 * We've failed to reconstruct the BIOS FB.  Current display state
2591 	 * indicates that the primary plane is visible, but has a NULL FB,
2592 	 * which will lead to problems later if we don't fix it up.  The
2593 	 * simplest solution is to just disable the primary plane now and
2594 	 * pretend the BIOS never had it enabled.
2595 	 */
2596 	to_intel_plane_state(plane_state)->visible = false;
2597 	crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
2598 	intel_pre_disable_primary_noatomic(&intel_crtc->base);
2599 	intel_plane->disable_plane(primary, &intel_crtc->base);
2600 
2601 	return;
2602 
2603 valid_fb:
2604 	plane_state->src_x = 0;
2605 	plane_state->src_y = 0;
2606 	plane_state->src_w = fb->width << 16;
2607 	plane_state->src_h = fb->height << 16;
2608 
2609 	plane_state->crtc_x = 0;
2610 	plane_state->crtc_y = 0;
2611 	plane_state->crtc_w = fb->width;
2612 	plane_state->crtc_h = fb->height;
2613 
2614 	intel_state->src.x1 = plane_state->src_x;
2615 	intel_state->src.y1 = plane_state->src_y;
2616 	intel_state->src.x2 = plane_state->src_x + plane_state->src_w;
2617 	intel_state->src.y2 = plane_state->src_y + plane_state->src_h;
2618 	intel_state->dst.x1 = plane_state->crtc_x;
2619 	intel_state->dst.y1 = plane_state->crtc_y;
2620 	intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
2621 	intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h;
2622 
2623 	obj = intel_fb_obj(fb);
2624 	if (obj->tiling_mode != I915_TILING_NONE)
2625 		dev_priv->preserve_bios_swizzle = true;
2626 
2627 	drm_framebuffer_reference(fb);
2628 	primary->fb = primary->state->fb = fb;
2629 	primary->crtc = primary->state->crtc = &intel_crtc->base;
2630 	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
2631 	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
2632 }
2633 
2634 static void i9xx_update_primary_plane(struct drm_plane *primary,
2635 				      const struct intel_crtc_state *crtc_state,
2636 				      const struct intel_plane_state *plane_state)
2637 {
2638 	struct drm_device *dev = primary->dev;
2639 	struct drm_i915_private *dev_priv = dev->dev_private;
2640 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
2641 	struct drm_framebuffer *fb = plane_state->base.fb;
2642 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2643 	int plane = intel_crtc->plane;
2644 	u32 linear_offset;
2645 	u32 dspcntr;
2646 	i915_reg_t reg = DSPCNTR(plane);
2647 	unsigned int rotation = plane_state->base.rotation;
2648 	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
2649 	int x = plane_state->src.x1 >> 16;
2650 	int y = plane_state->src.y1 >> 16;
2651 
2652 	dspcntr = DISPPLANE_GAMMA_ENABLE;
2653 
2654 	dspcntr |= DISPLAY_PLANE_ENABLE;
2655 
2656 	if (INTEL_INFO(dev)->gen < 4) {
2657 		if (intel_crtc->pipe == PIPE_B)
2658 			dspcntr |= DISPPLANE_SEL_PIPE_B;
2659 
2660 		/* pipesrc and dspsize control the size that is scaled from,
2661 		 * which should always be the user's requested size.
2662 		 */
2663 		I915_WRITE(DSPSIZE(plane),
2664 			   ((crtc_state->pipe_src_h - 1) << 16) |
2665 			   (crtc_state->pipe_src_w - 1));
2666 		I915_WRITE(DSPPOS(plane), 0);
2667 	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
2668 		I915_WRITE(PRIMSIZE(plane),
2669 			   ((crtc_state->pipe_src_h - 1) << 16) |
2670 			   (crtc_state->pipe_src_w - 1));
2671 		I915_WRITE(PRIMPOS(plane), 0);
2672 		I915_WRITE(PRIMCNSTALPHA(plane), 0);
2673 	}
2674 
2675 	switch (fb->pixel_format) {
2676 	case DRM_FORMAT_C8:
2677 		dspcntr |= DISPPLANE_8BPP;
2678 		break;
2679 	case DRM_FORMAT_XRGB1555:
2680 		dspcntr |= DISPPLANE_BGRX555;
2681 		break;
2682 	case DRM_FORMAT_RGB565:
2683 		dspcntr |= DISPPLANE_BGRX565;
2684 		break;
2685 	case DRM_FORMAT_XRGB8888:
2686 		dspcntr |= DISPPLANE_BGRX888;
2687 		break;
2688 	case DRM_FORMAT_XBGR8888:
2689 		dspcntr |= DISPPLANE_RGBX888;
2690 		break;
2691 	case DRM_FORMAT_XRGB2101010:
2692 		dspcntr |= DISPPLANE_BGRX101010;
2693 		break;
2694 	case DRM_FORMAT_XBGR2101010:
2695 		dspcntr |= DISPPLANE_RGBX101010;
2696 		break;
2697 	default:
2698 		BUG();
2699 	}
2700 
2701 	if (INTEL_INFO(dev)->gen >= 4 &&
2702 	    obj->tiling_mode != I915_TILING_NONE)
2703 		dspcntr |= DISPPLANE_TILED;
2704 
2705 	if (IS_G4X(dev))
2706 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2707 
2708 	linear_offset = y * fb->pitches[0] + x * cpp;
2709 
2710 	if (INTEL_INFO(dev)->gen >= 4) {
2711 		intel_crtc->dspaddr_offset =
2712 			intel_compute_tile_offset(&x, &y, fb, 0,
2713 						  fb->pitches[0], rotation);
2714 		linear_offset -= intel_crtc->dspaddr_offset;
2715 	} else {
2716 		intel_crtc->dspaddr_offset = linear_offset;
2717 	}
2718 
2719 	if (rotation == BIT(DRM_ROTATE_180)) {
2720 		dspcntr |= DISPPLANE_ROTATE_180;
2721 
2722 		x += (crtc_state->pipe_src_w - 1);
2723 		y += (crtc_state->pipe_src_h - 1);
2724 
2725 		/* Find the last pixel of the last line of the display
2726 		 * data and add it to linear_offset */
2727 		linear_offset +=
2728 			(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
2729 			(crtc_state->pipe_src_w - 1) * cpp;
2730 	}
2731 
2732 	intel_crtc->adjusted_x = x;
2733 	intel_crtc->adjusted_y = y;
2734 
2735 	I915_WRITE(reg, dspcntr);
2736 
2737 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2738 	if (INTEL_INFO(dev)->gen >= 4) {
2739 		I915_WRITE(DSPSURF(plane),
2740 			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2741 		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2742 		I915_WRITE(DSPLINOFF(plane), linear_offset);
2743 	} else
2744 		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
2745 	POSTING_READ(reg);
2746 }
2747 
2748 static void i9xx_disable_primary_plane(struct drm_plane *primary,
2749 				       struct drm_crtc *crtc)
2750 {
2751 	struct drm_device *dev = crtc->dev;
2752 	struct drm_i915_private *dev_priv = dev->dev_private;
2753 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2754 	int plane = intel_crtc->plane;
2755 
2756 	I915_WRITE(DSPCNTR(plane), 0);
2757 	if (INTEL_INFO(dev_priv)->gen >= 4)
2758 		I915_WRITE(DSPSURF(plane), 0);
2759 	else
2760 		I915_WRITE(DSPADDR(plane), 0);
2761 	POSTING_READ(DSPCNTR(plane));
2762 }
2763 
2764 static void ironlake_update_primary_plane(struct drm_plane *primary,
2765 					  const struct intel_crtc_state *crtc_state,
2766 					  const struct intel_plane_state *plane_state)
2767 {
2768 	struct drm_device *dev = primary->dev;
2769 	struct drm_i915_private *dev_priv = dev->dev_private;
2770 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
2771 	struct drm_framebuffer *fb = plane_state->base.fb;
2772 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2773 	int plane = intel_crtc->plane;
2774 	u32 linear_offset;
2775 	u32 dspcntr;
2776 	i915_reg_t reg = DSPCNTR(plane);
2777 	unsigned int rotation = plane_state->base.rotation;
2778 	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
2779 	int x = plane_state->src.x1 >> 16;
2780 	int y = plane_state->src.y1 >> 16;
2781 
2782 	dspcntr = DISPPLANE_GAMMA_ENABLE;
2783 	dspcntr |= DISPLAY_PLANE_ENABLE;
2784 
2785 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2786 		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
2787 
2788 	switch (fb->pixel_format) {
2789 	case DRM_FORMAT_C8:
2790 		dspcntr |= DISPPLANE_8BPP;
2791 		break;
2792 	case DRM_FORMAT_RGB565:
2793 		dspcntr |= DISPPLANE_BGRX565;
2794 		break;
2795 	case DRM_FORMAT_XRGB8888:
2796 		dspcntr |= DISPPLANE_BGRX888;
2797 		break;
2798 	case DRM_FORMAT_XBGR8888:
2799 		dspcntr |= DISPPLANE_RGBX888;
2800 		break;
2801 	case DRM_FORMAT_XRGB2101010:
2802 		dspcntr |= DISPPLANE_BGRX101010;
2803 		break;
2804 	case DRM_FORMAT_XBGR2101010:
2805 		dspcntr |= DISPPLANE_RGBX101010;
2806 		break;
2807 	default:
2808 		BUG();
2809 	}
2810 
2811 	if (obj->tiling_mode != I915_TILING_NONE)
2812 		dspcntr |= DISPPLANE_TILED;
2813 
2814 	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
2815 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2816 
2817 	linear_offset = y * fb->pitches[0] + x * cpp;
2818 	intel_crtc->dspaddr_offset =
2819 		intel_compute_tile_offset(&x, &y, fb, 0,
2820 					  fb->pitches[0], rotation);
2821 	linear_offset -= intel_crtc->dspaddr_offset;
2822 	if (rotation == BIT(DRM_ROTATE_180)) {
2823 		dspcntr |= DISPPLANE_ROTATE_180;
2824 
2825 		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
2826 			x += (crtc_state->pipe_src_w - 1);
2827 			y += (crtc_state->pipe_src_h - 1);
2828 
2829 			/* Find the last pixel of the last line of the display
2830 			 * data and add it to linear_offset */
2831 			linear_offset +=
2832 				(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
2833 				(crtc_state->pipe_src_w - 1) * cpp;
2834 		}
2835 	}
2836 
2837 	intel_crtc->adjusted_x = x;
2838 	intel_crtc->adjusted_y = y;
2839 
2840 	I915_WRITE(reg, dspcntr);
2841 
2842 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2843 	I915_WRITE(DSPSURF(plane),
2844 		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2845 	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2846 		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2847 	} else {
2848 		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2849 		I915_WRITE(DSPLINOFF(plane), linear_offset);
2850 	}
2851 	POSTING_READ(reg);
2852 }
2853 
2854 u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
2855 			      uint64_t fb_modifier, uint32_t pixel_format)
2856 {
2857 	if (fb_modifier == DRM_FORMAT_MOD_NONE) {
2858 		return 64;
2859 	} else {
2860 		int cpp = drm_format_plane_cpp(pixel_format, 0);
2861 
2862 		return intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
2863 	}
2864 }
2865 
2866 u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
2867 			   struct drm_i915_gem_object *obj,
2868 			   unsigned int plane)
2869 {
2870 	struct i915_ggtt_view view;
2871 	struct i915_vma *vma;
2872 	u64 offset;
2873 
2874 	intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
2875 				intel_plane->base.state->rotation);
2876 
2877 	vma = i915_gem_obj_to_ggtt_view(obj, &view);
2878 	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
2879 		view.type))
2880 		return -1;
2881 
2882 	offset = vma->node.start;
2883 
2884 	if (plane == 1) {
2885 		offset += vma->ggtt_view.params.rotated.uv_start_page *
2886 			  PAGE_SIZE;
2887 	}
2888 
2889 	WARN_ON(upper_32_bits(offset));
2890 
2891 	return lower_32_bits(offset);
2892 }
2893 
2894 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
2895 {
2896 	struct drm_device *dev = intel_crtc->base.dev;
2897 	struct drm_i915_private *dev_priv = dev->dev_private;
2898 
2899 	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
2900 	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
2901 	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
2902 }
2903 
2904 /*
2905  * This function detaches (a.k.a. unbinds) unused scalers in hardware
2906  */
2907 static void skl_detach_scalers(struct intel_crtc *intel_crtc)
2908 {
2909 	struct intel_crtc_scaler_state *scaler_state;
2910 	int i;
2911 
2912 	scaler_state = &intel_crtc->config->scaler_state;
2913 
2914 	/* loop through and disable scalers that aren't in use */
2915 	for (i = 0; i < intel_crtc->num_scalers; i++) {
2916 		if (!scaler_state->scalers[i].in_use)
2917 			skl_detach_scaler(intel_crtc, i);
2918 	}
2919 }
2920 
2921 u32 skl_plane_ctl_format(uint32_t pixel_format)
2922 {
2923 	switch (pixel_format) {
2924 	case DRM_FORMAT_C8:
2925 		return PLANE_CTL_FORMAT_INDEXED;
2926 	case DRM_FORMAT_RGB565:
2927 		return PLANE_CTL_FORMAT_RGB_565;
2928 	case DRM_FORMAT_XBGR8888:
2929 		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
2930 	case DRM_FORMAT_XRGB8888:
2931 		return PLANE_CTL_FORMAT_XRGB_8888;
2932 	/*
2933 	 * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
2934 	 * to be already pre-multiplied. We need to add a knob (or a different
2935 	 * DRM_FORMAT) for user-space to configure that.
2936 	 */
2937 	case DRM_FORMAT_ABGR8888:
2938 		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
2939 			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
2940 	case DRM_FORMAT_ARGB8888:
2941 		return PLANE_CTL_FORMAT_XRGB_8888 |
2942 			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
2943 	case DRM_FORMAT_XRGB2101010:
2944 		return PLANE_CTL_FORMAT_XRGB_2101010;
2945 	case DRM_FORMAT_XBGR2101010:
2946 		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
2947 	case DRM_FORMAT_YUYV:
2948 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
2949 	case DRM_FORMAT_YVYU:
2950 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
2951 	case DRM_FORMAT_UYVY:
2952 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
2953 	case DRM_FORMAT_VYUY:
2954 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
2955 	default:
2956 		MISSING_CASE(pixel_format);
2957 	}
2958 
2959 	return 0;
2960 }
2961 
2962 u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
2963 {
2964 	switch (fb_modifier) {
2965 	case DRM_FORMAT_MOD_NONE:
2966 		break;
2967 	case I915_FORMAT_MOD_X_TILED:
2968 		return PLANE_CTL_TILED_X;
2969 	case I915_FORMAT_MOD_Y_TILED:
2970 		return PLANE_CTL_TILED_Y;
2971 	case I915_FORMAT_MOD_Yf_TILED:
2972 		return PLANE_CTL_TILED_YF;
2973 	default:
2974 		MISSING_CASE(fb_modifier);
2975 	}
2976 
2977 	return 0;
2978 }
2979 
2980 u32 skl_plane_ctl_rotation(unsigned int rotation)
2981 {
2982 	switch (rotation) {
2983 	case BIT(DRM_ROTATE_0):
2984 		break;
2985 	/*
2986 	 * DRM_ROTATE_* is counter-clockwise (Xrandr convention) while i915
2987 	 * HW rotation is clockwise; that's why 90 and 270 are swapped here.
2988 	 */
2989 	case BIT(DRM_ROTATE_90):
2990 		return PLANE_CTL_ROTATE_270;
2991 	case BIT(DRM_ROTATE_180):
2992 		return PLANE_CTL_ROTATE_180;
2993 	case BIT(DRM_ROTATE_270):
2994 		return PLANE_CTL_ROTATE_90;
2995 	default:
2996 		MISSING_CASE(rotation);
2997 	}
2998 
2999 	return 0;
3000 }
3001 
3002 static void skylake_update_primary_plane(struct drm_plane *plane,
3003 					 const struct intel_crtc_state *crtc_state,
3004 					 const struct intel_plane_state *plane_state)
3005 {
3006 	struct drm_device *dev = plane->dev;
3007 	struct drm_i915_private *dev_priv = dev->dev_private;
3008 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
3009 	struct drm_framebuffer *fb = plane_state->base.fb;
3010 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
3011 	int pipe = intel_crtc->pipe;
3012 	u32 plane_ctl, stride_div, stride;
3013 	u32 tile_height, plane_offset, plane_size;
3014 	unsigned int rotation = plane_state->base.rotation;
3015 	int x_offset, y_offset;
3016 	u32 surf_addr;
3017 	int scaler_id = plane_state->scaler_id;
3018 	int src_x = plane_state->src.x1 >> 16;
3019 	int src_y = plane_state->src.y1 >> 16;
3020 	int src_w = drm_rect_width(&plane_state->src) >> 16;
3021 	int src_h = drm_rect_height(&plane_state->src) >> 16;
3022 	int dst_x = plane_state->dst.x1;
3023 	int dst_y = plane_state->dst.y1;
3024 	int dst_w = drm_rect_width(&plane_state->dst);
3025 	int dst_h = drm_rect_height(&plane_state->dst);
3026 
3027 	plane_ctl = PLANE_CTL_ENABLE |
3028 		    PLANE_CTL_PIPE_GAMMA_ENABLE |
3029 		    PLANE_CTL_PIPE_CSC_ENABLE;
3030 
3031 	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
3032 	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
3033 	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
3034 	plane_ctl |= skl_plane_ctl_rotation(rotation);
3035 
3036 	stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
3037 					       fb->pixel_format);
3038 	surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);
3039 
3040 	WARN_ON(drm_rect_width(&plane_state->src) == 0);
3041 
3042 	if (intel_rotation_90_or_270(rotation)) {
3043 		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
3044 
3045 		/* stride = Surface height in tiles */
3046 		tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
3047 		stride = DIV_ROUND_UP(fb->height, tile_height);
3048 		x_offset = stride * tile_height - src_y - src_h;
3049 		y_offset = src_x;
3050 		plane_size = (src_w - 1) << 16 | (src_h - 1);
3051 	} else {
3052 		stride = fb->pitches[0] / stride_div;
3053 		x_offset = src_x;
3054 		y_offset = src_y;
3055 		plane_size = (src_h - 1) << 16 | (src_w - 1);
3056 	}
3057 	plane_offset = y_offset << 16 | x_offset;
3058 
3059 	intel_crtc->adjusted_x = x_offset;
3060 	intel_crtc->adjusted_y = y_offset;
3061 
3062 	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
3063 	I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
3064 	I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
3065 	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
3066 
3067 	if (scaler_id >= 0) {
3068 		uint32_t ps_ctrl = 0;
3069 
3070 		WARN_ON(!dst_w || !dst_h);
3071 		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
3072 			crtc_state->scaler_state.scalers[scaler_id].mode;
3073 		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
3074 		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
3075 		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
3076 		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
3077 		I915_WRITE(PLANE_POS(pipe, 0), 0);
3078 	} else {
3079 		I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
3080 	}
3081 
3082 	I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);
3083 
3084 	POSTING_READ(PLANE_SURF(pipe, 0));
3085 }
3086 
3087 static void skylake_disable_primary_plane(struct drm_plane *primary,
3088 					  struct drm_crtc *crtc)
3089 {
3090 	struct drm_device *dev = crtc->dev;
3091 	struct drm_i915_private *dev_priv = dev->dev_private;
3092 	int pipe = to_intel_crtc(crtc)->pipe;
3093 
3094 	I915_WRITE(PLANE_CTL(pipe, 0), 0);
3095 	I915_WRITE(PLANE_SURF(pipe, 0), 0);
3096 	POSTING_READ(PLANE_SURF(pipe, 0));
3097 }
3098 
3099 /* Assume fb object is pinned & idle & fenced and just update base pointers */
3100 static int
3101 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
3102 			   int x, int y, enum mode_set_atomic state)
3103 {
3104 	/* Support for kgdboc is disabled, this needs a major rework. */
3105 	DRM_ERROR("legacy panic handler not supported any more.\n");
3106 
3107 	return -ENODEV;
3108 }
3109 
3110 static void intel_complete_page_flips(struct drm_device *dev)
3111 {
3112 	struct drm_crtc *crtc;
3113 
3114 	for_each_crtc(dev, crtc) {
3115 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3116 		enum plane plane = intel_crtc->plane;
3117 
3118 		intel_prepare_page_flip(dev, plane);
3119 		intel_finish_page_flip_plane(dev, plane);
3120 	}
3121 }
3122 
3123 static void intel_update_primary_planes(struct drm_device *dev)
3124 {
3125 	struct drm_crtc *crtc;
3126 
3127 	for_each_crtc(dev, crtc) {
3128 		struct intel_plane *plane = to_intel_plane(crtc->primary);
3129 		struct intel_plane_state *plane_state;
3130 
3131 		drm_modeset_lock_crtc(crtc, &plane->base);
3132 		plane_state = to_intel_plane_state(plane->base.state);
3133 
3134 		if (plane_state->visible)
3135 			plane->update_plane(&plane->base,
3136 					    to_intel_crtc_state(crtc->state),
3137 					    plane_state);
3138 
3139 		drm_modeset_unlock_crtc(crtc);
3140 	}
3141 }
3142 
3143 void intel_prepare_reset(struct drm_device *dev)
3144 {
3145 	/* no reset support for gen2 */
3146 	if (IS_GEN2(dev))
3147 		return;
3148 
3149 	/* reset doesn't touch the display */
3150 	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
3151 		return;
3152 
3153 	drm_modeset_lock_all(dev);
3154 	/*
3155 	 * Disabling the crtcs gracefully seems nicer. Also the
3156 	 * g33 docs say we should at least disable all the planes.
3157 	 */
3158 	intel_display_suspend(dev);
3159 }
3160 
3161 void intel_finish_reset(struct drm_device *dev)
3162 {
3163 	struct drm_i915_private *dev_priv = to_i915(dev);
3164 
3165 	/*
3166 	 * Flips in the rings will be nuked by the reset,
3167 	 * so complete all pending flips so that user space
3168 	 * will get its events and not get stuck.
3169 	 */
3170 	intel_complete_page_flips(dev);
3171 
3172 	/* no reset support for gen2 */
3173 	if (IS_GEN2(dev))
3174 		return;
3175 
3176 	/* reset doesn't touch the display */
3177 	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
3178 		/*
3179 		 * Flips in the rings have been nuked by the reset,
3180 		 * so update the base address of all primary
3181 		 * planes to the last fb to make sure we're
3182 		 * showing the correct fb after a reset.
3183 		 *
3184 		 * FIXME: Atomic will make this obsolete since we won't schedule
3185 		 * CS-based flips (which might get lost in gpu resets) any more.
3186 		 */
3187 		intel_update_primary_planes(dev);
3188 		return;
3189 	}
3190 
3191 	/*
3192 	 * The display has been reset as well,
3193 	 * so need a full re-initialization.
3194 	 */
3195 	intel_runtime_pm_disable_interrupts(dev_priv);
3196 	intel_runtime_pm_enable_interrupts(dev_priv);
3197 
3198 	intel_modeset_init_hw(dev);
3199 
3200 	spin_lock_irq(&dev_priv->irq_lock);
3201 	if (dev_priv->display.hpd_irq_setup)
3202 		dev_priv->display.hpd_irq_setup(dev);
3203 	spin_unlock_irq(&dev_priv->irq_lock);
3204 
3205 	intel_display_resume(dev);
3206 
3207 	intel_hpd_init(dev_priv);
3208 
3209 	drm_modeset_unlock_all(dev);
3210 }
3211 
3212 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
3213 {
3214 	struct drm_device *dev = crtc->dev;
3215 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3216 	unsigned reset_counter;
3217 	bool pending;
3218 
3219 	reset_counter = i915_reset_counter(&to_i915(dev)->gpu_error);
3220 	if (intel_crtc->reset_counter != reset_counter)
3221 		return false;
3222 
3223 	spin_lock_irq(&dev->event_lock);
3224 	pending = to_intel_crtc(crtc)->unpin_work != NULL;
3225 	spin_unlock_irq(&dev->event_lock);
3226 
3227 	return pending;
3228 }
3229 
3230 static void intel_update_pipe_config(struct intel_crtc *crtc,
3231 				     struct intel_crtc_state *old_crtc_state)
3232 {
3233 	struct drm_device *dev = crtc->base.dev;
3234 	struct drm_i915_private *dev_priv = dev->dev_private;
3235 	struct intel_crtc_state *pipe_config =
3236 		to_intel_crtc_state(crtc->base.state);
3237 
3238 	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
3239 	crtc->base.mode = crtc->base.state->mode;
3240 
3241 	DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
3242 		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
3243 		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
3244 
3245 	/*
3246 	 * Update pipe size and adjust fitter if needed: the reason for this is
3247 	 * that in compute_mode_changes we check the native mode (not the pfit
3248 	 * mode) to see if we can flip rather than do a full mode set. In the
3249 	 * fastboot case, we'll flip, but if we don't update the pipesrc and
3250 	 * pfit state, we'll end up with a big fb scanned out into the wrong
3251 	 * sized surface.
3252 	 */
3253 
3254 	I915_WRITE(PIPESRC(crtc->pipe),
3255 		   ((pipe_config->pipe_src_w - 1) << 16) |
3256 		   (pipe_config->pipe_src_h - 1));
3257 
3258 	/* on skylake this is done by detaching scalers */
3259 	if (INTEL_INFO(dev)->gen >= 9) {
3260 		skl_detach_scalers(crtc);
3261 
3262 		if (pipe_config->pch_pfit.enabled)
3263 			skylake_pfit_enable(crtc);
3264 	} else if (HAS_PCH_SPLIT(dev)) {
3265 		if (pipe_config->pch_pfit.enabled)
3266 			ironlake_pfit_enable(crtc);
3267 		else if (old_crtc_state->pch_pfit.enabled)
3268 			ironlake_pfit_disable(crtc, true);
3269 	}
3270 }
3271 
3272 static void intel_fdi_normal_train(struct drm_crtc *crtc)
3273 {
3274 	struct drm_device *dev = crtc->dev;
3275 	struct drm_i915_private *dev_priv = dev->dev_private;
3276 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3277 	int pipe = intel_crtc->pipe;
3278 	i915_reg_t reg;
3279 	u32 temp;
3280 
3281 	/* enable normal train */
3282 	reg = FDI_TX_CTL(pipe);
3283 	temp = I915_READ(reg);
3284 	if (IS_IVYBRIDGE(dev)) {
3285 		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3286 		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
3287 	} else {
3288 		temp &= ~FDI_LINK_TRAIN_NONE;
3289 		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
3290 	}
3291 	I915_WRITE(reg, temp);
3292 
3293 	reg = FDI_RX_CTL(pipe);
3294 	temp = I915_READ(reg);
3295 	if (HAS_PCH_CPT(dev)) {
3296 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3297 		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
3298 	} else {
3299 		temp &= ~FDI_LINK_TRAIN_NONE;
3300 		temp |= FDI_LINK_TRAIN_NONE;
3301 	}
3302 	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
3303 
3304 	/* wait one idle pattern time */
3305 	POSTING_READ(reg);
3306 	udelay(1000);
3307 
3308 	/* IVB wants error correction enabled */
3309 	if (IS_IVYBRIDGE(dev))
3310 		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
3311 			   FDI_FE_ERRC_ENABLE);
3312 }
3313 
3314 /* The FDI link training functions for ILK/Ibexpeak. */
3315 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
3316 {
3317 	struct drm_device *dev = crtc->dev;
3318 	struct drm_i915_private *dev_priv = dev->dev_private;
3319 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3320 	int pipe = intel_crtc->pipe;
3321 	i915_reg_t reg;
3322 	u32 temp, tries;
3323 
3324 	/* FDI needs bits from pipe first */
3325 	assert_pipe_enabled(dev_priv, pipe);
3326 
3327 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
3328 	   for train result */
3329 	reg = FDI_RX_IMR(pipe);
3330 	temp = I915_READ(reg);
3331 	temp &= ~FDI_RX_SYMBOL_LOCK;
3332 	temp &= ~FDI_RX_BIT_LOCK;
3333 	I915_WRITE(reg, temp);
3334 	I915_READ(reg);
3335 	udelay(150);
3336 
3337 	/* enable CPU FDI TX and PCH FDI RX */
3338 	reg = FDI_TX_CTL(pipe);
3339 	temp = I915_READ(reg);
3340 	temp &= ~FDI_DP_PORT_WIDTH_MASK;
3341 	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3342 	temp &= ~FDI_LINK_TRAIN_NONE;
3343 	temp |= FDI_LINK_TRAIN_PATTERN_1;
3344 	I915_WRITE(reg, temp | FDI_TX_ENABLE);
3345 
3346 	reg = FDI_RX_CTL(pipe);
3347 	temp = I915_READ(reg);
3348 	temp &= ~FDI_LINK_TRAIN_NONE;
3349 	temp |= FDI_LINK_TRAIN_PATTERN_1;
3350 	I915_WRITE(reg, temp | FDI_RX_ENABLE);
3351 
3352 	POSTING_READ(reg);
3353 	udelay(150);
3354 
3355 	/* Ironlake workaround, enable clock pointer after FDI enable */
3356 	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3357 	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
3358 		   FDI_RX_PHASE_SYNC_POINTER_EN);
3359 
3360 	reg = FDI_RX_IIR(pipe);
3361 	for (tries = 0; tries < 5; tries++) {
3362 		temp = I915_READ(reg);
3363 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3364 
3365 		if ((temp & FDI_RX_BIT_LOCK)) {
3366 			DRM_DEBUG_KMS("FDI train 1 done.\n");
3367 			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3368 			break;
3369 		}
3370 	}
3371 	if (tries == 5)
3372 		DRM_ERROR("FDI train 1 fail!\n");
3373 
3374 	/* Train 2 */
3375 	reg = FDI_TX_CTL(pipe);
3376 	temp = I915_READ(reg);
3377 	temp &= ~FDI_LINK_TRAIN_NONE;
3378 	temp |= FDI_LINK_TRAIN_PATTERN_2;
3379 	I915_WRITE(reg, temp);
3380 
3381 	reg = FDI_RX_CTL(pipe);
3382 	temp = I915_READ(reg);
3383 	temp &= ~FDI_LINK_TRAIN_NONE;
3384 	temp |= FDI_LINK_TRAIN_PATTERN_2;
3385 	I915_WRITE(reg, temp);
3386 
3387 	POSTING_READ(reg);
3388 	udelay(150);
3389 
3390 	reg = FDI_RX_IIR(pipe);
3391 	for (tries = 0; tries < 5; tries++) {
3392 		temp = I915_READ(reg);
3393 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3394 
3395 		if (temp & FDI_RX_SYMBOL_LOCK) {
3396 			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3397 			DRM_DEBUG_KMS("FDI train 2 done.\n");
3398 			break;
3399 		}
3400 	}
3401 	if (tries == 5)
3402 		DRM_ERROR("FDI train 2 fail!\n");
3403 
3404 	DRM_DEBUG_KMS("FDI train done.\n");
3405 
3406 }
3407 
3408 static const int snb_b_fdi_train_param[] = {
3409 	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
3410 	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
3411 	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
3412 	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
3413 };
3414 
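/*
 * Each entry in snb_b_fdi_train_param selects a voltage-swing/pre-emphasis
 * combination for the FDI TX. The training loops below walk this table
 * towards progressively stronger drive settings until the RX reports lock;
 * ivb_manual_fdi_link_train() reuses the table and tries each setting
 * twice, hence its j/2 indexing.
 */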
3415 /* The FDI link training functions for SNB/Cougar Point. */
3416 static void gen6_fdi_link_train(struct drm_crtc *crtc)
3417 {
3418 	struct drm_device *dev = crtc->dev;
3419 	struct drm_i915_private *dev_priv = dev->dev_private;
3420 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3421 	int pipe = intel_crtc->pipe;
3422 	i915_reg_t reg;
3423 	u32 temp, i, retry;
3424 
3425 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3426 	   for train result */
3427 	reg = FDI_RX_IMR(pipe);
3428 	temp = I915_READ(reg);
3429 	temp &= ~FDI_RX_SYMBOL_LOCK;
3430 	temp &= ~FDI_RX_BIT_LOCK;
3431 	I915_WRITE(reg, temp);
3432 
3433 	POSTING_READ(reg);
3434 	udelay(150);
3435 
3436 	/* enable CPU FDI TX and PCH FDI RX */
3437 	reg = FDI_TX_CTL(pipe);
3438 	temp = I915_READ(reg);
3439 	temp &= ~FDI_DP_PORT_WIDTH_MASK;
3440 	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3441 	temp &= ~FDI_LINK_TRAIN_NONE;
3442 	temp |= FDI_LINK_TRAIN_PATTERN_1;
3443 	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3444 	/* SNB-B */
3445 	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3446 	I915_WRITE(reg, temp | FDI_TX_ENABLE);
3447 
3448 	I915_WRITE(FDI_RX_MISC(pipe),
3449 		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3450 
3451 	reg = FDI_RX_CTL(pipe);
3452 	temp = I915_READ(reg);
3453 	if (HAS_PCH_CPT(dev)) {
3454 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3455 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3456 	} else {
3457 		temp &= ~FDI_LINK_TRAIN_NONE;
3458 		temp |= FDI_LINK_TRAIN_PATTERN_1;
3459 	}
3460 	I915_WRITE(reg, temp | FDI_RX_ENABLE);
3461 
3462 	POSTING_READ(reg);
3463 	udelay(150);
3464 
3465 	for (i = 0; i < 4; i++) {
3466 		reg = FDI_TX_CTL(pipe);
3467 		temp = I915_READ(reg);
3468 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3469 		temp |= snb_b_fdi_train_param[i];
3470 		I915_WRITE(reg, temp);
3471 
3472 		POSTING_READ(reg);
3473 		udelay(500);
3474 
3475 		for (retry = 0; retry < 5; retry++) {
3476 			reg = FDI_RX_IIR(pipe);
3477 			temp = I915_READ(reg);
3478 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3479 			if (temp & FDI_RX_BIT_LOCK) {
3480 				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3481 				DRM_DEBUG_KMS("FDI train 1 done.\n");
3482 				break;
3483 			}
3484 			udelay(50);
3485 		}
3486 		if (retry < 5)
3487 			break;
3488 	}
3489 	if (i == 4)
3490 		DRM_ERROR("FDI train 1 fail!\n");
3491 
3492 	/* Train 2 */
3493 	reg = FDI_TX_CTL(pipe);
3494 	temp = I915_READ(reg);
3495 	temp &= ~FDI_LINK_TRAIN_NONE;
3496 	temp |= FDI_LINK_TRAIN_PATTERN_2;
3497 	if (IS_GEN6(dev)) {
3498 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3499 		/* SNB-B */
3500 		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3501 	}
3502 	I915_WRITE(reg, temp);
3503 
3504 	reg = FDI_RX_CTL(pipe);
3505 	temp = I915_READ(reg);
3506 	if (HAS_PCH_CPT(dev)) {
3507 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3508 		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3509 	} else {
3510 		temp &= ~FDI_LINK_TRAIN_NONE;
3511 		temp |= FDI_LINK_TRAIN_PATTERN_2;
3512 	}
3513 	I915_WRITE(reg, temp);
3514 
3515 	POSTING_READ(reg);
3516 	udelay(150);
3517 
3518 	for (i = 0; i < 4; i++) {
3519 		reg = FDI_TX_CTL(pipe);
3520 		temp = I915_READ(reg);
3521 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3522 		temp |= snb_b_fdi_train_param[i];
3523 		I915_WRITE(reg, temp);
3524 
3525 		POSTING_READ(reg);
3526 		udelay(500);
3527 
3528 		for (retry = 0; retry < 5; retry++) {
3529 			reg = FDI_RX_IIR(pipe);
3530 			temp = I915_READ(reg);
3531 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3532 			if (temp & FDI_RX_SYMBOL_LOCK) {
3533 				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3534 				DRM_DEBUG_KMS("FDI train 2 done.\n");
3535 				break;
3536 			}
3537 			udelay(50);
3538 		}
3539 		if (retry < 5)
3540 			break;
3541 	}
3542 	if (i == 4)
3543 		DRM_ERROR("FDI train 2 fail!\n");
3544 
3545 	DRM_DEBUG_KMS("FDI train done.\n");
3546 }
3547 
3548 /* Manual link training for Ivy Bridge A0 parts */
3549 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
3550 {
3551 	struct drm_device *dev = crtc->dev;
3552 	struct drm_i915_private *dev_priv = dev->dev_private;
3553 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3554 	int pipe = intel_crtc->pipe;
3555 	i915_reg_t reg;
3556 	u32 temp, i, j;
3557 
3558 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3559 	   for train result */
3560 	reg = FDI_RX_IMR(pipe);
3561 	temp = I915_READ(reg);
3562 	temp &= ~FDI_RX_SYMBOL_LOCK;
3563 	temp &= ~FDI_RX_BIT_LOCK;
3564 	I915_WRITE(reg, temp);
3565 
3566 	POSTING_READ(reg);
3567 	udelay(150);
3568 
3569 	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
3570 		      I915_READ(FDI_RX_IIR(pipe)));
3571 
3572 	/* Try each vswing and preemphasis setting twice before moving on */
3573 	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
3574 		/* disable first in case we need to retry */
3575 		reg = FDI_TX_CTL(pipe);
3576 		temp = I915_READ(reg);
3577 		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
3578 		temp &= ~FDI_TX_ENABLE;
3579 		I915_WRITE(reg, temp);
3580 
3581 		reg = FDI_RX_CTL(pipe);
3582 		temp = I915_READ(reg);
3583 		temp &= ~FDI_LINK_TRAIN_AUTO;
3584 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3585 		temp &= ~FDI_RX_ENABLE;
3586 		I915_WRITE(reg, temp);
3587 
3588 		/* enable CPU FDI TX and PCH FDI RX */
3589 		reg = FDI_TX_CTL(pipe);
3590 		temp = I915_READ(reg);
3591 		temp &= ~FDI_DP_PORT_WIDTH_MASK;
3592 		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3593 		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
3594 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3595 		temp |= snb_b_fdi_train_param[j/2];
3596 		temp |= FDI_COMPOSITE_SYNC;
3597 		I915_WRITE(reg, temp | FDI_TX_ENABLE);
3598 
3599 		I915_WRITE(FDI_RX_MISC(pipe),
3600 			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3601 
3602 		reg = FDI_RX_CTL(pipe);
3603 		temp = I915_READ(reg);
3604 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3605 		temp |= FDI_COMPOSITE_SYNC;
3606 		I915_WRITE(reg, temp | FDI_RX_ENABLE);
3607 
3608 		POSTING_READ(reg);
3609 		udelay(1); /* should be 0.5us */
3610 
3611 		for (i = 0; i < 4; i++) {
3612 			reg = FDI_RX_IIR(pipe);
3613 			temp = I915_READ(reg);
3614 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3615 
3616 			if (temp & FDI_RX_BIT_LOCK ||
3617 			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
3618 				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3619 				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
3620 					      i);
3621 				break;
3622 			}
3623 			udelay(1); /* should be 0.5us */
3624 		}
3625 		if (i == 4) {
3626 			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
3627 			continue;
3628 		}
3629 
3630 		/* Train 2 */
3631 		reg = FDI_TX_CTL(pipe);
3632 		temp = I915_READ(reg);
3633 		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3634 		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
3635 		I915_WRITE(reg, temp);
3636 
3637 		reg = FDI_RX_CTL(pipe);
3638 		temp = I915_READ(reg);
3639 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3640 		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3641 		I915_WRITE(reg, temp);
3642 
3643 		POSTING_READ(reg);
3644 		udelay(2); /* should be 1.5us */
3645 
3646 		for (i = 0; i < 4; i++) {
3647 			reg = FDI_RX_IIR(pipe);
3648 			temp = I915_READ(reg);
3649 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3650 
3651 			if (temp & FDI_RX_SYMBOL_LOCK ||
3652 			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
3653 				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3654 				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
3655 					      i);
3656 				goto train_done;
3657 			}
3658 			udelay(2); /* should be 1.5us */
3659 		}
3660 		if (i == 4)
3661 			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
3662 	}
3663 
3664 train_done:
3665 	DRM_DEBUG_KMS("FDI train done.\n");
3666 }
3667 
3668 static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
3669 {
3670 	struct drm_device *dev = intel_crtc->base.dev;
3671 	struct drm_i915_private *dev_priv = dev->dev_private;
3672 	int pipe = intel_crtc->pipe;
3673 	i915_reg_t reg;
3674 	u32 temp;
3675 
3676 	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
3677 	reg = FDI_RX_CTL(pipe);
3678 	temp = I915_READ(reg);
3679 	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
3680 	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3681 	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3682 	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
3683 
3684 	POSTING_READ(reg);
3685 	udelay(200);
3686 
3687 	/* Switch from Rawclk to PCDclk */
3688 	temp = I915_READ(reg);
3689 	I915_WRITE(reg, temp | FDI_PCDCLK);
3690 
3691 	POSTING_READ(reg);
3692 	udelay(200);
3693 
3694 	/* Enable CPU FDI TX PLL, always on for Ironlake */
3695 	reg = FDI_TX_CTL(pipe);
3696 	temp = I915_READ(reg);
3697 	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
3698 		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
3699 
3700 		POSTING_READ(reg);
3701 		udelay(100);
3702 	}
3703 }
3704 
3705 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3706 {
3707 	struct drm_device *dev = intel_crtc->base.dev;
3708 	struct drm_i915_private *dev_priv = dev->dev_private;
3709 	int pipe = intel_crtc->pipe;
3710 	i915_reg_t reg;
3711 	u32 temp;
3712 
3713 	/* Switch from PCDclk to Rawclk */
3714 	reg = FDI_RX_CTL(pipe);
3715 	temp = I915_READ(reg);
3716 	I915_WRITE(reg, temp & ~FDI_PCDCLK);
3717 
3718 	/* Disable CPU FDI TX PLL */
3719 	reg = FDI_TX_CTL(pipe);
3720 	temp = I915_READ(reg);
3721 	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3722 
3723 	POSTING_READ(reg);
3724 	udelay(100);
3725 
3726 	reg = FDI_RX_CTL(pipe);
3727 	temp = I915_READ(reg);
3728 	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3729 
3730 	/* Wait for the clocks to turn off. */
3731 	POSTING_READ(reg);
3732 	udelay(100);
3733 }
3734 
3735 static void ironlake_fdi_disable(struct drm_crtc *crtc)
3736 {
3737 	struct drm_device *dev = crtc->dev;
3738 	struct drm_i915_private *dev_priv = dev->dev_private;
3739 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3740 	int pipe = intel_crtc->pipe;
3741 	i915_reg_t reg;
3742 	u32 temp;
3743 
3744 	/* disable CPU FDI tx and PCH FDI rx */
3745 	reg = FDI_TX_CTL(pipe);
3746 	temp = I915_READ(reg);
3747 	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
3748 	POSTING_READ(reg);
3749 
3750 	reg = FDI_RX_CTL(pipe);
3751 	temp = I915_READ(reg);
3752 	temp &= ~(0x7 << 16);
3753 	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3754 	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
3755 
3756 	POSTING_READ(reg);
3757 	udelay(100);
3758 
3759 	/* Ironlake workaround, disable clock pointer after downing FDI */
3760 	if (HAS_PCH_IBX(dev))
3761 		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3762 
3763 	/* still set train pattern 1 */
3764 	reg = FDI_TX_CTL(pipe);
3765 	temp = I915_READ(reg);
3766 	temp &= ~FDI_LINK_TRAIN_NONE;
3767 	temp |= FDI_LINK_TRAIN_PATTERN_1;
3768 	I915_WRITE(reg, temp);
3769 
3770 	reg = FDI_RX_CTL(pipe);
3771 	temp = I915_READ(reg);
3772 	if (HAS_PCH_CPT(dev)) {
3773 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3774 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3775 	} else {
3776 		temp &= ~FDI_LINK_TRAIN_NONE;
3777 		temp |= FDI_LINK_TRAIN_PATTERN_1;
3778 	}
3779 	/* BPC in FDI rx is consistent with that in PIPECONF */
3780 	temp &= ~(0x07 << 16);
3781 	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3782 	I915_WRITE(reg, temp);
3783 
3784 	POSTING_READ(reg);
3785 	udelay(100);
3786 }
3787 
3788 bool intel_has_pending_fb_unpin(struct drm_device *dev)
3789 {
3790 	struct intel_crtc *crtc;
3791 
3792 	/* Note that we don't need to be called with mode_config.lock here
3793 	 * as our list of CRTC objects is static for the lifetime of the
3794 	 * device and so cannot disappear as we iterate. Similarly, we can
3795 	 * happily treat the predicates as racy, atomic checks as userspace
3796 	 * cannot claim and pin a new fb without at least acquiring the
3797 	 * struct_mutex and so serialising with us.
3798 	 */
3799 	for_each_intel_crtc(dev, crtc) {
3800 		if (atomic_read(&crtc->unpin_work_count) == 0)
3801 			continue;
3802 
3803 		if (crtc->unpin_work)
3804 			intel_wait_for_vblank(dev, crtc->pipe);
3805 
3806 		return true;
3807 	}
3808 
3809 	return false;
3810 }
3811 
3812 static void page_flip_completed(struct intel_crtc *intel_crtc)
3813 {
3814 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
3815 	struct intel_unpin_work *work = intel_crtc->unpin_work;
3816 
3817 	/* ensure that the unpin work is consistent wrt ->pending. */
3818 	smp_rmb();
3819 	intel_crtc->unpin_work = NULL;
3820 
3821 	if (work->event)
3822 		drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
3823 
3824 	drm_crtc_vblank_put(&intel_crtc->base);
3825 
3826 	wake_up_all(&dev_priv->pending_flip_queue);
3827 	queue_work(dev_priv->wq, &work->work);
3828 
3829 	trace_i915_flip_complete(intel_crtc->plane,
3830 				 work->pending_flip_obj);
3831 }
3832 
3833 static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3834 {
3835 	struct drm_device *dev = crtc->dev;
3836 	struct drm_i915_private *dev_priv = dev->dev_private;
3837 	long ret;
3838 
3839 	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
3840 
3841 	ret = wait_event_interruptible_timeout(
3842 					dev_priv->pending_flip_queue,
3843 					!intel_crtc_has_pending_flip(crtc),
3844 					60*HZ);
3845 
3846 	if (ret < 0)
3847 		return ret;
3848 
3849 	if (ret == 0) {
3850 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3851 
3852 		spin_lock_irq(&dev->event_lock);
3853 		if (intel_crtc->unpin_work) {
3854 			WARN_ONCE(1, "Removing stuck page flip\n");
3855 			page_flip_completed(intel_crtc);
3856 		}
3857 		spin_unlock_irq(&dev->event_lock);
3858 	}
3859 
3860 	return 0;
3861 }
3862 
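/*
 * iCLKIP is generated by an SSC modulator programmed through the sideband
 * interface (SBI). Disabling it gates the pixel clock first and then sets
 * SBI_SSCCTL_DISABLE in SSCCTL6; lpt_program_iclkip() below reverses both
 * steps once the new divisors are written.
 */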
3863 static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
3864 {
3865 	u32 temp;
3866 
3867 	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
3868 
3869 	mutex_lock(&dev_priv->sb_lock);
3870 
3871 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3872 	temp |= SBI_SSCCTL_DISABLE;
3873 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3874 
3875 	mutex_unlock(&dev_priv->sb_lock);
3876 }
3877 
3878 /* Program iCLKIP clock to the desired frequency */
3879 static void lpt_program_iclkip(struct drm_crtc *crtc)
3880 {
3881 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
3882 	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
3883 	u32 divsel, phaseinc, auxdiv, phasedir = 0;
3884 	u32 temp;
3885 
3886 	lpt_disable_iclkip(dev_priv);
3887 
3888 	/* The iCLK virtual clock root frequency is in MHz,
3889 	 * but the adjusted_mode->crtc_clock is in kHz. To get the
3890 	 * divisors we have to divide one by the other, so we
3891 	 * express the virtual root clock in kHz here to keep the
3892 	 * division precise.
3893 	 */
3894 	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
3895 		u32 iclk_virtual_root_freq = 172800 * 1000;
3896 		u32 iclk_pi_range = 64;
3897 		u32 desired_divisor;
3898 
3899 		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
3900 						    clock << auxdiv);
3901 		divsel = (desired_divisor / iclk_pi_range) - 2;
3902 		phaseinc = desired_divisor % iclk_pi_range;
3903 
3904 		/*
3905 		 * Near 20MHz is a corner case which is
3906 		 * out of range for the 7-bit divisor
3907 		 */
3908 		if (divsel <= 0x7f)
3909 			break;
3910 	}
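
	/*
	 * Worked example: for a 108000 kHz clock, desired_divisor =
	 * DIV_ROUND_CLOSEST(172800000, 108000) = 1600, so divsel =
	 * 1600/64 - 2 = 23 and phaseinc = 1600 % 64 = 0 with auxdiv = 0.
	 * Near 20000 kHz, auxdiv = 0 gives divsel = 8640/64 - 2 = 133,
	 * which overflows the 7-bit field, so the loop retries with
	 * auxdiv = 1 and settles on divsel = 65, phaseinc = 32.
	 */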
3911 
3912 	/* This should not happen with any sane values */
3913 	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
3914 		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
3915 	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
3916 		~SBI_SSCDIVINTPHASE_INCVAL_MASK);
3917 
3918 	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
3919 			clock,
3920 			auxdiv,
3921 			divsel,
3922 			phasedir,
3923 			phaseinc);
3924 
3925 	mutex_lock(&dev_priv->sb_lock);
3926 
3927 	/* Program SSCDIVINTPHASE6 */
3928 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3929 	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
3930 	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
3931 	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
3932 	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
3933 	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
3934 	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
3935 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
3936 
3937 	/* Program SSCAUXDIV */
3938 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3939 	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3940 	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3941 	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3942 
3943 	/* Enable modulator and associated divider */
3944 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3945 	temp &= ~SBI_SSCCTL_DISABLE;
3946 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3947 
3948 	mutex_unlock(&dev_priv->sb_lock);
3949 
3950 	/* Wait for initialization time */
3951 	udelay(24);
3952 
3953 	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3954 }
3955 
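/*
 * Inverse of lpt_program_iclkip(): reconstruct desired_divisor from the
 * divsel/phaseinc/auxdiv fields read back over SBI and return the iCLKIP
 * frequency in kHz, or 0 if the pixel clock is gated or the modulator is
 * disabled.
 */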
3956 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
3957 {
3958 	u32 divsel, phaseinc, auxdiv;
3959 	u32 iclk_virtual_root_freq = 172800 * 1000;
3960 	u32 iclk_pi_range = 64;
3961 	u32 desired_divisor;
3962 	u32 temp;
3963 
3964 	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
3965 		return 0;
3966 
3967 	mutex_lock(&dev_priv->sb_lock);
3968 
3969 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3970 	if (temp & SBI_SSCCTL_DISABLE) {
3971 		mutex_unlock(&dev_priv->sb_lock);
3972 		return 0;
3973 	}
3974 
3975 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3976 	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
3977 		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
3978 	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
3979 		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
3980 
3981 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3982 	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
3983 		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
3984 
3985 	mutex_unlock(&dev_priv->sb_lock);
3986 
3987 	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
3988 
3989 	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
3990 				 desired_divisor << auxdiv);
3991 }
3992 
3993 static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
3994 						enum i915_pipe pch_transcoder)
3995 {
3996 	struct drm_device *dev = crtc->base.dev;
3997 	struct drm_i915_private *dev_priv = dev->dev_private;
3998 	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
3999 
4000 	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
4001 		   I915_READ(HTOTAL(cpu_transcoder)));
4002 	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
4003 		   I915_READ(HBLANK(cpu_transcoder)));
4004 	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
4005 		   I915_READ(HSYNC(cpu_transcoder)));
4006 
4007 	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
4008 		   I915_READ(VTOTAL(cpu_transcoder)));
4009 	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
4010 		   I915_READ(VBLANK(cpu_transcoder)));
4011 	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
4012 		   I915_READ(VSYNC(cpu_transcoder)));
4013 	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
4014 		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
4015 }
4016 
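/*
 * FDI B/C lane bifurcation: with FDI_BC_BIFURCATION_SELECT set, the FDI B
 * and FDI C receivers get two lanes each; with it clear, pipe B may use
 * more than two lanes but FDI C rx is unavailable. The bit may only be
 * flipped while both FDI B and FDI C rx are disabled, hence the WARNs
 * below.
 */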
4017 static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
4018 {
4019 	struct drm_i915_private *dev_priv = dev->dev_private;
4020 	uint32_t temp;
4021 
4022 	temp = I915_READ(SOUTH_CHICKEN1);
4023 	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
4024 		return;
4025 
4026 	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
4027 	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
4028 
4029 	temp &= ~FDI_BC_BIFURCATION_SELECT;
4030 	if (enable)
4031 		temp |= FDI_BC_BIFURCATION_SELECT;
4032 
4033 	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
4034 	I915_WRITE(SOUTH_CHICKEN1, temp);
4035 	POSTING_READ(SOUTH_CHICKEN1);
4036 }
4037 
4038 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4039 {
4040 	struct drm_device *dev = intel_crtc->base.dev;
4041 
4042 	switch (intel_crtc->pipe) {
4043 	case PIPE_A:
4044 		break;
4045 	case PIPE_B:
4046 		if (intel_crtc->config->fdi_lanes > 2)
4047 			cpt_set_fdi_bc_bifurcation(dev, false);
4048 		else
4049 			cpt_set_fdi_bc_bifurcation(dev, true);
4050 
4051 		break;
4052 	case PIPE_C:
4053 		cpt_set_fdi_bc_bifurcation(dev, true);
4054 
4055 		break;
4056 	default:
4057 		BUG();
4058 	}
4059 }
4060 
4061 /* Return which DP Port should be selected for Transcoder DP control */
4062 static enum port
4063 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4064 {
4065 	struct drm_device *dev = crtc->dev;
4066 	struct intel_encoder *encoder;
4067 
4068 	for_each_encoder_on_crtc(dev, crtc, encoder) {
4069 		if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4070 		    encoder->type == INTEL_OUTPUT_EDP)
4071 			return enc_to_dig_port(&encoder->base)->port;
4072 	}
4073 
4074 	return -1;
4075 }
4076 
4077 /*
4078  * Enable PCH resources required for PCH ports:
4079  *   - PCH PLLs
4080  *   - FDI training & RX/TX
4081  *   - update transcoder timings
4082  *   - DP transcoding bits
4083  *   - transcoder
4084  */
4085 static void ironlake_pch_enable(struct drm_crtc *crtc)
4086 {
4087 	struct drm_device *dev = crtc->dev;
4088 	struct drm_i915_private *dev_priv = dev->dev_private;
4089 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4090 	int pipe = intel_crtc->pipe;
4091 	u32 temp;
4092 
4093 	assert_pch_transcoder_disabled(dev_priv, pipe);
4094 
4095 	if (IS_IVYBRIDGE(dev))
4096 		ivybridge_update_fdi_bc_bifurcation(intel_crtc);
4097 
4098 	/* Write the TU size bits before fdi link training, so that error
4099 	 * detection works. */
4100 	I915_WRITE(FDI_RX_TUSIZE1(pipe),
4101 		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
4102 
4103 	/* For PCH output, training FDI link */
4104 	dev_priv->display.fdi_link_train(crtc);
4105 
4106 	/* We need to program the right clock selection before writing the pixel
4107 	 * multiplier into the DPLL. */
4108 	if (HAS_PCH_CPT(dev)) {
4109 		u32 sel;
4110 
4111 		temp = I915_READ(PCH_DPLL_SEL);
4112 		temp |= TRANS_DPLL_ENABLE(pipe);
4113 		sel = TRANS_DPLLB_SEL(pipe);
4114 		if (intel_crtc->config->shared_dpll ==
4115 		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
4116 			temp |= sel;
4117 		else
4118 			temp &= ~sel;
4119 		I915_WRITE(PCH_DPLL_SEL, temp);
4120 	}
4121 
4122 	/* XXX: PCH PLLs can be enabled any time before we enable the PCH
4123 	 * transcoder, and we should actually do this to avoid upsetting any
4124 	 * PCH transcoder that already uses the clock when we share it.
4125 	 *
4126 	 * Note that enable_shared_dpll tries to do the right thing, but
4127 	 * get_shared_dpll unconditionally resets the pll - we need that to have
4128 	 * the right LVDS enable sequence. */
4129 	intel_enable_shared_dpll(intel_crtc);
4130 
4131 	/* set transcoder timing, panel must allow it */
4132 	assert_panel_unlocked(dev_priv, pipe);
4133 	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
4134 
4135 	intel_fdi_normal_train(crtc);
4136 
4137 	/* For PCH DP, enable TRANS_DP_CTL */
4138 	if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
4139 		const struct drm_display_mode *adjusted_mode =
4140 			&intel_crtc->config->base.adjusted_mode;
4141 		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
4142 		i915_reg_t reg = TRANS_DP_CTL(pipe);
4143 		temp = I915_READ(reg);
4144 		temp &= ~(TRANS_DP_PORT_SEL_MASK |
4145 			  TRANS_DP_SYNC_MASK |
4146 			  TRANS_DP_BPC_MASK);
4147 		temp |= TRANS_DP_OUTPUT_ENABLE;
4148 		temp |= bpc << 9; /* same format but at 11:9 */
4149 
4150 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
4151 			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
4152 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
4153 			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
4154 
4155 		switch (intel_trans_dp_port_sel(crtc)) {
4156 		case PORT_B:
4157 			temp |= TRANS_DP_PORT_SEL_B;
4158 			break;
4159 		case PORT_C:
4160 			temp |= TRANS_DP_PORT_SEL_C;
4161 			break;
4162 		case PORT_D:
4163 			temp |= TRANS_DP_PORT_SEL_D;
4164 			break;
4165 		default:
4166 			BUG();
4167 		}
4168 
4169 		I915_WRITE(reg, temp);
4170 	}
4171 
4172 	ironlake_enable_pch_transcoder(dev_priv, pipe);
4173 }
4174 
4175 static void lpt_pch_enable(struct drm_crtc *crtc)
4176 {
4177 	struct drm_device *dev = crtc->dev;
4178 	struct drm_i915_private *dev_priv = dev->dev_private;
4179 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4180 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
4181 
4182 	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
4183 
4184 	lpt_program_iclkip(crtc);
4185 
4186 	/* Set transcoder timing. */
4187 	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
4188 
4189 	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
4190 }
4191 
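/*
 * Post-modeset sanity check for CPT: PIPEDSL reports the pipe's current
 * scanline, so after a short settle we poll (twice, up to 5 ms each) for
 * the value to change; if it never moves, the pipe isn't actually
 * running.
 */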
4192 static void cpt_verify_modeset(struct drm_device *dev, int pipe)
4193 {
4194 	struct drm_i915_private *dev_priv = dev->dev_private;
4195 	i915_reg_t dslreg = PIPEDSL(pipe);
4196 	u32 temp;
4197 
4198 	temp = I915_READ(dslreg);
4199 	udelay(500);
4200 	if (wait_for(I915_READ(dslreg) != temp, 5)) {
4201 		if (wait_for(I915_READ(dslreg) != temp, 5))
4202 			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
4203 	}
4204 }
4205 
4206 static int
4207 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4208 		  unsigned scaler_user, int *scaler_id, unsigned int rotation,
4209 		  int src_w, int src_h, int dst_w, int dst_h)
4210 {
4211 	struct intel_crtc_scaler_state *scaler_state =
4212 		&crtc_state->scaler_state;
4213 	struct intel_crtc *intel_crtc =
4214 		to_intel_crtc(crtc_state->base.crtc);
4215 	int need_scaling;
4216 
4217 	need_scaling = intel_rotation_90_or_270(rotation) ?
4218 		(src_h != dst_w || src_w != dst_h) :
4219 		(src_w != dst_w || src_h != dst_h);
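	/*
	 * e.g. with 90/270 degree rotation, a 1080x1920 source scanned out
	 * into a 1920x1080 destination needs no scaling: the source is
	 * compared against the destination with its axes swapped.
	 */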
4220 
4221 	/*
4222 	 * If the plane is being disabled, the scaler is no longer required, or
4223 	 * a detach is forced, free the scaler bound to this plane/crtc;
4224 	 * to do this, update crtc_state->scaler_state.scaler_users.
4225 	 *
4226 	 * The scaler state in crtc_state is only marked free here so that
4227 	 * the scaler can be assigned to another user. The actual register
4228 	 * update that frees the scaler is done in plane/panel-fit programming.
4229 	 * For that reason crtc/plane_state->scaler_id isn't reset here.
4230 	 */
4231 	if (force_detach || !need_scaling) {
4232 		if (*scaler_id >= 0) {
4233 			scaler_state->scaler_users &= ~(1 << scaler_user);
4234 			scaler_state->scalers[*scaler_id].in_use = 0;
4235 
4236 			DRM_DEBUG_KMS("scaler_user index %u.%u: "
4237 				"Staged freeing scaler id %d scaler_users = 0x%x\n",
4238 				intel_crtc->pipe, scaler_user, *scaler_id,
4239 				scaler_state->scaler_users);
4240 			*scaler_id = -1;
4241 		}
4242 		return 0;
4243 	}
4244 
4245 	/* range checks */
4246 	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
4247 		dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
4248 
4249 		src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
4250 		dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
4251 		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
4252 			"size is out of scaler range\n",
4253 			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
4254 		return -EINVAL;
4255 	}
4256 
4257 	/* mark this plane as a scaler user in crtc_state */
4258 	scaler_state->scaler_users |= (1 << scaler_user);
4259 	DRM_DEBUG_KMS("scaler_user index %u.%u: "
4260 		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
4261 		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
4262 		scaler_state->scaler_users);
4263 
4264 	return 0;
4265 }
4266 
4267 /**
4268  * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
4269  *
4270  * @state: crtc state, including its staged scaler state
4271  *
4272  * Return
4273  *     0 - scaler_usage updated successfully
4274  *    error - requested scaling cannot be supported or other error condition
4275  */
4276 int skl_update_scaler_crtc(struct intel_crtc_state *state)
4277 {
4278 	struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
4279 	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
4280 
4281 	DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
4282 		      intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
4283 
4284 	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
4285 		&state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
4286 		state->pipe_src_w, state->pipe_src_h,
4287 		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
4288 }
4289 
4290 /**
4291  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
4292  *
4293  * @crtc_state: crtc state, including its staged scaler state
4294  * @plane_state: atomic plane state to update
4295  *
4296  * Return
4297  *     0 - scaler_usage updated successfully
4298  *    error - requested scaling cannot be supported or other error condition
4299  */
4300 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4301 				   struct intel_plane_state *plane_state)
4302 {
4303 
4304 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4305 	struct intel_plane *intel_plane =
4306 		to_intel_plane(plane_state->base.plane);
4307 	struct drm_framebuffer *fb = plane_state->base.fb;
4308 	int ret;
4309 
4310 	bool force_detach = !fb || !plane_state->visible;
4311 
4312 	DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
4313 		      intel_plane->base.base.id, intel_crtc->pipe,
4314 		      drm_plane_index(&intel_plane->base));
4315 
4316 	ret = skl_update_scaler(crtc_state, force_detach,
4317 				drm_plane_index(&intel_plane->base),
4318 				&plane_state->scaler_id,
4319 				plane_state->base.rotation,
4320 				drm_rect_width(&plane_state->src) >> 16,
4321 				drm_rect_height(&plane_state->src) >> 16,
4322 				drm_rect_width(&plane_state->dst),
4323 				drm_rect_height(&plane_state->dst));
4324 
4325 	if (ret || plane_state->scaler_id < 0)
4326 		return ret;
4327 
4328 	/* check colorkey */
4329 	if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
4330 		DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
4331 			      intel_plane->base.base.id);
4332 		return -EINVAL;
4333 	}
4334 
4335 	/* Check src format */
4336 	switch (fb->pixel_format) {
4337 	case DRM_FORMAT_RGB565:
4338 	case DRM_FORMAT_XBGR8888:
4339 	case DRM_FORMAT_XRGB8888:
4340 	case DRM_FORMAT_ABGR8888:
4341 	case DRM_FORMAT_ARGB8888:
4342 	case DRM_FORMAT_XRGB2101010:
4343 	case DRM_FORMAT_XBGR2101010:
4344 	case DRM_FORMAT_YUYV:
4345 	case DRM_FORMAT_YVYU:
4346 	case DRM_FORMAT_UYVY:
4347 	case DRM_FORMAT_VYUY:
4348 		break;
4349 	default:
4350 		DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
4351 			intel_plane->base.base.id, fb->base.id, fb->pixel_format);
4352 		return -EINVAL;
4353 	}
4354 
4355 	return 0;
4356 }
4357 
4358 static void skylake_scaler_disable(struct intel_crtc *crtc)
4359 {
4360 	int i;
4361 
4362 	for (i = 0; i < crtc->num_scalers; i++)
4363 		skl_detach_scaler(crtc, i);
4364 }
4365 
4366 static void skylake_pfit_enable(struct intel_crtc *crtc)
4367 {
4368 	struct drm_device *dev = crtc->base.dev;
4369 	struct drm_i915_private *dev_priv = dev->dev_private;
4370 	int pipe = crtc->pipe;
4371 	struct intel_crtc_scaler_state *scaler_state =
4372 		&crtc->config->scaler_state;
4373 
4374 	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
4375 
4376 	if (crtc->config->pch_pfit.enabled) {
4377 		int id;
4378 
4379 		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
4380 			DRM_ERROR("Requesting pfit without getting a scaler first\n");
4381 			return;
4382 		}
4383 
4384 		id = scaler_state->scaler_id;
4385 		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
4386 			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
4387 		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
4388 		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
4389 
4390 		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
4391 	}
4392 }
4393 
4394 static void ironlake_pfit_enable(struct intel_crtc *crtc)
4395 {
4396 	struct drm_device *dev = crtc->base.dev;
4397 	struct drm_i915_private *dev_priv = dev->dev_private;
4398 	int pipe = crtc->pipe;
4399 
4400 	if (crtc->config->pch_pfit.enabled) {
4401 		/* Force use of hard-coded filter coefficients
4402 		 * as some pre-programmed values are broken,
4403 		 * e.g. x201.
4404 		 */
4405 		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
4406 			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
4407 						 PF_PIPE_SEL_IVB(pipe));
4408 		else
4409 			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
4410 		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
4411 		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
4412 	}
4413 }
4414 
4415 void hsw_enable_ips(struct intel_crtc *crtc)
4416 {
4417 	struct drm_device *dev = crtc->base.dev;
4418 	struct drm_i915_private *dev_priv = dev->dev_private;
4419 
4420 	if (!crtc->config->ips_enabled)
4421 		return;
4422 
4423 	/*
4424 	 * We can only enable IPS after we enable a plane and wait for a vblank.
4425 	 * This function is called from post_plane_update, which is run after
4426 	 * a vblank wait.
4427 	 */
4428 
4429 	assert_plane_enabled(dev_priv, crtc->plane);
4430 	if (IS_BROADWELL(dev)) {
4431 		mutex_lock(&dev_priv->rps.hw_lock);
4432 		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
4433 		mutex_unlock(&dev_priv->rps.hw_lock);
4434 		/* Quoting Art Runyan: "it's not safe to expect any particular
4435 		 * value in IPS_CTL bit 31 after enabling IPS through the
4436 		 * mailbox." Moreover, the mailbox may return a bogus state,
4437 		 * so we need to just enable it and continue on.
4438 		 */
4439 	} else {
4440 		I915_WRITE(IPS_CTL, IPS_ENABLE);
4441 		/* The bit only becomes 1 in the next vblank, so this wait here
4442 		 * is essentially intel_wait_for_vblank. If we don't have this
4443 		 * and don't wait for vblanks until the end of crtc_enable, then
4444 		 * the HW state readout code will complain that the expected
4445 		 * IPS_CTL value is not the one we read. */
4446 		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
4447 			DRM_ERROR("Timed out waiting for IPS enable\n");
4448 	}
4449 }
4450 
4451 void hsw_disable_ips(struct intel_crtc *crtc)
4452 {
4453 	struct drm_device *dev = crtc->base.dev;
4454 	struct drm_i915_private *dev_priv = dev->dev_private;
4455 
4456 	if (!crtc->config->ips_enabled)
4457 		return;
4458 
4459 	assert_plane_enabled(dev_priv, crtc->plane);
4460 	if (IS_BROADWELL(dev)) {
4461 		mutex_lock(&dev_priv->rps.hw_lock);
4462 		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
4463 		mutex_unlock(&dev_priv->rps.hw_lock);
4464 		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
4465 		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
4466 			DRM_ERROR("Timed out waiting for IPS disable\n");
4467 	} else {
4468 		I915_WRITE(IPS_CTL, 0);
4469 		POSTING_READ(IPS_CTL);
4470 	}
4471 
4472 	/* We need to wait for a vblank before we can disable the plane. */
4473 	intel_wait_for_vblank(dev, crtc->pipe);
4474 }
4475 
4476 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
4477 {
4478 	if (intel_crtc->overlay) {
4479 		struct drm_device *dev = intel_crtc->base.dev;
4480 		struct drm_i915_private *dev_priv = dev->dev_private;
4481 
4482 		mutex_lock(&dev->struct_mutex);
4483 		dev_priv->mm.interruptible = false;
4484 		(void) intel_overlay_switch_off(intel_crtc->overlay);
4485 		dev_priv->mm.interruptible = true;
4486 		mutex_unlock(&dev->struct_mutex);
4487 	}
4488 
4489 	/* Let userspace switch the overlay on again. In most cases userspace
4490 	 * has to recompute where to put it anyway.
4491 	 */
4492 }
4493 
4494 /**
4495  * intel_post_enable_primary - Perform operations after enabling primary plane
4496  * @crtc: the CRTC whose primary plane was just enabled
4497  *
4498  * Performs potentially sleeping operations that must be done after the primary
4499  * plane is enabled, such as updating FBC and IPS.  Note that this may be
4500  * called due to an explicit primary plane update, or due to an implicit
4501  * re-enable that is caused when a sprite plane is updated to no longer
4502  * completely hide the primary plane.
4503  */
4504 static void
4505 intel_post_enable_primary(struct drm_crtc *crtc)
4506 {
4507 	struct drm_device *dev = crtc->dev;
4508 	struct drm_i915_private *dev_priv = dev->dev_private;
4509 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4510 	int pipe = intel_crtc->pipe;
4511 
4512 	/*
4513 	 * FIXME IPS should be fine as long as one plane is
4514 	 * enabled, but in practice it seems to have problems
4515 	 * when going from primary only to sprite only and vice
4516 	 * versa.
4517 	 */
4518 	hsw_enable_ips(intel_crtc);
4519 
4520 	/*
4521 	 * Gen2 reports pipe underruns whenever all planes are disabled.
4522 	 * So don't enable underrun reporting before at least some planes
4523 	 * are enabled.
4524 	 * FIXME: Need to fix the logic to work when we turn off all planes
4525 	 * but leave the pipe running.
4526 	 */
4527 	if (IS_GEN2(dev))
4528 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4529 
4530 	/* Underruns don't always raise interrupts, so check manually. */
4531 	intel_check_cpu_fifo_underruns(dev_priv);
4532 	intel_check_pch_fifo_underruns(dev_priv);
4533 }
4534 
4535 /* FIXME move all this to pre_plane_update() with proper state tracking */
4536 static void
4537 intel_pre_disable_primary(struct drm_crtc *crtc)
4538 {
4539 	struct drm_device *dev = crtc->dev;
4540 	struct drm_i915_private *dev_priv = dev->dev_private;
4541 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4542 	int pipe = intel_crtc->pipe;
4543 
4544 	/*
4545 	 * Gen2 reports pipe underruns whenever all planes are disabled.
4546 	 * So disable underrun reporting before all the planes get disabled.
4547 	 * FIXME: Need to fix the logic to work when we turn off all planes
4548 	 * but leave the pipe running.
4549 	 */
4550 	if (IS_GEN2(dev))
4551 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4552 
4553 	/*
4554 	 * FIXME IPS should be fine as long as one plane is
4555 	 * enabled, but in practice it seems to have problems
4556 	 * when going from primary only to sprite only and vice
4557 	 * versa.
4558 	 */
4559 	hsw_disable_ips(intel_crtc);
4560 }
4561 
4562 /* FIXME get rid of this and use pre_plane_update */
4563 static void
4564 intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
4565 {
4566 	struct drm_device *dev = crtc->dev;
4567 	struct drm_i915_private *dev_priv = dev->dev_private;
4568 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4569 	int pipe = intel_crtc->pipe;
4570 
4571 	intel_pre_disable_primary(crtc);
4572 
4573 	/*
4574 	 * Vblank time updates from the shadow to live plane control register
4575 	 * are blocked if the memory self-refresh mode is active at that
4576 	 * moment. So to make sure the plane gets truly disabled, disable
4577 	 * first the self-refresh mode. The self-refresh enable bit in turn
4578 	 * will be checked/applied by the HW only at the next frame start
4579 	 * event which is after the vblank start event, so we need to have a
4580 	 * wait-for-vblank between disabling the plane and the pipe.
4581 	 */
4582 	if (HAS_GMCH_DISPLAY(dev)) {
4583 		intel_set_memory_cxsr(dev_priv, false);
4584 		dev_priv->wm.vlv.cxsr = false;
4585 		intel_wait_for_vblank(dev, pipe);
4586 	}
4587 }
4588 
4589 static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
4590 {
4591 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
4592 	struct drm_atomic_state *old_state = old_crtc_state->base.state;
4593 	struct intel_crtc_state *pipe_config =
4594 		to_intel_crtc_state(crtc->base.state);
4595 	struct drm_device *dev = crtc->base.dev;
4596 	struct drm_plane *primary = crtc->base.primary;
4597 	struct drm_plane_state *old_pri_state =
4598 		drm_atomic_get_existing_plane_state(old_state, primary);
4599 
4600 	intel_frontbuffer_flip(dev, pipe_config->fb_bits);
4601 
4602 	crtc->wm.cxsr_allowed = true;
4603 
4604 	if (pipe_config->update_wm_post && pipe_config->base.active)
4605 		intel_update_watermarks(&crtc->base);
4606 
4607 	if (old_pri_state) {
4608 		struct intel_plane_state *primary_state =
4609 			to_intel_plane_state(primary->state);
4610 		struct intel_plane_state *old_primary_state =
4611 			to_intel_plane_state(old_pri_state);
4612 
4613 		intel_fbc_post_update(crtc);
4614 
4615 		if (primary_state->visible &&
4616 		    (needs_modeset(&pipe_config->base) ||
4617 		     !old_primary_state->visible))
4618 			intel_post_enable_primary(&crtc->base);
4619 	}
4620 }
4621 
4622 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
4623 {
4624 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
4625 	struct drm_device *dev = crtc->base.dev;
4626 	struct drm_i915_private *dev_priv = dev->dev_private;
4627 	struct intel_crtc_state *pipe_config =
4628 		to_intel_crtc_state(crtc->base.state);
4629 	struct drm_atomic_state *old_state = old_crtc_state->base.state;
4630 	struct drm_plane *primary = crtc->base.primary;
4631 	struct drm_plane_state *old_pri_state =
4632 		drm_atomic_get_existing_plane_state(old_state, primary);
4633 	bool modeset = needs_modeset(&pipe_config->base);
4634 
4635 	if (old_pri_state) {
4636 		struct intel_plane_state *primary_state =
4637 			to_intel_plane_state(primary->state);
4638 		struct intel_plane_state *old_primary_state =
4639 			to_intel_plane_state(old_pri_state);
4640 
4641 		intel_fbc_pre_update(crtc);
4642 
4643 		if (old_primary_state->visible &&
4644 		    (modeset || !primary_state->visible))
4645 			intel_pre_disable_primary(&crtc->base);
4646 	}
4647 
4648 	if (pipe_config->disable_cxsr) {
4649 		crtc->wm.cxsr_allowed = false;
4650 
4651 		/*
4652 		 * Vblank time updates from the shadow to live plane control register
4653 		 * are blocked if the memory self-refresh mode is active at that
4654 		 * moment. So to make sure the plane gets truly disabled, disable
4655 		 * first the self-refresh mode. The self-refresh enable bit in turn
4656 		 * will be checked/applied by the HW only at the next frame start
4657 		 * event which is after the vblank start event, so we need to have a
4658 		 * wait-for-vblank between disabling the plane and the pipe.
4659 		 */
4660 		if (old_crtc_state->base.active) {
4661 			intel_set_memory_cxsr(dev_priv, false);
4662 			dev_priv->wm.vlv.cxsr = false;
4663 			intel_wait_for_vblank(dev, crtc->pipe);
4664 		}
4665 	}
4666 
4667 	/*
4668 	 * IVB workaround: must disable low power watermarks for at least
4669 	 * one frame before enabling scaling.  LP watermarks can be re-enabled
4670 	 * when scaling is disabled.
4671 	 *
4672 	 * WaCxSRDisabledForSpriteScaling:ivb
4673 	 */
4674 	if (pipe_config->disable_lp_wm) {
4675 		ilk_disable_lp_wm(dev);
4676 		intel_wait_for_vblank(dev, crtc->pipe);
4677 	}
4678 
4679 	/*
4680 	 * If we're doing a modeset, we're done.  No need to do any pre-vblank
4681 	 * watermark programming here.
4682 	 */
4683 	if (needs_modeset(&pipe_config->base))
4684 		return;
4685 
4686 	/*
4687 	 * For platforms that support atomic watermarks, program the
4688 	 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
4689 	 * will be the intermediate values that are safe for both pre- and
4690 	 * post- vblank; when vblank happens, the 'active' values will be set
4691 	 * to the final 'target' values and we'll do this again to get the
4692 	 * optimal watermarks.  For gen9+ platforms, the values we program here
4693 	 * will be the final target values which will get automatically latched
4694 	 * at vblank time; no further programming will be necessary.
4695 	 *
4696 	 * If a platform hasn't been transitioned to atomic watermarks yet,
4697 	 * we'll continue to update watermarks the old way, if flags tell
4698 	 * us to.
4699 	 */
4700 	if (dev_priv->display.initial_watermarks != NULL)
4701 		dev_priv->display.initial_watermarks(pipe_config);
4702 	else if (pipe_config->update_wm_pre)
4703 		intel_update_watermarks(&crtc->base);
4704 }
4705 
4706 static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
4707 {
4708 	struct drm_device *dev = crtc->dev;
4709 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4710 	struct drm_plane *p;
4711 	int pipe = intel_crtc->pipe;
4712 
4713 	intel_crtc_dpms_overlay_disable(intel_crtc);
4714 
4715 	drm_for_each_plane_mask(p, dev, plane_mask)
4716 		to_intel_plane(p)->disable_plane(p, crtc);
4717 
4718 	/*
4719 	 * FIXME: Once we grow proper nuclear flip support out of this we need
4720 	 * to compute the mask of flip planes precisely. For the time being
4721 	 * consider this a flip to a NULL plane.
4722 	 */
4723 	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
4724 }
4725 
4726 static void ironlake_crtc_enable(struct drm_crtc *crtc)
4727 {
4728 	struct drm_device *dev = crtc->dev;
4729 	struct drm_i915_private *dev_priv = dev->dev_private;
4730 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4731 	struct intel_encoder *encoder;
4732 	int pipe = intel_crtc->pipe;
4733 	struct intel_crtc_state *pipe_config =
4734 		to_intel_crtc_state(crtc->state);
4735 
4736 	if (WARN_ON(intel_crtc->active))
4737 		return;
4738 
4739 	/*
4740 	 * Sometimes spurious CPU pipe underruns happen during FDI
4741 	 * training, at least with VGA+HDMI cloning. Suppress them.
4742 	 *
4743 	 * On ILK we get occasional spurious CPU pipe underruns
4744 	 * between eDP port A enable and vdd enable. Also PCH port
4745 	 * enable seems to result in the occasional CPU pipe underrun.
4746 	 *
4747 	 * Spurious PCH underruns also occur during PCH enabling.
4748 	 */
4749 	if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
4750 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4751 	if (intel_crtc->config->has_pch_encoder)
4752 		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
4753 
4754 	if (intel_crtc->config->has_pch_encoder)
4755 		intel_prepare_shared_dpll(intel_crtc);
4756 
4757 	if (intel_crtc->config->has_dp_encoder)
4758 		intel_dp_set_m_n(intel_crtc, M1_N1);
4759 
4760 	intel_set_pipe_timings(intel_crtc);
4761 	intel_set_pipe_src_size(intel_crtc);
4762 
4763 	if (intel_crtc->config->has_pch_encoder) {
4764 		intel_cpu_transcoder_set_m_n(intel_crtc,
4765 				     &intel_crtc->config->fdi_m_n, NULL);
4766 	}
4767 
4768 	ironlake_set_pipeconf(crtc);
4769 
4770 	intel_crtc->active = true;
4771 
4772 	for_each_encoder_on_crtc(dev, crtc, encoder)
4773 		if (encoder->pre_enable)
4774 			encoder->pre_enable(encoder);
4775 
4776 	if (intel_crtc->config->has_pch_encoder) {
4777 		/* Note: FDI PLL enabling _must_ be done before we enable the
4778 		 * cpu pipes, hence this is separate from all the other fdi/pch
4779 		 * enabling. */
4780 		ironlake_fdi_pll_enable(intel_crtc);
4781 	} else {
4782 		assert_fdi_tx_disabled(dev_priv, pipe);
4783 		assert_fdi_rx_disabled(dev_priv, pipe);
4784 	}
4785 
4786 	ironlake_pfit_enable(intel_crtc);
4787 
4788 	/*
4789 	 * On ILK+ the LUT must be loaded before the pipe is running but with
4790 	 * clocks enabled
4791 	 */
4792 	intel_color_load_luts(&pipe_config->base);
4793 
4794 	if (dev_priv->display.initial_watermarks != NULL)
4795 		dev_priv->display.initial_watermarks(intel_crtc->config);
4796 	intel_enable_pipe(intel_crtc);
4797 
4798 	if (intel_crtc->config->has_pch_encoder)
4799 		ironlake_pch_enable(crtc);
4800 
4801 	assert_vblank_disabled(crtc);
4802 	drm_crtc_vblank_on(crtc);
4803 
4804 	for_each_encoder_on_crtc(dev, crtc, encoder)
4805 		encoder->enable(encoder);
4806 
4807 	if (HAS_PCH_CPT(dev))
4808 		cpt_verify_modeset(dev, intel_crtc->pipe);
4809 
4810 	/* Must wait for vblank to avoid spurious PCH FIFO underruns */
4811 	if (intel_crtc->config->has_pch_encoder)
4812 		intel_wait_for_vblank(dev, pipe);
4813 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4814 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
4815 }
4816 
4817 /* IPS only exists on ULT machines and is tied to pipe A. */
4818 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4819 {
4820 	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4821 }
4822 
4823 static void haswell_crtc_enable(struct drm_crtc *crtc)
4824 {
4825 	struct drm_device *dev = crtc->dev;
4826 	struct drm_i915_private *dev_priv = dev->dev_private;
4827 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4828 	struct intel_encoder *encoder;
4829 	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
4830 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
4831 	struct intel_crtc_state *pipe_config =
4832 		to_intel_crtc_state(crtc->state);
4833 
4834 	if (WARN_ON(intel_crtc->active))
4835 		return;
4836 
4837 	if (intel_crtc->config->has_pch_encoder)
4838 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4839 						      false);
4840 
4841 	if (intel_crtc->config->shared_dpll)
4842 		intel_enable_shared_dpll(intel_crtc);
4843 
4844 	if (intel_crtc->config->has_dp_encoder)
4845 		intel_dp_set_m_n(intel_crtc, M1_N1);
4846 
4847 	if (!intel_crtc->config->has_dsi_encoder)
4848 		intel_set_pipe_timings(intel_crtc);
4849 
4850 	intel_set_pipe_src_size(intel_crtc);
4851 
4852 	if (cpu_transcoder != TRANSCODER_EDP &&
4853 	    !transcoder_is_dsi(cpu_transcoder)) {
4854 		I915_WRITE(PIPE_MULT(cpu_transcoder),
4855 			   intel_crtc->config->pixel_multiplier - 1);
4856 	}
4857 
4858 	if (intel_crtc->config->has_pch_encoder) {
4859 		intel_cpu_transcoder_set_m_n(intel_crtc,
4860 				     &intel_crtc->config->fdi_m_n, NULL);
4861 	}
4862 
4863 	if (!intel_crtc->config->has_dsi_encoder)
4864 		haswell_set_pipeconf(crtc);
4865 
4866 	haswell_set_pipemisc(crtc);
4867 
4868 	intel_color_set_csc(&pipe_config->base);
4869 
4870 	intel_crtc->active = true;
4871 
4872 	if (intel_crtc->config->has_pch_encoder)
4873 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4874 	else
4875 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4876 
4877 	for_each_encoder_on_crtc(dev, crtc, encoder) {
4878 		if (encoder->pre_enable)
4879 			encoder->pre_enable(encoder);
4880 	}
4881 
4882 	if (intel_crtc->config->has_pch_encoder)
4883 		dev_priv->display.fdi_link_train(crtc);
4884 
4885 	if (!intel_crtc->config->has_dsi_encoder)
4886 		intel_ddi_enable_pipe_clock(intel_crtc);
4887 
4888 	if (INTEL_INFO(dev)->gen >= 9)
4889 		skylake_pfit_enable(intel_crtc);
4890 	else
4891 		ironlake_pfit_enable(intel_crtc);
4892 
4893 	/*
4894 	 * On ILK+ the LUT must be loaded before the pipe is running but with
4895 	 * clocks enabled
4896 	 */
4897 	intel_color_load_luts(&pipe_config->base);
4898 
4899 	intel_ddi_set_pipe_settings(crtc);
4900 	if (!intel_crtc->config->has_dsi_encoder)
4901 		intel_ddi_enable_transcoder_func(crtc);
4902 
4903 	if (dev_priv->display.initial_watermarks != NULL)
4904 		dev_priv->display.initial_watermarks(pipe_config);
4905 	else
4906 		intel_update_watermarks(crtc);
4907 
4908 	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
4909 	if (!intel_crtc->config->has_dsi_encoder)
4910 		intel_enable_pipe(intel_crtc);
4911 
4912 	if (intel_crtc->config->has_pch_encoder)
4913 		lpt_pch_enable(crtc);
4914 
4915 	if (intel_crtc->config->dp_encoder_is_mst)
4916 		intel_ddi_set_vc_payload_alloc(crtc, true);
4917 
4918 	assert_vblank_disabled(crtc);
4919 	drm_crtc_vblank_on(crtc);
4920 
4921 	for_each_encoder_on_crtc(dev, crtc, encoder) {
4922 		encoder->enable(encoder);
4923 		intel_opregion_notify_encoder(encoder, true);
4924 	}
4925 
4926 	if (intel_crtc->config->has_pch_encoder) {
4927 		intel_wait_for_vblank(dev, pipe);
4928 		intel_wait_for_vblank(dev, pipe);
4929 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4930 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4931 						      true);
4932 	}
4933 
4934 	/* If we change the relative order between pipe/planes enabling, we need
4935 	 * to change the workaround. */
4936 	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
4937 	if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
4938 		intel_wait_for_vblank(dev, hsw_workaround_pipe);
4939 		intel_wait_for_vblank(dev, hsw_workaround_pipe);
4940 	}
4941 }
4942 
4943 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
4944 {
4945 	struct drm_device *dev = crtc->base.dev;
4946 	struct drm_i915_private *dev_priv = dev->dev_private;
4947 	int pipe = crtc->pipe;
4948 
4949 	/* To avoid upsetting the power well on Haswell, only disable the pfit if
4950 	 * it's in use. The hw state code will make sure we get this right. */
4951 	if (force || crtc->config->pch_pfit.enabled) {
4952 		I915_WRITE(PF_CTL(pipe), 0);
4953 		I915_WRITE(PF_WIN_POS(pipe), 0);
4954 		I915_WRITE(PF_WIN_SZ(pipe), 0);
4955 	}
4956 }
4957 
4958 static void ironlake_crtc_disable(struct drm_crtc *crtc)
4959 {
4960 	struct drm_device *dev = crtc->dev;
4961 	struct drm_i915_private *dev_priv = dev->dev_private;
4962 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4963 	struct intel_encoder *encoder;
4964 	int pipe = intel_crtc->pipe;
4965 
4966 	/*
4967 	 * Sometimes spurious CPU pipe underruns happen when the
4968 	 * pipe is already disabled, but FDI RX/TX is still enabled.
4969 	 * Happens at least with VGA+HDMI cloning. Suppress them.
4970 	 */
4971 	if (intel_crtc->config->has_pch_encoder) {
4972 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4973 		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
4974 	}
4975 
4976 	for_each_encoder_on_crtc(dev, crtc, encoder)
4977 		encoder->disable(encoder);
4978 
4979 	drm_crtc_vblank_off(crtc);
4980 	assert_vblank_disabled(crtc);
4981 
4982 	intel_disable_pipe(intel_crtc);
4983 
4984 	ironlake_pfit_disable(intel_crtc, false);
4985 
4986 	if (intel_crtc->config->has_pch_encoder)
4987 		ironlake_fdi_disable(crtc);
4988 
4989 	for_each_encoder_on_crtc(dev, crtc, encoder)
4990 		if (encoder->post_disable)
4991 			encoder->post_disable(encoder);
4992 
4993 	if (intel_crtc->config->has_pch_encoder) {
4994 		ironlake_disable_pch_transcoder(dev_priv, pipe);
4995 
4996 		if (HAS_PCH_CPT(dev)) {
4997 			i915_reg_t reg;
4998 			u32 temp;
4999 
5000 			/* disable TRANS_DP_CTL */
5001 			reg = TRANS_DP_CTL(pipe);
5002 			temp = I915_READ(reg);
5003 			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
5004 				  TRANS_DP_PORT_SEL_MASK);
5005 			temp |= TRANS_DP_PORT_SEL_NONE;
5006 			I915_WRITE(reg, temp);
5007 
5008 			/* disable DPLL_SEL */
5009 			temp = I915_READ(PCH_DPLL_SEL);
5010 			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
5011 			I915_WRITE(PCH_DPLL_SEL, temp);
5012 		}
5013 
5014 		ironlake_fdi_pll_disable(intel_crtc);
5015 	}
5016 
5017 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5018 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
5019 }
5020 
5021 static void haswell_crtc_disable(struct drm_crtc *crtc)
5022 {
5023 	struct drm_device *dev = crtc->dev;
5024 	struct drm_i915_private *dev_priv = dev->dev_private;
5025 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5026 	struct intel_encoder *encoder;
5027 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
5028 
5029 	if (intel_crtc->config->has_pch_encoder)
5030 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5031 						      false);
5032 
5033 	for_each_encoder_on_crtc(dev, crtc, encoder) {
5034 		intel_opregion_notify_encoder(encoder, false);
5035 		encoder->disable(encoder);
5036 	}
5037 
5038 	drm_crtc_vblank_off(crtc);
5039 	assert_vblank_disabled(crtc);
5040 
5041 	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
5042 	if (!intel_crtc->config->has_dsi_encoder)
5043 		intel_disable_pipe(intel_crtc);
5044 
5045 	if (intel_crtc->config->dp_encoder_is_mst)
5046 		intel_ddi_set_vc_payload_alloc(crtc, false);
5047 
5048 	if (!intel_crtc->config->has_dsi_encoder)
5049 		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
5050 
5051 	if (INTEL_INFO(dev)->gen >= 9)
5052 		skylake_scaler_disable(intel_crtc);
5053 	else
5054 		ironlake_pfit_disable(intel_crtc, false);
5055 
5056 	if (!intel_crtc->config->has_dsi_encoder)
5057 		intel_ddi_disable_pipe_clock(intel_crtc);
5058 
5059 	for_each_encoder_on_crtc(dev, crtc, encoder)
5060 		if (encoder->post_disable)
5061 			encoder->post_disable(encoder);
5062 
5063 	if (intel_crtc->config->has_pch_encoder) {
5064 		lpt_disable_pch_transcoder(dev_priv);
5065 		lpt_disable_iclkip(dev_priv);
5066 		intel_ddi_fdi_disable(crtc);
5067 
5068 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5069 						      true);
5070 	}
5071 }
5072 
5073 static void i9xx_pfit_enable(struct intel_crtc *crtc)
5074 {
5075 	struct drm_device *dev = crtc->base.dev;
5076 	struct drm_i915_private *dev_priv = dev->dev_private;
5077 	struct intel_crtc_state *pipe_config = crtc->config;
5078 
5079 	if (!pipe_config->gmch_pfit.control)
5080 		return;
5081 
5082 	/*
5083 	 * The panel fitter should only be adjusted whilst the pipe is disabled,
5084 	 * according to register description and PRM.
5085 	 */
5086 	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
5087 	assert_pipe_disabled(dev_priv, crtc->pipe);
5088 
5089 	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
5090 	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
5091 
5092 	/* Border color in case we don't scale up to the full screen. Black by
5093 	 * default, change to something else for debugging. */
5094 	I915_WRITE(BCLRPAT(crtc->pipe), 0);
5095 }
5096 
5097 static enum intel_display_power_domain port_to_power_domain(enum port port)
5098 {
5099 	switch (port) {
5100 	case PORT_A:
5101 		return POWER_DOMAIN_PORT_DDI_A_LANES;
5102 	case PORT_B:
5103 		return POWER_DOMAIN_PORT_DDI_B_LANES;
5104 	case PORT_C:
5105 		return POWER_DOMAIN_PORT_DDI_C_LANES;
5106 	case PORT_D:
5107 		return POWER_DOMAIN_PORT_DDI_D_LANES;
5108 	case PORT_E:
5109 		return POWER_DOMAIN_PORT_DDI_E_LANES;
5110 	default:
5111 		MISSING_CASE(port);
5112 		return POWER_DOMAIN_PORT_OTHER;
5113 	}
5114 }
5115 
5116 static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
5117 {
5118 	switch (port) {
5119 	case PORT_A:
5120 		return POWER_DOMAIN_AUX_A;
5121 	case PORT_B:
5122 		return POWER_DOMAIN_AUX_B;
5123 	case PORT_C:
5124 		return POWER_DOMAIN_AUX_C;
5125 	case PORT_D:
5126 		return POWER_DOMAIN_AUX_D;
5127 	case PORT_E:
5128 		/* FIXME: Check VBT for actual wiring of PORT E */
5129 		return POWER_DOMAIN_AUX_D;
5130 	default:
5131 		MISSING_CASE(port);
5132 		return POWER_DOMAIN_AUX_A;
5133 	}
5134 }
5135 
5136 enum intel_display_power_domain
5137 intel_display_port_power_domain(struct intel_encoder *intel_encoder)
5138 {
5139 	struct drm_device *dev = intel_encoder->base.dev;
5140 	struct intel_digital_port *intel_dig_port;
5141 
5142 	switch (intel_encoder->type) {
5143 	case INTEL_OUTPUT_UNKNOWN:
5144 		/* Only DDI platforms should ever use this output type */
5145 		WARN_ON_ONCE(!HAS_DDI(dev));
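		/* fall through */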
5146 	case INTEL_OUTPUT_DISPLAYPORT:
5147 	case INTEL_OUTPUT_HDMI:
5148 	case INTEL_OUTPUT_EDP:
5149 		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
5150 		return port_to_power_domain(intel_dig_port->port);
5151 	case INTEL_OUTPUT_DP_MST:
5152 		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
5153 		return port_to_power_domain(intel_dig_port->port);
5154 	case INTEL_OUTPUT_ANALOG:
5155 		return POWER_DOMAIN_PORT_CRT;
5156 	case INTEL_OUTPUT_DSI:
5157 		return POWER_DOMAIN_PORT_DSI;
5158 	default:
5159 		return POWER_DOMAIN_PORT_OTHER;
5160 	}
5161 }
5162 
5163 enum intel_display_power_domain
5164 intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
5165 {
5166 	struct drm_device *dev = intel_encoder->base.dev;
5167 	struct intel_digital_port *intel_dig_port;
5168 
5169 	switch (intel_encoder->type) {
5170 	case INTEL_OUTPUT_UNKNOWN:
5171 	case INTEL_OUTPUT_HDMI:
5172 		/*
5173 		 * Only DDI platforms should ever use these output types.
5174 		 * We can get here after the HDMI detect code has already set
5175 		 * the type of the shared encoder. Since we can't be sure
5176 		 * what's the status of the given connectors, play safe and
5177 		 * run the DP detection too.
5178 		 */
5179 		WARN_ON_ONCE(!HAS_DDI(dev));
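		/* fall through */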
5180 	case INTEL_OUTPUT_DISPLAYPORT:
5181 	case INTEL_OUTPUT_EDP:
5182 		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
5183 		return port_to_aux_power_domain(intel_dig_port->port);
5184 	case INTEL_OUTPUT_DP_MST:
5185 		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
5186 		return port_to_aux_power_domain(intel_dig_port->port);
5187 	default:
5188 		MISSING_CASE(intel_encoder->type);
5189 		return POWER_DOMAIN_AUX_A;
5190 	}
5191 }
5192 
5193 static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
5194 					    struct intel_crtc_state *crtc_state)
5195 {
5196 	struct drm_device *dev = crtc->dev;
5197 	struct drm_encoder *encoder;
5198 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5199 	enum i915_pipe pipe = intel_crtc->pipe;
5200 	unsigned long mask;
5201 	enum transcoder transcoder = crtc_state->cpu_transcoder;
5202 
5203 	if (!crtc_state->base.active)
5204 		return 0;
5205 
5206 	mask = BIT(POWER_DOMAIN_PIPE(pipe));
5207 	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
5208 	if (crtc_state->pch_pfit.enabled ||
5209 	    crtc_state->pch_pfit.force_thru)
5210 		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
5211 
5212 	drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
5213 		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
5214 
5215 		mask |= BIT(intel_display_port_power_domain(intel_encoder));
5216 	}
5217 
5218 	if (crtc_state->shared_dpll)
5219 		mask |= BIT(POWER_DOMAIN_PLLS);
5220 
5221 	return mask;
5222 }
5223 
5224 static unsigned long
5225 modeset_get_crtc_power_domains(struct drm_crtc *crtc,
5226 			       struct intel_crtc_state *crtc_state)
5227 {
5228 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5229 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5230 	enum intel_display_power_domain domain;
5231 	unsigned long domains, new_domains, old_domains;
5232 
5233 	old_domains = intel_crtc->enabled_power_domains;
5234 	intel_crtc->enabled_power_domains = new_domains =
5235 		get_crtc_power_domains(crtc, crtc_state);
5236 
5237 	domains = new_domains & ~old_domains;
5238 
5239 	for_each_power_domain(domain, domains)
5240 		intel_display_power_get(dev_priv, domain);
5241 
5242 	return old_domains & ~new_domains;
5243 }
5244 
5245 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
5246 				      unsigned long domains)
5247 {
5248 	enum intel_display_power_domain domain;
5249 
5250 	for_each_power_domain(domain, domains)
5251 		intel_display_power_put(dev_priv, domain);
5252 }
5253 
5254 static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
5255 {
5256 	int max_cdclk_freq = dev_priv->max_cdclk_freq;
5257 
5258 	if (INTEL_INFO(dev_priv)->gen >= 9 ||
5259 	    IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5260 		return max_cdclk_freq;
5261 	else if (IS_CHERRYVIEW(dev_priv))
5262 		return max_cdclk_freq*95/100;
5263 	else if (INTEL_INFO(dev_priv)->gen < 4)
		/* pre-gen4 can drive a double wide pipe, hence the 2x */
5264 		return 2*max_cdclk_freq*90/100;
5265 	else
5266 		return max_cdclk_freq*90/100;
5267 }
5268 
5269 static void intel_update_max_cdclk(struct drm_device *dev)
5270 {
5271 	struct drm_i915_private *dev_priv = dev->dev_private;
5272 
5273 	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
5274 		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
5275 
5276 		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
5277 			dev_priv->max_cdclk_freq = 675000;
5278 		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
5279 			dev_priv->max_cdclk_freq = 540000;
5280 		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
5281 			dev_priv->max_cdclk_freq = 450000;
5282 		else
5283 			dev_priv->max_cdclk_freq = 337500;
5284 	} else if (IS_BROXTON(dev)) {
5285 		dev_priv->max_cdclk_freq = 624000;
5286 	} else if (IS_BROADWELL(dev))  {
5287 		/*
5288 		 * FIXME with extra cooling we can allow
5289 		 * 540 MHz for ULX and 675 MHz for ULT.
5290 		 * How can we know if extra cooling is
5291 		 * available? PCI ID, VTB, something else?
5292 		 */
5293 		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
5294 			dev_priv->max_cdclk_freq = 450000;
5295 		else if (IS_BDW_ULX(dev))
5296 			dev_priv->max_cdclk_freq = 450000;
5297 		else if (IS_BDW_ULT(dev))
5298 			dev_priv->max_cdclk_freq = 540000;
5299 		else
5300 			dev_priv->max_cdclk_freq = 675000;
5301 	} else if (IS_CHERRYVIEW(dev)) {
5302 		dev_priv->max_cdclk_freq = 320000;
5303 	} else if (IS_VALLEYVIEW(dev)) {
5304 		dev_priv->max_cdclk_freq = 400000;
5305 	} else {
5306 		/* otherwise assume cdclk is fixed */
5307 		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
5308 	}
5309 
5310 	dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
5311 
5312 	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
5313 			 dev_priv->max_cdclk_freq);
5314 
5315 	DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
5316 			 dev_priv->max_dotclk_freq);
5317 }
5318 
5319 static void intel_update_cdclk(struct drm_device *dev)
5320 {
5321 	struct drm_i915_private *dev_priv = dev->dev_private;
5322 
5323 	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
5324 	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
5325 			 dev_priv->cdclk_freq);
5326 
5332 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5333 		/*
5334 		 * Program the gmbus_freq based on the cdclk frequency.
5335 		 * BSpec erroneously claims we should aim for 4MHz, but
5336 		 * in fact 1MHz is the correct frequency.
5337 		 */
5338 		I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
5339 	}
5340 
5341 	if (dev_priv->max_cdclk_freq == 0)
5342 		intel_update_max_cdclk(dev);
5343 }
5344 
5345 static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency)
5346 {
5347 	uint32_t divider;
5348 	uint32_t ratio;
5349 	uint32_t current_freq;
5350 	int ret;
5351 
5352 	/* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
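	/* e.g. 624000 kHz = 19200 kHz * 65 / 2 with the CD2X divider at 1 */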
5353 	switch (frequency) {
5354 	case 144000:
5355 		divider = BXT_CDCLK_CD2X_DIV_SEL_4;
5356 		ratio = BXT_DE_PLL_RATIO(60);
5357 		break;
5358 	case 288000:
5359 		divider = BXT_CDCLK_CD2X_DIV_SEL_2;
5360 		ratio = BXT_DE_PLL_RATIO(60);
5361 		break;
5362 	case 384000:
5363 		divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
5364 		ratio = BXT_DE_PLL_RATIO(60);
5365 		break;
5366 	case 576000:
5367 		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5368 		ratio = BXT_DE_PLL_RATIO(60);
5369 		break;
5370 	case 624000:
5371 		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5372 		ratio = BXT_DE_PLL_RATIO(65);
5373 		break;
5374 	case 19200:
5375 		/*
5376 		 * Bypass frequency with DE PLL disabled. Init ratio, divider
5377 		 * to suppress GCC warning.
5378 		 */
5379 		ratio = 0;
5380 		divider = 0;
5381 		break;
5382 	default:
5383 		DRM_ERROR("unsupported CDCLK freq %d\n", frequency);
5384 
5385 		return;
5386 	}
5387 
5388 	mutex_lock(&dev_priv->rps.hw_lock);
5389 	/* Inform power controller of upcoming frequency change */
5390 	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5391 				      0x80000000);
5392 	mutex_unlock(&dev_priv->rps.hw_lock);
5393 
5394 	if (ret) {
5395 		DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
5396 			  ret, frequency);
5397 		return;
5398 	}
5399 
5400 	current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
5401 	/* convert from .1 fixpoint MHz with -1MHz offset to kHz */
5402 	current_freq = current_freq * 500 + 1000;
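	/* e.g. a raw decimal field of 1246 decodes to 1246 * 500 + 1000 = 624000 kHz */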
5403 
5404 	/*
5405 	 * The DE PLL has to be disabled:
5406 	 * - when setting the 19.2MHz bypass frequency (PLL isn't used)
5407 	 * - before setting to 624MHz (PLL needs toggling)
5408 	 * - before changing away from 624MHz (PLL needs toggling)
5409 	 */
5410 	if (frequency == 19200 || frequency == 624000 ||
5411 	    current_freq == 624000) {
5412 		I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
5413 		/* Timeout 200us */
5414 		if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
5415 			     1))
5416 			DRM_ERROR("timeout waiting for DE PLL unlock\n");
5417 	}
5418 
5419 	if (frequency != 19200) {
5420 		uint32_t val;
5421 
5422 		val = I915_READ(BXT_DE_PLL_CTL);
5423 		val &= ~BXT_DE_PLL_RATIO_MASK;
5424 		val |= ratio;
5425 		I915_WRITE(BXT_DE_PLL_CTL, val);
5426 
5427 		I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
5428 		/* Timeout 200us */
5429 		if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
5430 			DRM_ERROR("timeout waiting for DE PLL lock\n");
5431 
5432 		val = I915_READ(CDCLK_CTL);
5433 		val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
5434 		val |= divider;
5435 		/*
5436 		 * Disable SSA Precharge when CD clock frequency < 500 MHz,
5437 		 * enable otherwise.
5438 		 */
5439 		val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5440 		if (frequency >= 500000)
5441 			val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5442 
5443 		val &= ~CDCLK_FREQ_DECIMAL_MASK;
5444 		/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
5445 		val |= (frequency - 1000) / 500;
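		/* e.g. 624000 kHz encodes as (624000 - 1000) / 500 = 1246 */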
5446 		I915_WRITE(CDCLK_CTL, val);
5447 	}
5448 
5449 	mutex_lock(&dev_priv->rps.hw_lock);
5450 	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5451 				      DIV_ROUND_UP(frequency, 25000));
5452 	mutex_unlock(&dev_priv->rps.hw_lock);
5453 
5454 	if (ret) {
5455 		DRM_ERROR("PCode CDCLK freq set failed (err %d, freq %d)\n",
5456 			  ret, frequency);
5457 		return;
5458 	}
5459 
5460 	intel_update_cdclk(dev_priv->dev);
5461 }
5462 
5463 static bool broxton_cdclk_is_enabled(struct drm_i915_private *dev_priv)
5464 {
5465 	if (!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE))
5466 		return false;
5467 
5468 	/* TODO: Check for a valid CDCLK rate */
5469 
5470 	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_REQUEST)) {
5471 		DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power not requested\n");
5472 
5473 		return false;
5474 	}
5475 
5476 	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) {
5477 		DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power hasn't settled\n");
5478 
5479 		return false;
5480 	}
5481 
5482 	return true;
5483 }
5484 
5485 bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv)
5486 {
5487 	return broxton_cdclk_is_enabled(dev_priv);
5488 }
5489 
5490 void broxton_init_cdclk(struct drm_i915_private *dev_priv)
5491 {
5492 	/* check if cd clock is enabled */
5493 	if (broxton_cdclk_is_enabled(dev_priv)) {
5494 		DRM_DEBUG_KMS("CDCLK already enabled, won't reprogram it\n");
5495 		return;
5496 	}
5497 
5498 	DRM_DEBUG_KMS("CDCLK not enabled, enabling it\n");
5499 
5500 	/*
5501 	 * FIXME:
5502 	 * - The initial CDCLK needs to be read from VBT.
5503 	 *   Need to make this change once the VBT has the changes for BXT.
5504 	 * - check if setting the max (or any) cdclk freq is really necessary
5505 	 *   here, it belongs to modeset time
5506 	 */
5507 	broxton_set_cdclk(dev_priv, 624000);
5508 
5509 	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
5510 	POSTING_READ(DBUF_CTL);
5511 
5512 	udelay(10);
5513 
5514 	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
5515 		DRM_ERROR("DBuf power enable timeout!\n");
5516 }
5517 
5518 void broxton_uninit_cdclk(struct drm_i915_private *dev_priv)
5519 {
5520 	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
5521 	POSTING_READ(DBUF_CTL);
5522 
5523 	udelay(10);
5524 
5525 	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
5526 		DRM_ERROR("DBuf power disable timeout!\n");
5527 
5528 	/* Set minimum (bypass) frequency, in effect turning off the DE PLL */
5529 	broxton_set_cdclk(dev_priv, 19200);
5530 }
5531 
5532 static const struct skl_cdclk_entry {
5533 	unsigned int freq;
5534 	unsigned int vco;
5535 } skl_cdclk_frequencies[] = {
5536 	{ .freq = 308570, .vco = 8640 },
5537 	{ .freq = 337500, .vco = 8100 },
5538 	{ .freq = 432000, .vco = 8640 },
5539 	{ .freq = 450000, .vco = 8100 },
5540 	{ .freq = 540000, .vco = 8100 },
5541 	{ .freq = 617140, .vco = 8640 },
5542 	{ .freq = 675000, .vco = 8100 },
5543 };
5544 
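/*
 * The CDCLK_CTL decimal field is .1 MHz fixed point with a -1 MHz offset,
 * e.g. 337500 kHz encodes as (337500 - 1000) / 500 = 673.
 */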
5545 static unsigned int skl_cdclk_decimal(unsigned int freq)
5546 {
5547 	return (freq - 1000) / 500;
5548 }
5549 
5550 static unsigned int skl_cdclk_get_vco(unsigned int freq)
5551 {
5552 	unsigned int i;
5553 
5554 	for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) {
5555 		const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i];
5556 
5557 		if (e->freq == freq)
5558 			return e->vco;
5559 	}
5560 
5561 	return 8100;
5562 }
5563 
5564 static void
5565 skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
5566 {
5567 	unsigned int min_freq;
5568 	u32 val;
5569 
5570 	/* select the minimum CDCLK before enabling DPLL 0 */
5575 	if (required_vco == 8640)
5576 		min_freq = 308570;
5577 	else
5578 		min_freq = 337500;
5579 
5580 	val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
5581 
5582 	I915_WRITE(CDCLK_CTL, val);
5583 	POSTING_READ(CDCLK_CTL);
5584 
5585 	/*
5586 	 * We always enable DPLL0 with the lowest link rate possible, but still
5587 	 * taking into account the VCO required to operate the eDP panel at the
5588 	 * desired frequency. The usual DP link rates operate with a VCO of
5589 	 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
5590 	 * The modeset code is responsible for the selection of the exact link
5591 	 * rate later on, with the constraint of choosing a frequency that
5592 	 * works with required_vco.
5593 	 */
5594 	val = I915_READ(DPLL_CTRL1);
5595 
5596 	val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
5597 		 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
5598 	val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
5599 	if (required_vco == 8640)
5600 		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
5601 					    SKL_DPLL0);
5602 	else
5603 		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5604 					    SKL_DPLL0);
5605 
5606 	I915_WRITE(DPLL_CTRL1, val);
5607 	POSTING_READ(DPLL_CTRL1);
5608 
5609 	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
5610 
5611 	if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
5612 		DRM_ERROR("DPLL0 not locked\n");
5613 }
5614 
5615 static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
5616 {
5617 	int ret;
5618 	u32 val;
5619 
5620 	/* inform PCU we want to change CDCLK */
5621 	val = SKL_CDCLK_PREPARE_FOR_CHANGE;
5622 	mutex_lock(&dev_priv->rps.hw_lock);
5623 	ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
5624 	mutex_unlock(&dev_priv->rps.hw_lock);
5625 
5626 	return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
5627 }
5628 
5629 static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
5630 {
5631 	unsigned int i;
5632 
5633 	for (i = 0; i < 15; i++) {
5634 		if (skl_cdclk_pcu_ready(dev_priv))
5635 			return true;
5636 		udelay(10);
5637 	}
5638 
5639 	return false;
5640 }
5641 
5642 static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
5643 {
5644 	struct drm_device *dev = dev_priv->dev;
5645 	u32 freq_select, pcu_ack;
5646 
5647 	DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz\n", freq);
5648 
5649 	if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
5650 		DRM_ERROR("failed to inform PCU about cdclk change\n");
5651 		return;
5652 	}
5653 
5654 	/* set CDCLK_CTL */
5655 	switch (freq) {
5656 	case 450000:
5657 	case 432000:
5658 		freq_select = CDCLK_FREQ_450_432;
5659 		pcu_ack = 1;
5660 		break;
5661 	case 540000:
5662 		freq_select = CDCLK_FREQ_540;
5663 		pcu_ack = 2;
5664 		break;
5665 	case 308570:
5666 	case 337500:
5667 	default:
5668 		freq_select = CDCLK_FREQ_337_308;
5669 		pcu_ack = 0;
5670 		break;
5671 	case 617140:
5672 	case 675000:
5673 		freq_select = CDCLK_FREQ_675_617;
5674 		pcu_ack = 3;
5675 		break;
5676 	}
5677 
5678 	I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
5679 	POSTING_READ(CDCLK_CTL);
5680 
5681 	/* inform PCU of the change */
5682 	mutex_lock(&dev_priv->rps.hw_lock);
5683 	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
5684 	mutex_unlock(&dev_priv->rps.hw_lock);
5685 
5686 	intel_update_cdclk(dev);
5687 }
5688 
5689 void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
5690 {
5691 	/* disable DBUF power */
5692 	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
5693 	POSTING_READ(DBUF_CTL);
5694 
5695 	udelay(10);
5696 
5697 	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
5698 		DRM_ERROR("DBuf power disable timeout\n");
5699 
5700 	/* disable DPLL0 */
5701 	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
5702 	if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
5703 		DRM_ERROR("Couldn't disable DPLL0\n");
5704 }
5705 
5706 void skl_init_cdclk(struct drm_i915_private *dev_priv)
5707 {
5708 	unsigned int required_vco;
5709 
5710 	/* DPLL0 not enabled (happens on early BIOS versions) */
5711 	if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
5712 		/* enable DPLL0 */
5713 		required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
5714 		skl_dpll0_enable(dev_priv, required_vco);
5715 	}
5716 
5717 	/* set CDCLK to the frequency the BIOS chose */
5718 	skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);
5719 
5720 	/* enable DBUF power */
5721 	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
5722 	POSTING_READ(DBUF_CTL);
5723 
5724 	udelay(10);
5725 
5726 	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
5727 		DRM_ERROR("DBuf power enable timeout\n");
5728 }
5729 
5730 int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
5731 {
5732 	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
5733 	uint32_t cdctl = I915_READ(CDCLK_CTL);
5734 	int freq = dev_priv->skl_boot_cdclk;
5735 
5736 	/*
5737 	 * Check if the pre-OS initialized the display.
5738 	 * The pre-OS sets the SWF18 scratchpad register, which the OS
5739 	 * drivers can read to check that status.
5740 	 */
5741 	if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
5742 		goto sanitize;
5743 
5744 	/* Is PLL enabled and locked ? */
5745 	if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK)))
5746 		goto sanitize;
5747 
5748 	/* DPLL okay; verify the cdclock
5749 	 *
5750 	 * In some instances the frequency selection is correct but the
5751 	 * decimal part is programmed wrong by the BIOS when the pre-OS
5752 	 * does not enable the display. Verify that as well.
5753 	 */
5754 	if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq)))
5755 		/* All well; nothing to sanitize */
5756 		return false;
5757 sanitize:
5758 	/*
5759 	 * For now, initialize with the max cdclk until
5760 	 * we get dynamic cdclk support.
5761 	 */
5762 	dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
5763 	skl_init_cdclk(dev_priv);
5764 
5765 	/* we did have to sanitize */
5766 	return true;
5767 }
5768 
5769 /* Adjust CDclk dividers to allow high res or save power if possible */
5770 static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
5771 {
5772 	struct drm_i915_private *dev_priv = dev->dev_private;
5773 	u32 val, cmd;
5774 
5775 	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
5776 					!= dev_priv->cdclk_freq);
5777 
5778 	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
5779 		cmd = 2;
5780 	else if (cdclk == 266667)
5781 		cmd = 1;
5782 	else
5783 		cmd = 0;
5784 
5785 	mutex_lock(&dev_priv->rps.hw_lock);
5786 	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
5787 	val &= ~DSPFREQGUAR_MASK;
5788 	val |= (cmd << DSPFREQGUAR_SHIFT);
5789 	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
5790 	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
5791 		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
5792 		     50)) {
5793 		DRM_ERROR("timed out waiting for CDclk change\n");
5794 	}
5795 	mutex_unlock(&dev_priv->rps.hw_lock);
5796 
5797 	mutex_lock(&dev_priv->sb_lock);
5798 
5799 	if (cdclk == 400000) {
5800 		u32 divider;
5801 
5802 		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
5803 
5804 		/* adjust cdclk divider */
5805 		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
5806 		val &= ~CCK_FREQUENCY_VALUES;
5807 		val |= divider;
5808 		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
5809 
5810 		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
5811 			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
5812 			     50))
5813 			DRM_ERROR("timed out waiting for CDclk change\n");
5814 	}
5815 
5816 	/* adjust self-refresh exit latency value */
5817 	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
5818 	val &= ~0x7f;
5819 
5820 	/*
5821 	 * For high bandwidth configs, we set a higher latency in the bunit
5822 	 * so that the core display fetch happens in time to avoid underruns.
5823 	 */
5824 	if (cdclk == 400000)
5825 		val |= 4500 / 250; /* 4.5 usec */
5826 	else
5827 		val |= 3000 / 250; /* 3.0 usec */
5828 	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
5829 
5830 	mutex_unlock(&dev_priv->sb_lock);
5831 
5832 	intel_update_cdclk(dev);
5833 }
5834 
5835 static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
5836 {
5837 	struct drm_i915_private *dev_priv = dev->dev_private;
5838 	u32 val, cmd;
5839 
5840 	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
5841 						!= dev_priv->cdclk_freq);
5842 
5843 	switch (cdclk) {
5844 	case 333333:
5845 	case 320000:
5846 	case 266667:
5847 	case 200000:
5848 		break;
5849 	default:
5850 		MISSING_CASE(cdclk);
5851 		return;
5852 	}
5853 
5854 	/*
5855 	 * Specs are full of misinformation, but testing on actual
5856 	 * hardware has shown that we just need to write the desired
5857 	 * CCK divider into the Punit register.
5858 	 */
5859 	cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
5860 
5861 	mutex_lock(&dev_priv->rps.hw_lock);
5862 	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
5863 	val &= ~DSPFREQGUAR_MASK_CHV;
5864 	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
5865 	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
5866 	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
5867 		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
5868 		     50)) {
5869 		DRM_ERROR("timed out waiting for CDclk change\n");
5870 	}
5871 	mutex_unlock(&dev_priv->rps.hw_lock);
5872 
5873 	intel_update_cdclk(dev);
5874 }
5875 
5876 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
5877 				 int max_pixclk)
5878 {
5879 	int freq_320 = (dev_priv->hpll_freq <<  1) % 320000 != 0 ? 333333 : 320000;
5880 	int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
5881 
5882 	/*
5883 	 * Really only a few cases to deal with, as only 4 CDclks are supported:
5884 	 *   200MHz
5885 	 *   267MHz
5886 	 *   320/333MHz (depends on HPLL freq)
5887 	 *   400MHz (VLV only)
5888 	 * So we check to see whether we're above 90% (VLV) or 95% (CHV)
5889 	 * of the lower bin and adjust if needed.
5890 	 *
5891 	 * We seem to get an unstable or solid color picture at 200MHz.
5892 	 * Not sure what's wrong. For now use 200MHz only when all pipes
5893 	 * are off.
5894 	 */
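	/*
	 * Worked example: on VLV (90% guardband) a 250000 kHz pixel clock
	 * exceeds 266667*90/100 = 240000 but not 320000*90/100 = 288000,
	 * so the 320/333MHz bin is chosen.
	 */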
5895 	if (!IS_CHERRYVIEW(dev_priv) &&
5896 	    max_pixclk > freq_320*limit/100)
5897 		return 400000;
5898 	else if (max_pixclk > 266667*limit/100)
5899 		return freq_320;
5900 	else if (max_pixclk > 0)
5901 		return 266667;
5902 	else
5903 		return 200000;
5904 }
5905 
5906 static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
5907 			      int max_pixclk)
5908 {
5909 	/*
5910 	 * FIXME:
5911 	 * - remove the guardband, it's not needed on BXT
5912 	 * - set 19.2MHz bypass frequency if there are no active pipes
5913 	 */
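	/*
	 * Worked example: a 300000 kHz pixel clock exceeds 288000*9/10 =
	 * 259200 but not 384000*9/10 = 345600, so 384000 is chosen.
	 */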
5914 	if (max_pixclk > 576000*9/10)
5915 		return 624000;
5916 	else if (max_pixclk > 384000*9/10)
5917 		return 576000;
5918 	else if (max_pixclk > 288000*9/10)
5919 		return 384000;
5920 	else if (max_pixclk > 144000*9/10)
5921 		return 288000;
5922 	else
5923 		return 144000;
5924 }
5925 
5926 /* Compute the max pixel clock for new configuration. */
5927 static int intel_mode_max_pixclk(struct drm_device *dev,
5928 				 struct drm_atomic_state *state)
5929 {
5930 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
5931 	struct drm_i915_private *dev_priv = dev->dev_private;
5932 	struct drm_crtc *crtc;
5933 	struct drm_crtc_state *crtc_state;
5934 	unsigned max_pixclk = 0, i;
5935 	enum i915_pipe pipe;
5936 
5937 	memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
5938 	       sizeof(intel_state->min_pixclk));
5939 
5940 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
5941 		int pixclk = 0;
5942 
5943 		if (crtc_state->enable)
5944 			pixclk = crtc_state->adjusted_mode.crtc_clock;
5945 
5946 		intel_state->min_pixclk[i] = pixclk;
5947 	}
5948 
5949 	for_each_pipe(dev_priv, pipe)
5950 		max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);
5951 
5952 	return max_pixclk;
5953 }
5954 
5955 static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
5956 {
5957 	struct drm_device *dev = state->dev;
5958 	struct drm_i915_private *dev_priv = dev->dev_private;
5959 	int max_pixclk = intel_mode_max_pixclk(dev, state);
5960 	struct intel_atomic_state *intel_state =
5961 		to_intel_atomic_state(state);
5962 
5963 	if (max_pixclk < 0)
5964 		return max_pixclk;
5965 
5966 	intel_state->cdclk = intel_state->dev_cdclk =
5967 		valleyview_calc_cdclk(dev_priv, max_pixclk);
5968 
5969 	if (!intel_state->active_crtcs)
5970 		intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0);
5971 
5972 	return 0;
5973 }
5974 
5975 static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
5976 {
5977 	struct drm_device *dev = state->dev;
5978 	struct drm_i915_private *dev_priv = dev->dev_private;
5979 	int max_pixclk = intel_mode_max_pixclk(dev, state);
5980 	struct intel_atomic_state *intel_state =
5981 		to_intel_atomic_state(state);
5982 
5983 	if (max_pixclk < 0)
5984 		return max_pixclk;
5985 
5986 	intel_state->cdclk = intel_state->dev_cdclk =
5987 		broxton_calc_cdclk(dev_priv, max_pixclk);
5988 
5989 	if (!intel_state->active_crtcs)
5990 		intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0);
5991 
5992 	return 0;
5993 }
5994 
5995 static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
5996 {
5997 	unsigned int credits, default_credits;
5998 
5999 	if (IS_CHERRYVIEW(dev_priv))
6000 		default_credits = PFI_CREDIT(12);
6001 	else
6002 		default_credits = PFI_CREDIT(8);
6003 
6004 	if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
6005 		/* CHV suggested value is 31 or 63 */
6006 		if (IS_CHERRYVIEW(dev_priv))
6007 			credits = PFI_CREDIT_63;
6008 		else
6009 			credits = PFI_CREDIT(15);
6010 	} else {
6011 		credits = default_credits;
6012 	}
6013 
6014 	/*
6015 	 * WA - write default credits before re-programming
6016 	 * FIXME: should we also set the resend bit here?
6017 	 */
6018 	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
6019 		   default_credits);
6020 
6021 	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
6022 		   credits | PFI_CREDIT_RESEND);
6023 
6024 	/*
6025 	 * FIXME is this guaranteed to clear
6026 	 * immediately or should we poll for it?
6027 	 */
6028 	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
6029 }
6030 
6031 static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
6032 {
6033 	struct drm_device *dev = old_state->dev;
6034 	struct drm_i915_private *dev_priv = dev->dev_private;
6035 	struct intel_atomic_state *old_intel_state =
6036 		to_intel_atomic_state(old_state);
6037 	unsigned req_cdclk = old_intel_state->dev_cdclk;
6038 
6039 	/*
6040 	 * FIXME: We can end up here with all power domains off, yet
6041 	 * with a CDCLK frequency other than the minimum. To account
6042 	 * for this take the PIPE-A power domain, which covers the HW
6043 	 * blocks needed for the following programming. This can be
6044 	 * removed once it's guaranteed that we get here either with
6045 	 * the minimum CDCLK set, or the required power domains
6046 	 * enabled.
6047 	 */
6048 	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
6049 
6050 	if (IS_CHERRYVIEW(dev))
6051 		cherryview_set_cdclk(dev, req_cdclk);
6052 	else
6053 		valleyview_set_cdclk(dev, req_cdclk);
6054 
6055 	vlv_program_pfi_credits(dev_priv);
6056 
6057 	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
6058 }
6059 
6060 static void valleyview_crtc_enable(struct drm_crtc *crtc)
6061 {
6062 	struct drm_device *dev = crtc->dev;
6063 	struct drm_i915_private *dev_priv = to_i915(dev);
6064 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6065 	struct intel_encoder *encoder;
6066 	struct intel_crtc_state *pipe_config =
6067 		to_intel_crtc_state(crtc->state);
6068 	int pipe = intel_crtc->pipe;
6069 
6070 	if (WARN_ON(intel_crtc->active))
6071 		return;
6072 
6073 	if (intel_crtc->config->has_dp_encoder)
6074 		intel_dp_set_m_n(intel_crtc, M1_N1);
6075 
6076 	intel_set_pipe_timings(intel_crtc);
6077 	intel_set_pipe_src_size(intel_crtc);
6078 
6079 	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
6082 		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6083 		I915_WRITE(CHV_CANVAS(pipe), 0);
6084 	}
6085 
6086 	i9xx_set_pipeconf(intel_crtc);
6087 
6088 	intel_crtc->active = true;
6089 
6090 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6091 
6092 	for_each_encoder_on_crtc(dev, crtc, encoder)
6093 		if (encoder->pre_pll_enable)
6094 			encoder->pre_pll_enable(encoder);
6095 
6096 	if (IS_CHERRYVIEW(dev)) {
6097 		chv_prepare_pll(intel_crtc, intel_crtc->config);
6098 		chv_enable_pll(intel_crtc, intel_crtc->config);
6099 	} else {
6100 		vlv_prepare_pll(intel_crtc, intel_crtc->config);
6101 		vlv_enable_pll(intel_crtc, intel_crtc->config);
6102 	}
6103 
6104 	for_each_encoder_on_crtc(dev, crtc, encoder)
6105 		if (encoder->pre_enable)
6106 			encoder->pre_enable(encoder);
6107 
6108 	i9xx_pfit_enable(intel_crtc);
6109 
6110 	intel_color_load_luts(&pipe_config->base);
6111 
6112 	intel_update_watermarks(crtc);
6113 	intel_enable_pipe(intel_crtc);
6114 
6115 	assert_vblank_disabled(crtc);
6116 	drm_crtc_vblank_on(crtc);
6117 
6118 	for_each_encoder_on_crtc(dev, crtc, encoder)
6119 		encoder->enable(encoder);
6120 }
6121 
6122 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
6123 {
6124 	struct drm_device *dev = crtc->base.dev;
6125 	struct drm_i915_private *dev_priv = dev->dev_private;
6126 
6127 	I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
6128 	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
6129 }
6130 
6131 static void i9xx_crtc_enable(struct drm_crtc *crtc)
6132 {
6133 	struct drm_device *dev = crtc->dev;
6134 	struct drm_i915_private *dev_priv = to_i915(dev);
6135 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6136 	struct intel_encoder *encoder;
6137 	struct intel_crtc_state *pipe_config =
6138 		to_intel_crtc_state(crtc->state);
6139 	enum i915_pipe pipe = intel_crtc->pipe;
6140 
6141 	if (WARN_ON(intel_crtc->active))
6142 		return;
6143 
6144 	i9xx_set_pll_dividers(intel_crtc);
6145 
6146 	if (intel_crtc->config->has_dp_encoder)
6147 		intel_dp_set_m_n(intel_crtc, M1_N1);
6148 
6149 	intel_set_pipe_timings(intel_crtc);
6150 	intel_set_pipe_src_size(intel_crtc);
6151 
6152 	i9xx_set_pipeconf(intel_crtc);
6153 
6154 	intel_crtc->active = true;
6155 
6156 	if (!IS_GEN2(dev))
6157 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6158 
6159 	for_each_encoder_on_crtc(dev, crtc, encoder)
6160 		if (encoder->pre_enable)
6161 			encoder->pre_enable(encoder);
6162 
6163 	i9xx_enable_pll(intel_crtc);
6164 
6165 	i9xx_pfit_enable(intel_crtc);
6166 
6167 	intel_color_load_luts(&pipe_config->base);
6168 
6169 	intel_update_watermarks(crtc);
6170 	intel_enable_pipe(intel_crtc);
6171 
6172 	assert_vblank_disabled(crtc);
6173 	drm_crtc_vblank_on(crtc);
6174 
6175 	for_each_encoder_on_crtc(dev, crtc, encoder)
6176 		encoder->enable(encoder);
6177 }
6178 
6179 static void i9xx_pfit_disable(struct intel_crtc *crtc)
6180 {
6181 	struct drm_device *dev = crtc->base.dev;
6182 	struct drm_i915_private *dev_priv = dev->dev_private;
6183 
6184 	if (!crtc->config->gmch_pfit.control)
6185 		return;
6186 
6187 	assert_pipe_disabled(dev_priv, crtc->pipe);
6188 
6189 	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
6190 			 I915_READ(PFIT_CONTROL));
6191 	I915_WRITE(PFIT_CONTROL, 0);
6192 }
6193 
6194 static void i9xx_crtc_disable(struct drm_crtc *crtc)
6195 {
6196 	struct drm_device *dev = crtc->dev;
6197 	struct drm_i915_private *dev_priv = dev->dev_private;
6198 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6199 	struct intel_encoder *encoder;
6200 	int pipe = intel_crtc->pipe;
6201 
6202 	/*
6203 	 * On gen2 planes are double buffered but the pipe isn't, so we must
6204 	 * wait for planes to fully turn off before disabling the pipe.
6205 	 */
6206 	if (IS_GEN2(dev))
6207 		intel_wait_for_vblank(dev, pipe);
6208 
6209 	for_each_encoder_on_crtc(dev, crtc, encoder)
6210 		encoder->disable(encoder);
6211 
6212 	drm_crtc_vblank_off(crtc);
6213 	assert_vblank_disabled(crtc);
6214 
6215 	intel_disable_pipe(intel_crtc);
6216 
6217 	i9xx_pfit_disable(intel_crtc);
6218 
6219 	for_each_encoder_on_crtc(dev, crtc, encoder)
6220 		if (encoder->post_disable)
6221 			encoder->post_disable(encoder);
6222 
6223 	if (!intel_crtc->config->has_dsi_encoder) {
6224 		if (IS_CHERRYVIEW(dev))
6225 			chv_disable_pll(dev_priv, pipe);
6226 		else if (IS_VALLEYVIEW(dev))
6227 			vlv_disable_pll(dev_priv, pipe);
6228 		else
6229 			i9xx_disable_pll(intel_crtc);
6230 	}
6231 
6232 	for_each_encoder_on_crtc(dev, crtc, encoder)
6233 		if (encoder->post_pll_disable)
6234 			encoder->post_pll_disable(encoder);
6235 
6236 	if (!IS_GEN2(dev))
6237 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6238 }
6239 
6240 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6241 {
6242 	struct intel_encoder *encoder;
6243 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6244 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6245 	enum intel_display_power_domain domain;
6246 	unsigned long domains;
6247 
6248 	if (!intel_crtc->active)
6249 		return;
6250 
6251 	if (to_intel_plane_state(crtc->primary->state)->visible) {
6252 		WARN_ON(intel_crtc->unpin_work);
6253 
6254 		intel_pre_disable_primary_noatomic(crtc);
6255 
6256 		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
6257 		to_intel_plane_state(crtc->primary->state)->visible = false;
6258 	}
6259 
6260 	dev_priv->display.crtc_disable(crtc);
6261 
6262 	DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was enabled, now disabled\n",
6263 		      crtc->base.id);
6264 
6265 	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
6266 	crtc->state->active = false;
6267 	intel_crtc->active = false;
6268 	crtc->enabled = false;
6269 	crtc->state->connector_mask = 0;
6270 	crtc->state->encoder_mask = 0;
6271 
6272 	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
6273 		encoder->base.crtc = NULL;
6274 
6275 	intel_fbc_disable(intel_crtc);
6276 	intel_update_watermarks(crtc);
6277 	intel_disable_shared_dpll(intel_crtc);
6278 
6279 	domains = intel_crtc->enabled_power_domains;
6280 	for_each_power_domain(domain, domains)
6281 		intel_display_power_put(dev_priv, domain);
6282 	intel_crtc->enabled_power_domains = 0;
6283 
6284 	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
6285 	dev_priv->min_pixclk[intel_crtc->pipe] = 0;
6286 }
6287 
6288 /*
6289  * Turn all CRTCs off, but do not adjust state.
6290  * This has to be paired with a call to intel_modeset_setup_hw_state.
6291  */
6292 int intel_display_suspend(struct drm_device *dev)
6293 {
6294 	struct drm_i915_private *dev_priv = to_i915(dev);
6295 	struct drm_atomic_state *state;
6296 	int ret;
6297 
6298 	state = drm_atomic_helper_suspend(dev);
6299 	ret = PTR_ERR_OR_ZERO(state);
6300 	if (ret)
6301 		DRM_ERROR("Suspending CRTCs failed with %i\n", ret);
6302 	else
6303 		dev_priv->modeset_restore_state = state;
6304 	return ret;
6305 }
6306 
6307 void intel_encoder_destroy(struct drm_encoder *encoder)
6308 {
6309 	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6310 
6311 	drm_encoder_cleanup(encoder);
6312 	kfree(intel_encoder);
6313 }
6314 
6315 /* Cross check the actual hw state with our own modeset state tracking (and
6316  * its internal consistency). */
6317 static void intel_connector_verify_state(struct intel_connector *connector)
6318 {
6319 	struct drm_crtc *crtc = connector->base.state->crtc;
6320 
6321 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
6322 		      connector->base.base.id,
6323 		      connector->base.name);
6324 
6325 	if (connector->get_hw_state(connector)) {
6326 		struct intel_encoder *encoder = connector->encoder;
6327 		struct drm_connector_state *conn_state = connector->base.state;
6328 
6329 		I915_STATE_WARN(!crtc,
6330 			 "connector enabled without attached crtc\n");
6331 
6332 		if (!crtc)
6333 			return;
6334 
6335 		I915_STATE_WARN(!crtc->state->active,
6336 		      "connector is active, but attached crtc isn't\n");
6337 
6338 		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
6339 			return;
6340 
6341 		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
6342 			"atomic encoder doesn't match attached encoder\n");
6343 
6344 		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
6345 			"attached encoder crtc differs from connector crtc\n");
6346 	} else {
6347 		I915_STATE_WARN(crtc && crtc->state->active,
6348 			"attached crtc is active, but connector isn't\n");
6349 		I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
6350 			"best encoder set without crtc!\n");
6351 	}
6352 }
6353 
6354 int intel_connector_init(struct intel_connector *connector)
6355 {
6356 	drm_atomic_helper_connector_reset(&connector->base);
6357 
6358 	if (!connector->base.state)
6359 		return -ENOMEM;
6360 
6361 	return 0;
6362 }
6363 
6364 struct intel_connector *intel_connector_alloc(void)
6365 {
6366 	struct intel_connector *connector;
6367 
6368 	connector = kzalloc(sizeof *connector, GFP_KERNEL);
6369 	if (!connector)
6370 		return NULL;
6371 
6372 	if (intel_connector_init(connector) < 0) {
6373 		kfree(connector);
6374 		return NULL;
6375 	}
6376 
6377 	return connector;
6378 }
6379 
6380 /* Simple connector->get_hw_state implementation for encoders that support only
6381  * one connector and no cloning and hence the encoder state determines the state
6382  * of the connector. */
6383 bool intel_connector_get_hw_state(struct intel_connector *connector)
6384 {
6385 	enum i915_pipe pipe = 0;
6386 	struct intel_encoder *encoder = connector->encoder;
6387 
6388 	return encoder->get_hw_state(encoder, &pipe);
6389 }
6390 
6391 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
6392 {
6393 	if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6394 		return crtc_state->fdi_lanes;
6395 
6396 	return 0;
6397 }
6398 
6399 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum i915_pipe pipe,
6400 				     struct intel_crtc_state *pipe_config)
6401 {
6402 	struct drm_atomic_state *state = pipe_config->base.state;
6403 	struct intel_crtc *other_crtc;
6404 	struct intel_crtc_state *other_crtc_state;
6405 
6406 	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
6407 		      pipe_name(pipe), pipe_config->fdi_lanes);
6408 	if (pipe_config->fdi_lanes > 4) {
6409 		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
6410 			      pipe_name(pipe), pipe_config->fdi_lanes);
6411 		return -EINVAL;
6412 	}
6413 
6414 	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
6415 		if (pipe_config->fdi_lanes > 2) {
6416 			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
6417 				      pipe_config->fdi_lanes);
6418 			return -EINVAL;
6419 		} else {
6420 			return 0;
6421 		}
6422 	}
6423 
6424 	if (INTEL_INFO(dev)->num_pipes == 2)
6425 		return 0;
6426 
6427 	/* Ivybridge 3 pipe is really complicated */
6428 	switch (pipe) {
6429 	case PIPE_A:
6430 		return 0;
6431 	case PIPE_B:
6432 		if (pipe_config->fdi_lanes <= 2)
6433 			return 0;
6434 
6435 		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
6436 		other_crtc_state =
6437 			intel_atomic_get_crtc_state(state, other_crtc);
6438 		if (IS_ERR(other_crtc_state))
6439 			return PTR_ERR(other_crtc_state);
6440 
6441 		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
6442 			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
6443 				      pipe_name(pipe), pipe_config->fdi_lanes);
6444 			return -EINVAL;
6445 		}
6446 		return 0;
6447 	case PIPE_C:
6448 		if (pipe_config->fdi_lanes > 2) {
6449 			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
6450 				      pipe_name(pipe), pipe_config->fdi_lanes);
6451 			return -EINVAL;
6452 		}
6453 
6454 		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
6455 		other_crtc_state =
6456 			intel_atomic_get_crtc_state(state, other_crtc);
6457 		if (IS_ERR(other_crtc_state))
6458 			return PTR_ERR(other_crtc_state);
6459 
6460 		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
6461 			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
6462 			return -EINVAL;
6463 		}
6464 		return 0;
6465 	default:
6466 		BUG();
6467 	}
6468 }
6469 
6470 #define RETRY 1
6471 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
6472 				       struct intel_crtc_state *pipe_config)
6473 {
6474 	struct drm_device *dev = intel_crtc->base.dev;
6475 	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6476 	int lane, link_bw, fdi_dotclock, ret;
6477 	bool needs_recompute = false;
6478 
6479 retry:
6480 	/* FDI is a binary signal running at ~2.7GHz, encoding
6481 	 * each output octet as 10 bits. The actual frequency
6482 	 * is stored as a divider into a 100MHz clock, and the
6483 	 * mode pixel clock is stored in units of 1 kHz.
6484 	 * Hence the bw of each lane in terms of the mode signal
6485 	 * is:
6486 	 */
6487 	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
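	/* e.g. ~2.7 Gbit/s per lane / 10 bits per octet = ~270000 kHz of mode signal per lane */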
6488 
6489 	fdi_dotclock = adjusted_mode->crtc_clock;
6490 
6491 	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
6492 					   pipe_config->pipe_bpp);
6493 
6494 	pipe_config->fdi_lanes = lane;
6495 
6496 	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
6497 			       link_bw, &pipe_config->fdi_m_n);
6498 
6499 	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
6500 	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
6501 		pipe_config->pipe_bpp -= 2*3;
6502 		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
6503 			      pipe_config->pipe_bpp);
6504 		needs_recompute = true;
6505 		pipe_config->bw_constrained = true;
6506 
6507 		goto retry;
6508 	}
6509 
6510 	if (needs_recompute)
6511 		return RETRY;
6512 
6513 	return ret;
6514 }
6515 
6516 static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
6517 				     struct intel_crtc_state *pipe_config)
6518 {
6519 	if (pipe_config->pipe_bpp > 24)
6520 		return false;
6521 
6522 	/* HSW can handle pixel rate up to cdclk? */
6523 	if (IS_HASWELL(dev_priv))
6524 		return true;
6525 
6526 	/*
6527 	 * We compare against max which means we must take
6528 	 * the increased cdclk requirement into account when
6529 	 * calculating the new cdclk.
6530 	 *
6531 	 * Should measure whether using a lower cdclk w/o IPS would be preferable.
6532 	 */
6533 	return ilk_pipe_pixel_rate(pipe_config) <=
6534 		dev_priv->max_cdclk_freq * 95 / 100;
6535 }
6536 
6537 static void hsw_compute_ips_config(struct intel_crtc *crtc,
6538 				   struct intel_crtc_state *pipe_config)
6539 {
6540 	struct drm_device *dev = crtc->base.dev;
6541 	struct drm_i915_private *dev_priv = dev->dev_private;
6542 
6543 	pipe_config->ips_enabled = i915.enable_ips &&
6544 		hsw_crtc_supports_ips(crtc) &&
6545 		pipe_config_supports_ips(dev_priv, pipe_config);
6546 }
6547 
6548 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6549 {
6550 	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6551 
6552 	/* GDG double wide on either pipe, otherwise pipe A only */
6553 	return INTEL_INFO(dev_priv)->gen < 4 &&
6554 		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6555 }
6556 
6557 static int intel_crtc_compute_config(struct intel_crtc *crtc,
6558 				     struct intel_crtc_state *pipe_config)
6559 {
6560 	struct drm_device *dev = crtc->base.dev;
6561 	struct drm_i915_private *dev_priv = dev->dev_private;
6562 	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6563 
6564 	/* FIXME should check pixel clock limits on all platforms */
6565 	if (INTEL_INFO(dev)->gen < 4) {
6566 		int clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
6567 
6568 		/*
6569 		 * Enable double wide mode when the dot clock
6570 		 * is > 90% of the (display) core speed.
6571 		 */
6572 		if (intel_crtc_supports_double_wide(crtc) &&
6573 		    adjusted_mode->crtc_clock > clock_limit) {
6574 			clock_limit *= 2;
6575 			pipe_config->double_wide = true;
6576 		}
6577 
6578 		if (adjusted_mode->crtc_clock > clock_limit) {
6579 			DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
6580 				      adjusted_mode->crtc_clock, clock_limit,
6581 				      yesno(pipe_config->double_wide));
6582 			return -EINVAL;
6583 		}
6584 	}
6585 
6586 	/*
6587 	 * Pipe horizontal size must be even in:
6588 	 * - DVO ganged mode
6589 	 * - LVDS dual channel mode
6590 	 * - Double wide pipe
6591 	 */
6592 	if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
6593 	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
6594 		pipe_config->pipe_src_w &= ~1;
6595 
6596 	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
6597 	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
6598 	 */
6599 	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
6600 		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
6601 		return -EINVAL;
6602 
6603 	if (HAS_IPS(dev))
6604 		hsw_compute_ips_config(crtc, pipe_config);
6605 
6606 	if (pipe_config->has_pch_encoder)
6607 		return ironlake_fdi_compute_config(crtc, pipe_config);
6608 
6609 	return 0;
6610 }
6611 
6612 static int skylake_get_display_clock_speed(struct drm_device *dev)
6613 {
6614 	struct drm_i915_private *dev_priv = to_i915(dev);
6615 	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
6616 	uint32_t cdctl = I915_READ(CDCLK_CTL);
6617 	uint32_t linkrate;
6618 
6619 	if (!(lcpll1 & LCPLL_PLL_ENABLE))
6620 		return 24000; /* 24MHz is the cd freq with NSSC ref */
6621 
6622 	if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
6623 		return 540000;
6624 
6625 	linkrate = (I915_READ(DPLL_CTRL1) &
6626 		    DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
6627 
6628 	if (linkrate == DPLL_CTRL1_LINK_RATE_2160 ||
6629 	    linkrate == DPLL_CTRL1_LINK_RATE_1080) {
6630 		/* vco 8640 */
6631 		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6632 		case CDCLK_FREQ_450_432:
6633 			return 432000;
6634 		case CDCLK_FREQ_337_308:
6635 			return 308570;
6636 		case CDCLK_FREQ_675_617:
6637 			return 617140;
6638 		default:
6639 			WARN(1, "Unknown cd freq selection\n");
6640 		}
6641 	} else {
6642 		/* vco 8100 */
6643 		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6644 		case CDCLK_FREQ_450_432:
6645 			return 450000;
6646 		case CDCLK_FREQ_337_308:
6647 			return 337500;
6648 		case CDCLK_FREQ_675_617:
6649 			return 675000;
6650 		default:
6651 			WARN(1, "Unknown cd freq selection\n");
6652 		}
6653 	}
6654 
6655 	/* error case, do as if DPLL0 isn't enabled */
6656 	return 24000;
6657 }
6658 
6659 static int broxton_get_display_clock_speed(struct drm_device *dev)
6660 {
6661 	struct drm_i915_private *dev_priv = to_i915(dev);
6662 	uint32_t cdctl = I915_READ(CDCLK_CTL);
6663 	uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
6664 	uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
6665 	int cdclk;
6666 
6667 	if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
6668 		return 19200;
6669 
6670 	cdclk = 19200 * pll_ratio / 2;
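	/* e.g. a DE PLL ratio of 60 gives 19200 * 60 / 2 = 576000 kHz before the CD2X divider */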
6671 
6672 	switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
6673 	case BXT_CDCLK_CD2X_DIV_SEL_1:
6674 		return cdclk;  /* 576MHz or 624MHz */
6675 	case BXT_CDCLK_CD2X_DIV_SEL_1_5:
6676 		return cdclk * 2 / 3; /* 384MHz */
6677 	case BXT_CDCLK_CD2X_DIV_SEL_2:
6678 		return cdclk / 2; /* 288MHz */
6679 	case BXT_CDCLK_CD2X_DIV_SEL_4:
6680 		return cdclk / 4; /* 144MHz */
6681 	}
6682 
6683 	/* error case, do as if DE PLL isn't enabled */
6684 	return 19200;
6685 }
6686 
6687 static int broadwell_get_display_clock_speed(struct drm_device *dev)
6688 {
6689 	struct drm_i915_private *dev_priv = dev->dev_private;
6690 	uint32_t lcpll = I915_READ(LCPLL_CTL);
6691 	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6692 
6693 	if (lcpll & LCPLL_CD_SOURCE_FCLK)
6694 		return 800000;
6695 	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6696 		return 450000;
6697 	else if (freq == LCPLL_CLK_FREQ_450)
6698 		return 450000;
6699 	else if (freq == LCPLL_CLK_FREQ_54O_BDW)
6700 		return 540000;
6701 	else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
6702 		return 337500;
6703 	else
6704 		return 675000;
6705 }
6706 
6707 static int haswell_get_display_clock_speed(struct drm_device *dev)
6708 {
6709 	struct drm_i915_private *dev_priv = dev->dev_private;
6710 	uint32_t lcpll = I915_READ(LCPLL_CTL);
6711 	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6712 
6713 	if (lcpll & LCPLL_CD_SOURCE_FCLK)
6714 		return 800000;
6715 	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6716 		return 450000;
6717 	else if (freq == LCPLL_CLK_FREQ_450)
6718 		return 450000;
6719 	else if (IS_HSW_ULT(dev))
6720 		return 337500;
6721 	else
6722 		return 540000;
6723 }
6724 
6725 static int valleyview_get_display_clock_speed(struct drm_device *dev)
6726 {
6727 	return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
6728 				      CCK_DISPLAY_CLOCK_CONTROL);
6729 }
6730 
6731 static int ilk_get_display_clock_speed(struct drm_device *dev)
6732 {
6733 	return 450000;
6734 }
6735 
6736 static int i945_get_display_clock_speed(struct drm_device *dev)
6737 {
6738 	return 400000;
6739 }
6740 
6741 static int i915_get_display_clock_speed(struct drm_device *dev)
6742 {
6743 	return 333333;
6744 }
6745 
6746 static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
6747 {
6748 	return 200000;
6749 }
6750 
6751 static int pnv_get_display_clock_speed(struct drm_device *dev)
6752 {
6753 	u16 gcfgc = 0;
6754 
6755 	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6756 
6757 	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6758 	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
6759 		return 266667;
6760 	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
6761 		return 333333;
6762 	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
6763 		return 444444;
6764 	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
6765 		return 200000;
6766 	default:
6767 		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
6768 	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
6769 		return 133333;
6770 	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
6771 		return 166667;
6772 	}
6773 }
6774 
6775 static int i915gm_get_display_clock_speed(struct drm_device *dev)
6776 {
6777 	u16 gcfgc = 0;
6778 
6779 	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6780 
6781 	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
6782 		return 133333;
6783 	else {
6784 		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6785 		case GC_DISPLAY_CLOCK_333_MHZ:
6786 			return 333333;
6787 		default:
6788 		case GC_DISPLAY_CLOCK_190_200_MHZ:
6789 			return 190000;
6790 		}
6791 	}
6792 }
6793 
6794 static int i865_get_display_clock_speed(struct drm_device *dev)
6795 {
6796 	return 266667;
6797 }
6798 
6799 static int i85x_get_display_clock_speed(struct drm_device *dev)
6800 {
6801 	u16 hpllcc = 0;
6802 
6803 	/*
6804 	 * 852GM/852GMV only supports 133 MHz and the HPLLCC
6805 	 * encoding is different :(
6806 	 * FIXME is this the right way to detect 852GM/852GMV?
6807 	 */
6808 	if (dev->pdev->revision == 0x1)
6809 		return 133333;
6810 
6811 #if 0
6812 	pci_bus_read_config_word(dev->pdev->bus,
6813 				 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
6814 #endif
6815 
6816 	/* Assume that the hardware is in the high speed state.  This
6817 	 * should be the default.
6818 	 */
6819 	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
6820 	case GC_CLOCK_133_200:
6821 	case GC_CLOCK_133_200_2:
6822 	case GC_CLOCK_100_200:
6823 		return 200000;
6824 	case GC_CLOCK_166_250:
6825 		return 250000;
6826 	case GC_CLOCK_100_133:
6827 		return 133333;
6828 	case GC_CLOCK_133_266:
6829 	case GC_CLOCK_133_266_2:
6830 	case GC_CLOCK_166_266:
6831 		return 266667;
6832 	}
6833 
6834 	/* Shouldn't happen */
6835 	return 0;
6836 }
6837 
6838 static int i830_get_display_clock_speed(struct drm_device *dev)
6839 {
6840 	return 133333;
6841 }
6842 
6843 static unsigned int intel_hpll_vco(struct drm_device *dev)
6844 {
6845 	struct drm_i915_private *dev_priv = dev->dev_private;
6846 	static const unsigned int blb_vco[8] = {
6847 		[0] = 3200000,
6848 		[1] = 4000000,
6849 		[2] = 5333333,
6850 		[3] = 4800000,
6851 		[4] = 6400000,
6852 	};
6853 	static const unsigned int pnv_vco[8] = {
6854 		[0] = 3200000,
6855 		[1] = 4000000,
6856 		[2] = 5333333,
6857 		[3] = 4800000,
6858 		[4] = 2666667,
6859 	};
6860 	static const unsigned int cl_vco[8] = {
6861 		[0] = 3200000,
6862 		[1] = 4000000,
6863 		[2] = 5333333,
6864 		[3] = 6400000,
6865 		[4] = 3333333,
6866 		[5] = 3566667,
6867 		[6] = 4266667,
6868 	};
6869 	static const unsigned int elk_vco[8] = {
6870 		[0] = 3200000,
6871 		[1] = 4000000,
6872 		[2] = 5333333,
6873 		[3] = 4800000,
6874 	};
6875 	static const unsigned int ctg_vco[8] = {
6876 		[0] = 3200000,
6877 		[1] = 4000000,
6878 		[2] = 5333333,
6879 		[3] = 6400000,
6880 		[4] = 2666667,
6881 		[5] = 4266667,
6882 	};
6883 	const unsigned int *vco_table;
6884 	unsigned int vco;
6885 	uint8_t tmp = 0;
6886 
6887 	/* FIXME other chipsets? */
6888 	if (IS_GM45(dev))
6889 		vco_table = ctg_vco;
6890 	else if (IS_G4X(dev))
6891 		vco_table = elk_vco;
6892 	else if (IS_CRESTLINE(dev))
6893 		vco_table = cl_vco;
6894 	else if (IS_PINEVIEW(dev))
6895 		vco_table = pnv_vco;
6896 	else if (IS_G33(dev))
6897 		vco_table = blb_vco;
6898 	else
6899 		return 0;
6900 
6901 	tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);
6902 
6903 	vco = vco_table[tmp & 0x7];
6904 	if (vco == 0)
6905 		DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
6906 	else
6907 		DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
6908 
6909 	return vco;
6910 }
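/*
 * Example decode of the table lookup above: on GM45 the ctg_vco table
 * applies, so an HPLLVCO value of 0x03 in the low three bits selects
 * ctg_vco[3] == 6400000 kHz (a 6.4 GHz HPLL VCO).
 */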
6911 
6912 static int gm45_get_display_clock_speed(struct drm_device *dev)
6913 {
6914 	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
6915 	uint16_t tmp = 0;
6916 
6917 	pci_read_config_word(dev->pdev, GCFGC, &tmp);
6918 
6919 	cdclk_sel = (tmp >> 12) & 0x1;
6920 
6921 	switch (vco) {
6922 	case 2666667:
6923 	case 4000000:
6924 	case 5333333:
6925 		return cdclk_sel ? 333333 : 222222;
6926 	case 3200000:
6927 		return cdclk_sel ? 320000 : 228571;
6928 	default:
6929 		DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
6930 		return 222222;
6931 	}
6932 }
6933 
6934 static int i965gm_get_display_clock_speed(struct drm_device *dev)
6935 {
6936 	static const uint8_t div_3200[] = { 16, 10,  8 };
6937 	static const uint8_t div_4000[] = { 20, 12, 10 };
6938 	static const uint8_t div_5333[] = { 24, 16, 14 };
6939 	const uint8_t *div_table;
6940 	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
6941 	uint16_t tmp = 0;
6942 
6943 	pci_read_config_word(dev->pdev, GCFGC, &tmp);
6944 
6945 	cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
6946 
6947 	if (cdclk_sel >= ARRAY_SIZE(div_3200))
6948 		goto fail;
6949 
6950 	switch (vco) {
6951 	case 3200000:
6952 		div_table = div_3200;
6953 		break;
6954 	case 4000000:
6955 		div_table = div_4000;
6956 		break;
6957 	case 5333333:
6958 		div_table = div_5333;
6959 		break;
6960 	default:
6961 		goto fail;
6962 	}
6963 
6964 	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
6965 
6966 fail:
6967 	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
6968 	return 200000;
6969 }
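/*
 * Worked example for i965gm_get_display_clock_speed(): with a
 * 4000000 kHz HPLL VCO and GCFGC[12:8] == 2, cdclk_sel becomes 1,
 * div_4000[1] is 12, and CDCLK = DIV_ROUND_CLOSEST(4000000, 12)
 * = 333333 kHz.
 */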
6970 
6971 static int g33_get_display_clock_speed(struct drm_device *dev)
6972 {
6973 	static const uint8_t div_3200[] = { 12, 10,  8,  7, 5, 16 };
6974 	static const uint8_t div_4000[] = { 14, 12, 10,  8, 6, 20 };
6975 	static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
6976 	static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
6977 	const uint8_t *div_table;
6978 	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
6979 	uint16_t tmp = 0;
6980 
6981 	pci_read_config_word(dev->pdev, GCFGC, &tmp);
6982 
6983 	cdclk_sel = (tmp >> 4) & 0x7;
6984 
6985 	if (cdclk_sel >= ARRAY_SIZE(div_3200))
6986 		goto fail;
6987 
6988 	switch (vco) {
6989 	case 3200000:
6990 		div_table = div_3200;
6991 		break;
6992 	case 4000000:
6993 		div_table = div_4000;
6994 		break;
6995 	case 4800000:
6996 		div_table = div_4800;
6997 		break;
6998 	case 5333333:
6999 		div_table = div_5333;
7000 		break;
7001 	default:
7002 		goto fail;
7003 	}
7004 
7005 	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
7006 
7007 fail:
7008 	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
7009 	return 190476;
7010 }
7011 
7012 static void
7013 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
7014 {
7015 	while (*num > DATA_LINK_M_N_MASK ||
7016 	       *den > DATA_LINK_M_N_MASK) {
7017 		*num >>= 1;
7018 		*den >>= 1;
7019 	}
7020 }
7021 
7022 static void compute_m_n(unsigned int m, unsigned int n,
7023 			uint32_t *ret_m, uint32_t *ret_n)
7024 {
7025 	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7026 	*ret_m = div_u64((uint64_t) m * *ret_n, n);
7027 	intel_reduce_m_n_ratio(ret_m, ret_n);
7028 }
7029 
7030 void
7031 intel_link_compute_m_n(int bits_per_pixel, int nlanes,
7032 		       int pixel_clock, int link_clock,
7033 		       struct intel_link_m_n *m_n)
7034 {
7035 	m_n->tu = 64;
7036 
7037 	compute_m_n(bits_per_pixel * pixel_clock,
7038 		    link_clock * nlanes * 8,
7039 		    &m_n->gmch_m, &m_n->gmch_n);
7040 
7041 	compute_m_n(pixel_clock, link_clock,
7042 		    &m_n->link_m, &m_n->link_n);
7043 }
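/*
 * Illustrative sketch (not driver code): computing the DP link M/N
 * values for a 148500 kHz pixel clock at 24 bpp over 4 lanes running
 * at a 270000 kHz link clock.  The gmch ratio starts out as
 * 24 * 148500 : 270000 * 4 * 8 == 3564000 : 8640000 (~0.4125);
 * compute_m_n() then picks n as a power of two capped at
 * DATA_LINK_N_MAX and rescales m to preserve that ratio.
 */
#if 0
/* purely illustrative, not part of the driver */
static void example_compute_link_m_n(void)
{
	struct intel_link_m_n m_n;

	intel_link_compute_m_n(24, 4, 148500, 270000, &m_n);
	/* m_n.tu == 64, m_n.gmch_m / m_n.gmch_n ~= 0.4125 */
}
#endif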
7044 
7045 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7046 {
7047 	if (i915.panel_use_ssc >= 0)
7048 		return i915.panel_use_ssc != 0;
7049 	return dev_priv->vbt.lvds_use_ssc
7050 		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7051 }
7052 
7053 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
7054 {
7055 	return (1 << dpll->n) << 16 | dpll->m2;
7056 }
7057 
7058 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
7059 {
7060 	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7061 }
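/*
 * From the packing above: i9xx_dpll_compute_fp() places N at bit 16,
 * M1 at bit 8 and M2 at bit 0 of the FP register, while the Pineview
 * variant stores (1 << n) in the high 16 bits with m2 alone below.
 */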
7062 
7063 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7064 				     struct intel_crtc_state *crtc_state,
7065 				     intel_clock_t *reduced_clock)
7066 {
7067 	struct drm_device *dev = crtc->base.dev;
7068 	u32 fp, fp2 = 0;
7069 
7070 	if (IS_PINEVIEW(dev)) {
7071 		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7072 		if (reduced_clock)
7073 			fp2 = pnv_dpll_compute_fp(reduced_clock);
7074 	} else {
7075 		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7076 		if (reduced_clock)
7077 			fp2 = i9xx_dpll_compute_fp(reduced_clock);
7078 	}
7079 
7080 	crtc_state->dpll_hw_state.fp0 = fp;
7081 
7082 	crtc->lowfreq_avail = false;
7083 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7084 	    reduced_clock) {
7085 		crtc_state->dpll_hw_state.fp1 = fp2;
7086 		crtc->lowfreq_avail = true;
7087 	} else {
7088 		crtc_state->dpll_hw_state.fp1 = fp;
7089 	}
7090 }
7091 
7092 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum i915_pipe
7093 		pipe)
7094 {
7095 	u32 reg_val;
7096 
7097 	/*
7098 	 * The PLLB opamp always calibrates to the max value of 0x3f, so
7099 	 * force-enable it and set it to a reasonable value instead.
7100 	 */
7101 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7102 	reg_val &= 0xffffff00;
7103 	reg_val |= 0x00000030;
7104 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7105 
7106 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7107 	reg_val &= 0x00ffffff;
7108 	reg_val |= 0x8c000000;
7109 	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7110 
7111 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7112 	reg_val &= 0xffffff00;
7113 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7114 
7115 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7116 	reg_val &= 0x00ffffff;
7117 	reg_val |= 0xb0000000;
7118 	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7119 }
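/*
 * The function above repeats a read-modify-write pattern on DPIO
 * registers.  A minimal sketch of that pattern as a helper (purely
 * illustrative, not part of the driver):
 */
#if 0
static void vlv_dpio_rmw(struct drm_i915_private *dev_priv,
			 enum i915_pipe pipe, int reg, u32 clear, u32 set)
{
	u32 val = vlv_dpio_read(dev_priv, pipe, reg);

	val &= ~clear;
	val |= set;
	vlv_dpio_write(dev_priv, pipe, reg, val);
}
#endif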
7120 
7121 static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
7122 					 struct intel_link_m_n *m_n)
7123 {
7124 	struct drm_device *dev = crtc->base.dev;
7125 	struct drm_i915_private *dev_priv = dev->dev_private;
7126 	int pipe = crtc->pipe;
7127 
7128 	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7129 	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
7130 	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
7131 	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
7132 }
7133 
7134 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
7135 					 struct intel_link_m_n *m_n,
7136 					 struct intel_link_m_n *m2_n2)
7137 {
7138 	struct drm_device *dev = crtc->base.dev;
7139 	struct drm_i915_private *dev_priv = dev->dev_private;
7140 	int pipe = crtc->pipe;
7141 	enum transcoder transcoder = crtc->config->cpu_transcoder;
7142 
7143 	if (INTEL_INFO(dev)->gen >= 5) {
7144 		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
7145 		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
7146 		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
7147 		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
7148 		/* Program the M2_N2 registers only where they exist
7149 		 * (gen < 8, plus CHV) and only if DRRS is supported, so
7150 		 * that the registers are not accessed unnecessarily.
7151 		 */
7152 		if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
7153 			crtc->config->has_drrs) {
7154 			I915_WRITE(PIPE_DATA_M2(transcoder),
7155 					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
7156 			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
7157 			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
7158 			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
7159 		}
7160 	} else {
7161 		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7162 		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
7163 		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
7164 		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
7165 	}
7166 }
7167 
7168 void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
7169 {
7170 	struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7171 
7172 	if (m_n == M1_N1) {
7173 		dp_m_n = &crtc->config->dp_m_n;
7174 		dp_m2_n2 = &crtc->config->dp_m2_n2;
7175 	} else if (m_n == M2_N2) {
7176 
7177 		/*
7178 		 * The M2_N2 registers are not supported here, so the m2_n2
7179 		 * divider value must be programmed into M1_N1 instead.
7180 		 */
7181 		dp_m_n = &crtc->config->dp_m2_n2;
7182 	} else {
7183 		DRM_ERROR("Unsupported divider value\n");
7184 		return;
7185 	}
7186 
7187 	if (crtc->config->has_pch_encoder)
7188 		intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
7189 	else
7190 		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
7191 }
7192 
7193 static void vlv_compute_dpll(struct intel_crtc *crtc,
7194 			     struct intel_crtc_state *pipe_config)
7195 {
7196 	pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
7197 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7198 	if (crtc->pipe != PIPE_A)
7199 		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7200 
7201 	/* DPLL not used with DSI, but still need the rest set up */
7202 	if (!pipe_config->has_dsi_encoder)
7203 		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7204 			DPLL_EXT_BUFFER_ENABLE_VLV;
7205 
7206 	pipe_config->dpll_hw_state.dpll_md =
7207 		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7208 }
7209 
7210 static void chv_compute_dpll(struct intel_crtc *crtc,
7211 			     struct intel_crtc_state *pipe_config)
7212 {
7213 	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7214 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7215 	if (crtc->pipe != PIPE_A)
7216 		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7217 
7218 	/* DPLL not used with DSI, but still need the rest set up */
7219 	if (!pipe_config->has_dsi_encoder)
7220 		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7221 
7222 	pipe_config->dpll_hw_state.dpll_md =
7223 		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7224 }
7225 
7226 static void vlv_prepare_pll(struct intel_crtc *crtc,
7227 			    const struct intel_crtc_state *pipe_config)
7228 {
7229 	struct drm_device *dev = crtc->base.dev;
7230 	struct drm_i915_private *dev_priv = dev->dev_private;
7231 	enum i915_pipe pipe = crtc->pipe;
7232 	u32 mdiv;
7233 	u32 bestn, bestm1, bestm2, bestp1, bestp2;
7234 	u32 coreclk, reg_val;
7235 
7236 	/* Enable Refclk */
7237 	I915_WRITE(DPLL(pipe),
7238 		   pipe_config->dpll_hw_state.dpll &
7239 		   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
7240 
7241 	/* No need to actually set up the DPLL with DSI */
7242 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7243 		return;
7244 
7245 	mutex_lock(&dev_priv->sb_lock);
7246 
7247 	bestn = pipe_config->dpll.n;
7248 	bestm1 = pipe_config->dpll.m1;
7249 	bestm2 = pipe_config->dpll.m2;
7250 	bestp1 = pipe_config->dpll.p1;
7251 	bestp2 = pipe_config->dpll.p2;
7252 
7253 	/* See eDP HDMI DPIO driver vbios notes doc */
7254 
7255 	/* PLL B needs special handling */
7256 	if (pipe == PIPE_B)
7257 		vlv_pllb_recal_opamp(dev_priv, pipe);
7258 
7259 	/* Set up Tx target for periodic Rcomp update */
7260 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
7261 
7262 	/* Disable target IRef on PLL */
7263 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
7264 	reg_val &= 0x00ffffff;
7265 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
7266 
7267 	/* Disable fast lock */
7268 	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
7269 
7270 	/* Set idtafcrecal before PLL is enabled */
7271 	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
7272 	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
7273 	mdiv |= ((bestn << DPIO_N_SHIFT));
7274 	mdiv |= (1 << DPIO_K_SHIFT);
7275 
7276 	/*
7277 	 * The post divider depends on the pixel clock rate and on DAC vs
7278 	 * digital output (and LVDS, but we don't support that).
7279 	 * Note: don't use the DAC post divider as it seems unstable.
7280 	 */
7281 	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
7282 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7283 
7284 	mdiv |= DPIO_ENABLE_CALIBRATION;
7285 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7286 
7287 	/* Set HBR and RBR LPF coefficients */
7288 	if (pipe_config->port_clock == 162000 ||
7289 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
7290 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
7291 		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7292 				 0x009f0003);
7293 	else
7294 		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7295 				 0x00d0000f);
7296 
7297 	if (pipe_config->has_dp_encoder) {
7298 		/* Use SSC source */
7299 		if (pipe == PIPE_A)
7300 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7301 					 0x0df40000);
7302 		else
7303 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7304 					 0x0df70000);
7305 	} else { /* HDMI or VGA */
7306 		/* Use bend source */
7307 		if (pipe == PIPE_A)
7308 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7309 					 0x0df70000);
7310 		else
7311 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7312 					 0x0df40000);
7313 	}
7314 
7315 	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
7316 	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
7317 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
7318 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
7319 		coreclk |= 0x01000000;
7320 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
7321 
7322 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
7323 	mutex_unlock(&dev_priv->sb_lock);
7324 }
7325 
7326 static void chv_prepare_pll(struct intel_crtc *crtc,
7327 			    const struct intel_crtc_state *pipe_config)
7328 {
7329 	struct drm_device *dev = crtc->base.dev;
7330 	struct drm_i915_private *dev_priv = dev->dev_private;
7331 	enum i915_pipe pipe = crtc->pipe;
7332 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
7333 	u32 loopfilter, tribuf_calcntr;
7334 	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
7335 	u32 dpio_val;
7336 	int vco;
7337 
7338 	/* Enable Refclk and SSC */
7339 	I915_WRITE(DPLL(pipe),
7340 		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
7341 
7342 	/* No need to actually set up the DPLL with DSI */
7343 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7344 		return;
7345 
7346 	bestn = pipe_config->dpll.n;
7347 	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
7348 	bestm1 = pipe_config->dpll.m1;
7349 	bestm2 = pipe_config->dpll.m2 >> 22;
7350 	bestp1 = pipe_config->dpll.p1;
7351 	bestp2 = pipe_config->dpll.p2;
7352 	vco = pipe_config->dpll.vco;
7353 	dpio_val = 0;
7354 	loopfilter = 0;
7355 
7356 	mutex_lock(&dev_priv->sb_lock);
7357 
7358 	/* p1 and p2 divider */
7359 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
7360 			5 << DPIO_CHV_S1_DIV_SHIFT |
7361 			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
7362 			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
7363 			1 << DPIO_CHV_K_DIV_SHIFT);
7364 
7365 	/* Feedback post-divider - m2 */
7366 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
7367 
7368 	/* Feedback refclk divider - n and m1 */
7369 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
7370 			DPIO_CHV_M1_DIV_BY_2 |
7371 			1 << DPIO_CHV_N_DIV_SHIFT);
7372 
7373 	/* M2 fraction division */
7374 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
7375 
7376 	/* M2 fraction division enable */
7377 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
7378 	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
7379 	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
7380 	if (bestm2_frac)
7381 		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
7382 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
7383 
7384 	/* Program digital lock detect threshold */
7385 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
7386 	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
7387 					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
7388 	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
7389 	if (!bestm2_frac)
7390 		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
7391 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
7392 
7393 	/* Loop filter */
7394 	if (vco == 5400000) {
7395 		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
7396 		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
7397 		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
7398 		tribuf_calcntr = 0x9;
7399 	} else if (vco <= 6200000) {
7400 		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
7401 		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
7402 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7403 		tribuf_calcntr = 0x9;
7404 	} else if (vco <= 6480000) {
7405 		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7406 		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7407 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7408 		tribuf_calcntr = 0x8;
7409 	} else {
7410 		/* Not supported. Apply the same limits as in the max case */
7411 		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7412 		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7413 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7414 		tribuf_calcntr = 0;
7415 	}
7416 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
7417 
7418 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
7419 	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
7420 	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
7421 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
7422 
7423 	/* AFC Recal */
7424 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
7425 			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
7426 			DPIO_AFC_RECAL);
7427 
7428 	mutex_unlock(&dev_priv->sb_lock);
7429 }
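/*
 * Note on the m2 handling in chv_prepare_pll(): pipe_config->dpll.m2
 * carries the CHV feedback divider as a fixed-point value, with the
 * integer part in bits 31:22 (bestm2) and the 22-bit fraction in
 * bits 21:0 (bestm2_frac).
 */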
7430 
7431 /**
7432  * vlv_force_pll_on - forcibly enable just the PLL
7433  * @dev: drm device
7434  * @pipe: pipe PLL to enable
7435  * @dpll: PLL configuration
7436  *
7437  * Enable the PLL for @pipe using the supplied @dpll config. To be used
7438  * in cases where we need the PLL enabled even when @pipe is not going to
7439  * be enabled.
7440  */
7441 int vlv_force_pll_on(struct drm_device *dev, enum i915_pipe pipe,
7442 		     const struct dpll *dpll)
7443 {
7444 	struct intel_crtc *crtc =
7445 		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
7446 	struct intel_crtc_state *pipe_config;
7447 
7448 	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7449 	if (!pipe_config)
7450 		return -ENOMEM;
7451 
7452 	pipe_config->base.crtc = &crtc->base;
7453 	pipe_config->pixel_multiplier = 1;
7454 	pipe_config->dpll = *dpll;
7455 
7456 	if (IS_CHERRYVIEW(dev)) {
7457 		chv_compute_dpll(crtc, pipe_config);
7458 		chv_prepare_pll(crtc, pipe_config);
7459 		chv_enable_pll(crtc, pipe_config);
7460 	} else {
7461 		vlv_compute_dpll(crtc, pipe_config);
7462 		vlv_prepare_pll(crtc, pipe_config);
7463 		vlv_enable_pll(crtc, pipe_config);
7464 	}
7465 
7466 	kfree(pipe_config);
7467 
7468 	return 0;
7469 }
7470 
7471 /**
7472  * vlv_force_pll_off - forcibly disable just the PLL
7473  * @dev: drm device
7474  * @pipe: pipe PLL to disable
7475  *
7476  * Disable the PLL for @pipe. To be used in cases where we need
7477  * the PLL disabled even when @pipe is not going to be enabled.
7478  */
7479 void vlv_force_pll_off(struct drm_device *dev, enum i915_pipe pipe)
7480 {
7481 	if (IS_CHERRYVIEW(dev))
7482 		chv_disable_pll(to_i915(dev), pipe);
7483 	else
7484 		vlv_disable_pll(to_i915(dev), pipe);
7485 }
7486 
7487 static void i9xx_compute_dpll(struct intel_crtc *crtc,
7488 			      struct intel_crtc_state *crtc_state,
7489 			      intel_clock_t *reduced_clock)
7490 {
7491 	struct drm_device *dev = crtc->base.dev;
7492 	struct drm_i915_private *dev_priv = dev->dev_private;
7493 	u32 dpll;
7494 	bool is_sdvo;
7495 	struct dpll *clock = &crtc_state->dpll;
7496 
7497 	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7498 
7499 	is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7500 		intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);
7501 
7502 	dpll = DPLL_VGA_MODE_DIS;
7503 
7504 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
7505 		dpll |= DPLLB_MODE_LVDS;
7506 	else
7507 		dpll |= DPLLB_MODE_DAC_SERIAL;
7508 
7509 	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
7510 		dpll |= (crtc_state->pixel_multiplier - 1)
7511 			<< SDVO_MULTIPLIER_SHIFT_HIRES;
7512 	}
7513 
7514 	if (is_sdvo)
7515 		dpll |= DPLL_SDVO_HIGH_SPEED;
7516 
7517 	if (crtc_state->has_dp_encoder)
7518 		dpll |= DPLL_SDVO_HIGH_SPEED;
7519 
7520 	/* compute bitmask from p1 value */
7521 	if (IS_PINEVIEW(dev))
7522 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
7523 	else {
7524 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7525 		if (IS_G4X(dev) && reduced_clock)
7526 			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7527 	}
7528 	switch (clock->p2) {
7529 	case 5:
7530 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7531 		break;
7532 	case 7:
7533 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7534 		break;
7535 	case 10:
7536 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7537 		break;
7538 	case 14:
7539 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7540 		break;
7541 	}
7542 	if (INTEL_INFO(dev)->gen >= 4)
7543 		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
7544 
7545 	if (crtc_state->sdvo_tv_clock)
7546 		dpll |= PLL_REF_INPUT_TVCLKINBC;
7547 	else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7548 		 intel_panel_use_ssc(dev_priv))
7549 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7550 	else
7551 		dpll |= PLL_REF_INPUT_DREFCLK;
7552 
7553 	dpll |= DPLL_VCO_ENABLE;
7554 	crtc_state->dpll_hw_state.dpll = dpll;
7555 
7556 	if (INTEL_INFO(dev)->gen >= 4) {
7557 		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
7558 			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
7559 		crtc_state->dpll_hw_state.dpll_md = dpll_md;
7560 	}
7561 }
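/*
 * Worked example of the P1 encoding in i9xx_compute_dpll(): the field
 * is a one-hot bitmask, so p1 == 3 becomes (1 << 2) == 0x4 before
 * being shifted into place by DPLL_FPA01_P1_POST_DIV_SHIFT.
 */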
7562 
7563 static void i8xx_compute_dpll(struct intel_crtc *crtc,
7564 			      struct intel_crtc_state *crtc_state,
7565 			      intel_clock_t *reduced_clock)
7566 {
7567 	struct drm_device *dev = crtc->base.dev;
7568 	struct drm_i915_private *dev_priv = dev->dev_private;
7569 	u32 dpll;
7570 	struct dpll *clock = &crtc_state->dpll;
7571 
7572 	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7573 
7574 	dpll = DPLL_VGA_MODE_DIS;
7575 
7576 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7577 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7578 	} else {
7579 		if (clock->p1 == 2)
7580 			dpll |= PLL_P1_DIVIDE_BY_TWO;
7581 		else
7582 			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7583 		if (clock->p2 == 4)
7584 			dpll |= PLL_P2_DIVIDE_BY_4;
7585 	}
7586 
7587 	if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
7588 		dpll |= DPLL_DVO_2X_MODE;
7589 
7590 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7591 	    intel_panel_use_ssc(dev_priv))
7592 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7593 	else
7594 		dpll |= PLL_REF_INPUT_DREFCLK;
7595 
7596 	dpll |= DPLL_VCO_ENABLE;
7597 	crtc_state->dpll_hw_state.dpll = dpll;
7598 }
7599 
7600 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
7601 {
7602 	struct drm_device *dev = intel_crtc->base.dev;
7603 	struct drm_i915_private *dev_priv = dev->dev_private;
7604 	enum i915_pipe pipe = intel_crtc->pipe;
7605 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
7606 	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
7607 	uint32_t crtc_vtotal, crtc_vblank_end;
7608 	int vsyncshift = 0;
7609 
7610 	/* We need to be careful not to change the adjusted mode, as otherwise
7611 	 * the hw state checker will get angry at the mismatch. */
7612 	crtc_vtotal = adjusted_mode->crtc_vtotal;
7613 	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
7614 
7615 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
7616 		/* the chip adds 2 halflines automatically */
7617 		crtc_vtotal -= 1;
7618 		crtc_vblank_end -= 1;
7619 
7620 		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
7621 			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
7622 		else
7623 			vsyncshift = adjusted_mode->crtc_hsync_start -
7624 				adjusted_mode->crtc_htotal / 2;
7625 		if (vsyncshift < 0)
7626 			vsyncshift += adjusted_mode->crtc_htotal;
7627 	}
7628 
7629 	if (INTEL_INFO(dev)->gen > 3)
7630 		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
7631 
7632 	I915_WRITE(HTOTAL(cpu_transcoder),
7633 		   (adjusted_mode->crtc_hdisplay - 1) |
7634 		   ((adjusted_mode->crtc_htotal - 1) << 16));
7635 	I915_WRITE(HBLANK(cpu_transcoder),
7636 		   (adjusted_mode->crtc_hblank_start - 1) |
7637 		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
7638 	I915_WRITE(HSYNC(cpu_transcoder),
7639 		   (adjusted_mode->crtc_hsync_start - 1) |
7640 		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
7641 
7642 	I915_WRITE(VTOTAL(cpu_transcoder),
7643 		   (adjusted_mode->crtc_vdisplay - 1) |
7644 		   ((crtc_vtotal - 1) << 16));
7645 	I915_WRITE(VBLANK(cpu_transcoder),
7646 		   (adjusted_mode->crtc_vblank_start - 1) |
7647 		   ((crtc_vblank_end - 1) << 16));
7648 	I915_WRITE(VSYNC(cpu_transcoder),
7649 		   (adjusted_mode->crtc_vsync_start - 1) |
7650 		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
7651 
7652 	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
7653 	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
7654 	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
7655 	 * bits. */
7656 	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
7657 	    (pipe == PIPE_B || pipe == PIPE_C))
7658 		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
7659 
7660 }
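/*
 * Worked example of the timing register packing above, assuming a
 * 1920x1080 mode with crtc_htotal == 2200: HTOTAL is written as
 * (1920 - 1) | ((2200 - 1) << 16) == 0x0897077f.
 */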
7661 
7662 static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
7663 {
7664 	struct drm_device *dev = intel_crtc->base.dev;
7665 	struct drm_i915_private *dev_priv = dev->dev_private;
7666 	enum i915_pipe pipe = intel_crtc->pipe;
7667 
7668 	/* pipesrc controls the size that is scaled from, which should
7669 	 * always be the user's requested size.
7670 	 */
7671 	I915_WRITE(PIPESRC(pipe),
7672 		   ((intel_crtc->config->pipe_src_w - 1) << 16) |
7673 		   (intel_crtc->config->pipe_src_h - 1));
7674 }
7675 
7676 static void intel_get_pipe_timings(struct intel_crtc *crtc,
7677 				   struct intel_crtc_state *pipe_config)
7678 {
7679 	struct drm_device *dev = crtc->base.dev;
7680 	struct drm_i915_private *dev_priv = dev->dev_private;
7681 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7682 	uint32_t tmp;
7683 
7684 	tmp = I915_READ(HTOTAL(cpu_transcoder));
7685 	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
7686 	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
7687 	tmp = I915_READ(HBLANK(cpu_transcoder));
7688 	pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
7689 	pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
7690 	tmp = I915_READ(HSYNC(cpu_transcoder));
7691 	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
7692 	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
7693 
7694 	tmp = I915_READ(VTOTAL(cpu_transcoder));
7695 	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
7696 	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
7697 	tmp = I915_READ(VBLANK(cpu_transcoder));
7698 	pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
7699 	pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
7700 	tmp = I915_READ(VSYNC(cpu_transcoder));
7701 	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
7702 	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
7703 
7704 	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
7705 		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
7706 		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
7707 		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
7708 	}
7709 }
7710 
7711 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
7712 				    struct intel_crtc_state *pipe_config)
7713 {
7714 	struct drm_device *dev = crtc->base.dev;
7715 	struct drm_i915_private *dev_priv = dev->dev_private;
7716 	u32 tmp;
7717 
7718 	tmp = I915_READ(PIPESRC(crtc->pipe));
7719 	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7720 	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7721 
7722 	pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7723 	pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
7724 }
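/*
 * This is the inverse of intel_set_pipe_src_size(): PIPESRC packs
 * (width - 1) in the high word and (height - 1) in the low word,
 * so e.g. a raw value of 0x077f0437 decodes to 1920x1080.
 */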
7725 
7726 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
7727 				 struct intel_crtc_state *pipe_config)
7728 {
7729 	mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7730 	mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7731 	mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7732 	mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
7733 
7734 	mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7735 	mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7736 	mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7737 	mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
7738 
7739 	mode->flags = pipe_config->base.adjusted_mode.flags;
7740 	mode->type = DRM_MODE_TYPE_DRIVER;
7741 
7742 	mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7743 	mode->flags |= pipe_config->base.adjusted_mode.flags;
7744 
7745 	mode->hsync = drm_mode_hsync(mode);
7746 	mode->vrefresh = drm_mode_vrefresh(mode);
7747 	drm_mode_set_name(mode);
7748 }
7749 
7750 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
7751 {
7752 	struct drm_device *dev = intel_crtc->base.dev;
7753 	struct drm_i915_private *dev_priv = dev->dev_private;
7754 	uint32_t pipeconf;
7755 
7756 	pipeconf = 0;
7757 
7758 	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
7759 	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
7760 		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
7761 
7762 	if (intel_crtc->config->double_wide)
7763 		pipeconf |= PIPECONF_DOUBLE_WIDE;
7764 
7765 	/* only g4x and later have fancy bpc/dither controls */
7766 	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
7767 		/* Bspec claims that we can't use dithering for 30bpp pipes. */
7768 		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
7769 			pipeconf |= PIPECONF_DITHER_EN |
7770 				    PIPECONF_DITHER_TYPE_SP;
7771 
7772 		switch (intel_crtc->config->pipe_bpp) {
7773 		case 18:
7774 			pipeconf |= PIPECONF_6BPC;
7775 			break;
7776 		case 24:
7777 			pipeconf |= PIPECONF_8BPC;
7778 			break;
7779 		case 30:
7780 			pipeconf |= PIPECONF_10BPC;
7781 			break;
7782 		default:
7783 			/* Case prevented by intel_choose_pipe_bpp_dither. */
7784 			BUG();
7785 		}
7786 	}
7787 
7788 	if (HAS_PIPE_CXSR(dev)) {
7789 		if (intel_crtc->lowfreq_avail) {
7790 			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
7791 			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
7792 		} else {
7793 			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
7794 		}
7795 	}
7796 
7797 	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
7798 		if (INTEL_INFO(dev)->gen < 4 ||
7799 		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
7800 			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
7801 		else
7802 			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
7803 	} else
7804 		pipeconf |= PIPECONF_PROGRESSIVE;
7805 
7806 	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
7807 	     intel_crtc->config->limited_color_range)
7808 		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
7809 
7810 	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
7811 	POSTING_READ(PIPECONF(intel_crtc->pipe));
7812 }
7813 
7814 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7815 				   struct intel_crtc_state *crtc_state)
7816 {
7817 	struct drm_device *dev = crtc->base.dev;
7818 	struct drm_i915_private *dev_priv = dev->dev_private;
7819 	const intel_limit_t *limit;
7820 	int refclk = 48000;
7821 
7822 	memset(&crtc_state->dpll_hw_state, 0,
7823 	       sizeof(crtc_state->dpll_hw_state));
7824 
7825 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7826 		if (intel_panel_use_ssc(dev_priv)) {
7827 			refclk = dev_priv->vbt.lvds_ssc_freq;
7828 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7829 		}
7830 
7831 		limit = &intel_limits_i8xx_lvds;
7832 	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) {
7833 		limit = &intel_limits_i8xx_dvo;
7834 	} else {
7835 		limit = &intel_limits_i8xx_dac;
7836 	}
7837 
7838 	if (!crtc_state->clock_set &&
7839 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7840 				 refclk, NULL, &crtc_state->dpll)) {
7841 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
7842 		return -EINVAL;
7843 	}
7844 
7845 	i8xx_compute_dpll(crtc, crtc_state, NULL);
7846 
7847 	return 0;
7848 }
7849 
7850 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7851 				  struct intel_crtc_state *crtc_state)
7852 {
7853 	struct drm_device *dev = crtc->base.dev;
7854 	struct drm_i915_private *dev_priv = dev->dev_private;
7855 	const intel_limit_t *limit;
7856 	int refclk = 96000;
7857 
7858 	memset(&crtc_state->dpll_hw_state, 0,
7859 	       sizeof(crtc_state->dpll_hw_state));
7860 
7861 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7862 		if (intel_panel_use_ssc(dev_priv)) {
7863 			refclk = dev_priv->vbt.lvds_ssc_freq;
7864 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7865 		}
7866 
7867 		if (intel_is_dual_link_lvds(dev))
7868 			limit = &intel_limits_g4x_dual_channel_lvds;
7869 		else
7870 			limit = &intel_limits_g4x_single_channel_lvds;
7871 	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
7872 		   intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
7873 		limit = &intel_limits_g4x_hdmi;
7874 	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
7875 		limit = &intel_limits_g4x_sdvo;
7876 	} else {
7877 		/* Use the i9xx SDVO limits for all other output types */
7878 		limit = &intel_limits_i9xx_sdvo;
7879 	}
7880 
7881 	if (!crtc_state->clock_set &&
7882 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7883 				refclk, NULL, &crtc_state->dpll)) {
7884 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
7885 		return -EINVAL;
7886 	}
7887 
7888 	i9xx_compute_dpll(crtc, crtc_state, NULL);
7889 
7890 	return 0;
7891 }
7892 
7893 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7894 				  struct intel_crtc_state *crtc_state)
7895 {
7896 	struct drm_device *dev = crtc->base.dev;
7897 	struct drm_i915_private *dev_priv = dev->dev_private;
7898 	const intel_limit_t *limit;
7899 	int refclk = 96000;
7900 
7901 	memset(&crtc_state->dpll_hw_state, 0,
7902 	       sizeof(crtc_state->dpll_hw_state));
7903 
7904 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7905 		if (intel_panel_use_ssc(dev_priv)) {
7906 			refclk = dev_priv->vbt.lvds_ssc_freq;
7907 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7908 		}
7909 
7910 		limit = &intel_limits_pineview_lvds;
7911 	} else {
7912 		limit = &intel_limits_pineview_sdvo;
7913 	}
7914 
7915 	if (!crtc_state->clock_set &&
7916 	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7917 				refclk, NULL, &crtc_state->dpll)) {
7918 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
7919 		return -EINVAL;
7920 	}
7921 
7922 	i9xx_compute_dpll(crtc, crtc_state, NULL);
7923 
7924 	return 0;
7925 }
7926 
7927 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7928 				   struct intel_crtc_state *crtc_state)
7929 {
7930 	struct drm_device *dev = crtc->base.dev;
7931 	struct drm_i915_private *dev_priv = dev->dev_private;
7932 	const intel_limit_t *limit;
7933 	int refclk = 96000;
7934 
7935 	memset(&crtc_state->dpll_hw_state, 0,
7936 	       sizeof(crtc_state->dpll_hw_state));
7937 
7938 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7939 		if (intel_panel_use_ssc(dev_priv)) {
7940 			refclk = dev_priv->vbt.lvds_ssc_freq;
7941 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7942 		}
7943 
7944 		limit = &intel_limits_i9xx_lvds;
7945 	} else {
7946 		limit = &intel_limits_i9xx_sdvo;
7947 	}
7948 
7949 	if (!crtc_state->clock_set &&
7950 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7951 				 refclk, NULL, &crtc_state->dpll)) {
7952 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
7953 		return -EINVAL;
7954 	}
7955 
7956 	i9xx_compute_dpll(crtc, crtc_state, NULL);
7957 
7958 	return 0;
7959 }
7960 
7961 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
7962 				  struct intel_crtc_state *crtc_state)
7963 {
7964 	int refclk = 100000;
7965 	const intel_limit_t *limit = &intel_limits_chv;
7966 
7967 	memset(&crtc_state->dpll_hw_state, 0,
7968 	       sizeof(crtc_state->dpll_hw_state));
7969 
7970 	if (!crtc_state->clock_set &&
7971 	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7972 				refclk, NULL, &crtc_state->dpll)) {
7973 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
7974 		return -EINVAL;
7975 	}
7976 
7977 	chv_compute_dpll(crtc, crtc_state);
7978 
7979 	return 0;
7980 }
7981 
7982 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
7983 				  struct intel_crtc_state *crtc_state)
7984 {
7985 	int refclk = 100000;
7986 	const intel_limit_t *limit = &intel_limits_vlv;
7987 
7988 	memset(&crtc_state->dpll_hw_state, 0,
7989 	       sizeof(crtc_state->dpll_hw_state));
7990 
7991 	if (!crtc_state->clock_set &&
7992 	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7993 				refclk, NULL, &crtc_state->dpll)) {
7994 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
7995 		return -EINVAL;
7996 	}
7997 
7998 	vlv_compute_dpll(crtc, crtc_state);
7999 
8000 	return 0;
8001 }
8002 
8003 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
8004 				 struct intel_crtc_state *pipe_config)
8005 {
8006 	struct drm_device *dev = crtc->base.dev;
8007 	struct drm_i915_private *dev_priv = dev->dev_private;
8008 	uint32_t tmp;
8009 
8010 	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
8011 		return;
8012 
8013 	tmp = I915_READ(PFIT_CONTROL);
8014 	if (!(tmp & PFIT_ENABLE))
8015 		return;
8016 
8017 	/* Check whether the pfit is attached to our pipe. */
8018 	if (INTEL_INFO(dev)->gen < 4) {
8019 		if (crtc->pipe != PIPE_B)
8020 			return;
8021 	} else {
8022 		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8023 			return;
8024 	}
8025 
8026 	pipe_config->gmch_pfit.control = tmp;
8027 	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
8028 }
8029 
8030 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8031 			       struct intel_crtc_state *pipe_config)
8032 {
8033 	struct drm_device *dev = crtc->base.dev;
8034 	struct drm_i915_private *dev_priv = dev->dev_private;
8035 	int pipe = pipe_config->cpu_transcoder;
8036 	intel_clock_t clock;
8037 	u32 mdiv;
8038 	int refclk = 100000;
8039 
8040 	/* In case of DSI, DPLL will not be used */
8041 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8042 		return;
8043 
8044 	mutex_lock(&dev_priv->sb_lock);
8045 	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
8046 	mutex_unlock(&dev_priv->sb_lock);
8047 
8048 	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8049 	clock.m2 = mdiv & DPIO_M2DIV_MASK;
8050 	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8051 	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8052 	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8053 
8054 	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
8055 }
8056 
8057 static void
8058 i9xx_get_initial_plane_config(struct intel_crtc *crtc,
8059 			      struct intel_initial_plane_config *plane_config)
8060 {
8061 	struct drm_device *dev = crtc->base.dev;
8062 	struct drm_i915_private *dev_priv = dev->dev_private;
8063 	u32 val, base, offset;
8064 	int pipe = crtc->pipe, plane = crtc->plane;
8065 	int fourcc, pixel_format;
8066 	unsigned int aligned_height;
8067 	struct drm_framebuffer *fb;
8068 	struct intel_framebuffer *intel_fb;
8069 
8070 	val = I915_READ(DSPCNTR(plane));
8071 	if (!(val & DISPLAY_PLANE_ENABLE))
8072 		return;
8073 
8074 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8075 	if (!intel_fb) {
8076 		DRM_DEBUG_KMS("failed to alloc fb\n");
8077 		return;
8078 	}
8079 
8080 	fb = &intel_fb->base;
8081 
8082 	if (INTEL_INFO(dev)->gen >= 4) {
8083 		if (val & DISPPLANE_TILED) {
8084 			plane_config->tiling = I915_TILING_X;
8085 			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
8086 		}
8087 	}
8088 
8089 	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
8090 	fourcc = i9xx_format_to_fourcc(pixel_format);
8091 	fb->pixel_format = fourcc;
8092 	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
8093 
8094 	if (INTEL_INFO(dev)->gen >= 4) {
8095 		if (plane_config->tiling)
8096 			offset = I915_READ(DSPTILEOFF(plane));
8097 		else
8098 			offset = I915_READ(DSPLINOFF(plane));
8099 		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
8100 	} else {
8101 		base = I915_READ(DSPADDR(plane));
8102 	}
8103 	plane_config->base = base;
8104 
8105 	val = I915_READ(PIPESRC(pipe));
8106 	fb->width = ((val >> 16) & 0xfff) + 1;
8107 	fb->height = ((val >> 0) & 0xfff) + 1;
8108 
8109 	val = I915_READ(DSPSTRIDE(pipe));
8110 	fb->pitches[0] = val & 0xffffffc0;
8111 
8112 	aligned_height = intel_fb_align_height(dev, fb->height,
8113 					       fb->pixel_format,
8114 					       fb->modifier[0]);
8115 
8116 	plane_config->size = fb->pitches[0] * aligned_height;
8117 
8118 	DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8119 		      pipe_name(pipe), plane, fb->width, fb->height,
8120 		      fb->bits_per_pixel, base, fb->pitches[0],
8121 		      plane_config->size);
8122 
8123 	plane_config->fb = intel_fb;
8124 }
8125 
8126 static void chv_crtc_clock_get(struct intel_crtc *crtc,
8127 			       struct intel_crtc_state *pipe_config)
8128 {
8129 	struct drm_device *dev = crtc->base.dev;
8130 	struct drm_i915_private *dev_priv = dev->dev_private;
8131 	int pipe = pipe_config->cpu_transcoder;
8132 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
8133 	intel_clock_t clock;
8134 	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
8135 	int refclk = 100000;
8136 
8137 	/* In case of DSI, DPLL will not be used */
8138 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8139 		return;
8140 
8141 	mutex_lock(&dev_priv->sb_lock);
8142 	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
8143 	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
8144 	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
8145 	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
8146 	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
8147 	mutex_unlock(&dev_priv->sb_lock);
8148 
8149 	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
8150 	clock.m2 = (pll_dw0 & 0xff) << 22;
8151 	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
8152 		clock.m2 |= pll_dw2 & 0x3fffff;
8153 	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
8154 	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
8155 	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
8156 
8157 	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
8158 }
8159 
8160 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8161 				 struct intel_crtc_state *pipe_config)
8162 {
8163 	struct drm_device *dev = crtc->base.dev;
8164 	struct drm_i915_private *dev_priv = dev->dev_private;
8165 	enum intel_display_power_domain power_domain;
8166 	uint32_t tmp;
8167 	bool ret;
8168 
8169 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8170 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
8171 		return false;
8172 
8173 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8174 	pipe_config->shared_dpll = NULL;
8175 
8176 	ret = false;
8177 
8178 	tmp = I915_READ(PIPECONF(crtc->pipe));
8179 	if (!(tmp & PIPECONF_ENABLE))
8180 		goto out;
8181 
8182 	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
8183 		switch (tmp & PIPECONF_BPC_MASK) {
8184 		case PIPECONF_6BPC:
8185 			pipe_config->pipe_bpp = 18;
8186 			break;
8187 		case PIPECONF_8BPC:
8188 			pipe_config->pipe_bpp = 24;
8189 			break;
8190 		case PIPECONF_10BPC:
8191 			pipe_config->pipe_bpp = 30;
8192 			break;
8193 		default:
8194 			break;
8195 		}
8196 	}
8197 
8198 	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
8199 	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
8200 		pipe_config->limited_color_range = true;
8201 
8202 	if (INTEL_INFO(dev)->gen < 4)
8203 		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
8204 
8205 	intel_get_pipe_timings(crtc, pipe_config);
8206 	intel_get_pipe_src_size(crtc, pipe_config);
8207 
8208 	i9xx_get_pfit_config(crtc, pipe_config);
8209 
8210 	if (INTEL_INFO(dev)->gen >= 4) {
8211 		/* No way to read it out on pipes B and C */
8212 		if (IS_CHERRYVIEW(dev) && crtc->pipe != PIPE_A)
8213 			tmp = dev_priv->chv_dpll_md[crtc->pipe];
8214 		else
8215 			tmp = I915_READ(DPLL_MD(crtc->pipe));
8216 		pipe_config->pixel_multiplier =
8217 			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
8218 			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
8219 		pipe_config->dpll_hw_state.dpll_md = tmp;
8220 	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
8221 		tmp = I915_READ(DPLL(crtc->pipe));
8222 		pipe_config->pixel_multiplier =
8223 			((tmp & SDVO_MULTIPLIER_MASK)
8224 			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
8225 	} else {
8226 		/* Note that on i915G/GM the pixel multiplier is in the sdvo
8227 		 * port and will be fixed up in the encoder->get_config
8228 		 * function. */
8229 		pipe_config->pixel_multiplier = 1;
8230 	}
8231 	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
8232 	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
8233 		/*
8234 		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
8235 		 * on 830. Filter it out here so that we don't
8236 		 * report errors due to that.
8237 		 */
8238 		if (IS_I830(dev))
8239 			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
8240 
8241 		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
8242 		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
8243 	} else {
8244 		/* Mask out read-only status bits. */
8245 		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
8246 						     DPLL_PORTC_READY_MASK |
8247 						     DPLL_PORTB_READY_MASK);
8248 	}
8249 
8250 	if (IS_CHERRYVIEW(dev))
8251 		chv_crtc_clock_get(crtc, pipe_config);
8252 	else if (IS_VALLEYVIEW(dev))
8253 		vlv_crtc_clock_get(crtc, pipe_config);
8254 	else
8255 		i9xx_crtc_clock_get(crtc, pipe_config);
8256 
8257 	/*
8258 	 * Normally the dotclock is filled in by the encoder .get_config()
8259 	 * but in case the pipe is enabled w/o any ports we need a sane
8260 	 * default.
8261 	 */
8262 	pipe_config->base.adjusted_mode.crtc_clock =
8263 		pipe_config->port_clock / pipe_config->pixel_multiplier;
8264 
8265 	ret = true;
8266 
8267 out:
8268 	intel_display_power_put(dev_priv, power_domain);
8269 
8270 	return ret;
8271 }
8272 
8273 static void ironlake_init_pch_refclk(struct drm_device *dev)
8274 {
8275 	struct drm_i915_private *dev_priv = dev->dev_private;
8276 	struct intel_encoder *encoder;
8277 	int i;
8278 	u32 val, final;
8279 	bool has_lvds = false;
8280 	bool has_cpu_edp = false;
8281 	bool has_panel = false;
8282 	bool has_ck505 = false;
8283 	bool can_ssc = false;
8284 	bool using_ssc_source = false;
8285 
8286 	/* We need to take the global config into account */
8287 	for_each_intel_encoder(dev, encoder) {
8288 		switch (encoder->type) {
8289 		case INTEL_OUTPUT_LVDS:
8290 			has_panel = true;
8291 			has_lvds = true;
8292 			break;
8293 		case INTEL_OUTPUT_EDP:
8294 			has_panel = true;
8295 			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
8296 				has_cpu_edp = true;
8297 			break;
8298 		default:
8299 			break;
8300 		}
8301 	}
8302 
8303 	if (HAS_PCH_IBX(dev)) {
8304 		has_ck505 = dev_priv->vbt.display_clock_mode;
8305 		can_ssc = has_ck505;
8306 	} else {
8307 		has_ck505 = false;
8308 		can_ssc = true;
8309 	}
8310 
8311 	/* Check if any DPLLs are using the SSC source */
8312 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8313 		u32 temp = I915_READ(PCH_DPLL(i));
8314 
8315 		if (!(temp & DPLL_VCO_ENABLE))
8316 			continue;
8317 
8318 		if ((temp & PLL_REF_INPUT_MASK) ==
8319 		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
8320 			using_ssc_source = true;
8321 			break;
8322 		}
8323 	}
8324 
8325 	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
8326 		      has_panel, has_lvds, has_ck505, using_ssc_source);
8327 
8328 	/* Ironlake: try to set up the display reference clock before
8329 	 * enabling the DPLL. This is only under the driver's control
8330 	 * after the PCH B stepping; earlier steppings ignore this
8331 	 * setting.
8332 	 */
8333 	val = I915_READ(PCH_DREF_CONTROL);
8334 
8335 	/* As we must carefully and slowly disable/enable each source in turn,
8336 	 * compute the final state we want first and check if we need to
8337 	 * make any changes at all.
8338 	 */
8339 	final = val;
8340 	final &= ~DREF_NONSPREAD_SOURCE_MASK;
8341 	if (has_ck505)
8342 		final |= DREF_NONSPREAD_CK505_ENABLE;
8343 	else
8344 		final |= DREF_NONSPREAD_SOURCE_ENABLE;
8345 
8346 	final &= ~DREF_SSC_SOURCE_MASK;
8347 	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8348 	final &= ~DREF_SSC1_ENABLE;
8349 
8350 	if (has_panel) {
8351 		final |= DREF_SSC_SOURCE_ENABLE;
8352 
8353 		if (intel_panel_use_ssc(dev_priv) && can_ssc)
8354 			final |= DREF_SSC1_ENABLE;
8355 
8356 		if (has_cpu_edp) {
8357 			if (intel_panel_use_ssc(dev_priv) && can_ssc)
8358 				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8359 			else
8360 				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8361 		} else
8362 			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8363 	} else if (using_ssc_source) {
8364 		final |= DREF_SSC_SOURCE_ENABLE;
8365 		final |= DREF_SSC1_ENABLE;
8366 	}
8367 
8368 	if (final == val)
8369 		return;
8370 
8371 	/* Always enable nonspread source */
8372 	val &= ~DREF_NONSPREAD_SOURCE_MASK;
8373 
8374 	if (has_ck505)
8375 		val |= DREF_NONSPREAD_CK505_ENABLE;
8376 	else
8377 		val |= DREF_NONSPREAD_SOURCE_ENABLE;
8378 
8379 	if (has_panel) {
8380 		val &= ~DREF_SSC_SOURCE_MASK;
8381 		val |= DREF_SSC_SOURCE_ENABLE;
8382 
8383 		/* SSC must be turned on before enabling the CPU output  */
8384 		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8385 			DRM_DEBUG_KMS("Using SSC on panel\n");
8386 			val |= DREF_SSC1_ENABLE;
8387 		} else
8388 			val &= ~DREF_SSC1_ENABLE;
8389 
8390 		/* Get SSC going before enabling the outputs */
8391 		I915_WRITE(PCH_DREF_CONTROL, val);
8392 		POSTING_READ(PCH_DREF_CONTROL);
8393 		udelay(200);
8394 
8395 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8396 
8397 		/* Enable CPU source on CPU attached eDP */
8398 		if (has_cpu_edp) {
8399 			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8400 				DRM_DEBUG_KMS("Using SSC on eDP\n");
8401 				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8402 			} else
8403 				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8404 		} else
8405 			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8406 
8407 		I915_WRITE(PCH_DREF_CONTROL, val);
8408 		POSTING_READ(PCH_DREF_CONTROL);
8409 		udelay(200);
8410 	} else {
8411 		DRM_DEBUG_KMS("Disabling CPU source output\n");
8412 
8413 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8414 
8415 		/* Turn off CPU output */
8416 		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8417 
8418 		I915_WRITE(PCH_DREF_CONTROL, val);
8419 		POSTING_READ(PCH_DREF_CONTROL);
8420 		udelay(200);
8421 
8422 		if (!using_ssc_source) {
8423 			DRM_DEBUG_KMS("Disabling SSC source\n");
8424 
8425 			/* Turn off the SSC source */
8426 			val &= ~DREF_SSC_SOURCE_MASK;
8427 			val |= DREF_SSC_SOURCE_DISABLE;
8428 
8429 			/* Turn off SSC1 */
8430 			val &= ~DREF_SSC1_ENABLE;
8431 
8432 			I915_WRITE(PCH_DREF_CONTROL, val);
8433 			POSTING_READ(PCH_DREF_CONTROL);
8434 			udelay(200);
8435 		}
8436 	}
8437 
8438 	BUG_ON(val != final);
8439 }
8440 
8441 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
8442 {
8443 	uint32_t tmp;
8444 
8445 	tmp = I915_READ(SOUTH_CHICKEN2);
8446 	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
8447 	I915_WRITE(SOUTH_CHICKEN2, tmp);
8448 
8449 	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
8450 			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
8451 		DRM_ERROR("FDI mPHY reset assert timeout\n");
8452 
8453 	tmp = I915_READ(SOUTH_CHICKEN2);
8454 	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
8455 	I915_WRITE(SOUTH_CHICKEN2, tmp);
8456 
8457 	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
8458 			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
8459 		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
8460 }
8461 
8462 /* WaMPhyProgramming:hsw */
8463 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
8464 {
8465 	uint32_t tmp;
8466 
8467 	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
8468 	tmp &= ~(0xFF << 24);
8469 	tmp |= (0x12 << 24);
8470 	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
8471 
8472 	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
8473 	tmp |= (1 << 11);
8474 	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
8475 
8476 	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
8477 	tmp |= (1 << 11);
8478 	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
8479 
8480 	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
8481 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8482 	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
8483 
8484 	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
8485 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8486 	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
8487 
8488 	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
8489 	tmp &= ~(7 << 13);
8490 	tmp |= (5 << 13);
8491 	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
8492 
8493 	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
8494 	tmp &= ~(7 << 13);
8495 	tmp |= (5 << 13);
8496 	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
8497 
8498 	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
8499 	tmp &= ~0xFF;
8500 	tmp |= 0x1C;
8501 	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
8502 
8503 	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
8504 	tmp &= ~0xFF;
8505 	tmp |= 0x1C;
8506 	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
8507 
8508 	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
8509 	tmp &= ~(0xFF << 16);
8510 	tmp |= (0x1C << 16);
8511 	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
8512 
8513 	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
8514 	tmp &= ~(0xFF << 16);
8515 	tmp |= (0x1C << 16);
8516 	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
8517 
8518 	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
8519 	tmp |= (1 << 27);
8520 	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
8521 
8522 	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
8523 	tmp |= (1 << 27);
8524 	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
8525 
8526 	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
8527 	tmp &= ~(0xF << 28);
8528 	tmp |= (4 << 28);
8529 	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
8530 
8531 	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
8532 	tmp &= ~(0xF << 28);
8533 	tmp |= (4 << 28);
8534 	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
8535 }
8536 
8537 /* Implements 3 different sequences from BSpec chapter "Display iCLK
8538  * Programming" based on the parameters passed:
8539  * - Sequence to enable CLKOUT_DP
8540  * - Sequence to enable CLKOUT_DP without spread
8541  * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
8542  */
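/*
 * Argument mapping onto those sequences, as implemented below:
 *   with_spread=true,  with_fdi=true  -> CLKOUT_DP for FDI, plus mPHY setup
 *   with_spread=true,  with_fdi=false -> CLKOUT_DP with spread
 *   with_spread=false, with_fdi=false -> CLKOUT_DP without spread
 * (with_fdi without with_spread is rejected by the WARN below.)
 */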
8543 static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
8544 				 bool with_fdi)
8545 {
8546 	struct drm_i915_private *dev_priv = dev->dev_private;
8547 	uint32_t reg, tmp;
8548 
8549 	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
8550 		with_spread = true;
8551 	if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
8552 		with_fdi = false;
8553 
8554 	mutex_lock(&dev_priv->sb_lock);
8555 
8556 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8557 	tmp &= ~SBI_SSCCTL_DISABLE;
8558 	tmp |= SBI_SSCCTL_PATHALT;
8559 	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8560 
8561 	udelay(24);
8562 
8563 	if (with_spread) {
8564 		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8565 		tmp &= ~SBI_SSCCTL_PATHALT;
8566 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8567 
8568 		if (with_fdi) {
8569 			lpt_reset_fdi_mphy(dev_priv);
8570 			lpt_program_fdi_mphy(dev_priv);
8571 		}
8572 	}
8573 
8574 	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
8575 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8576 	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8577 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8578 
8579 	mutex_unlock(&dev_priv->sb_lock);
8580 }
8581 
8582 /* Sequence to disable CLKOUT_DP */
8583 static void lpt_disable_clkout_dp(struct drm_device *dev)
8584 {
8585 	struct drm_i915_private *dev_priv = dev->dev_private;
8586 	uint32_t reg, tmp;
8587 
8588 	mutex_lock(&dev_priv->sb_lock);
8589 
8590 	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
8591 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8592 	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8593 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8594 
8595 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8596 	if (!(tmp & SBI_SSCCTL_DISABLE)) {
8597 		if (!(tmp & SBI_SSCCTL_PATHALT)) {
8598 			tmp |= SBI_SSCCTL_PATHALT;
8599 			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8600 			udelay(32);
8601 		}
8602 		tmp |= SBI_SSCCTL_DISABLE;
8603 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8604 	}
8605 
8606 	mutex_unlock(&dev_priv->sb_lock);
8607 }
8608 
8609 #define BEND_IDX(steps) ((50 + (steps)) / 5)
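/* Maps bend steps -50..+50 (in units of 5) onto indices 0..20 of the table below. */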
8610 
8611 static const uint16_t sscdivintphase[] = {
8612 	[BEND_IDX( 50)] = 0x3B23,
8613 	[BEND_IDX( 45)] = 0x3B23,
8614 	[BEND_IDX( 40)] = 0x3C23,
8615 	[BEND_IDX( 35)] = 0x3C23,
8616 	[BEND_IDX( 30)] = 0x3D23,
8617 	[BEND_IDX( 25)] = 0x3D23,
8618 	[BEND_IDX( 20)] = 0x3E23,
8619 	[BEND_IDX( 15)] = 0x3E23,
8620 	[BEND_IDX( 10)] = 0x3F23,
8621 	[BEND_IDX(  5)] = 0x3F23,
8622 	[BEND_IDX(  0)] = 0x0025,
8623 	[BEND_IDX( -5)] = 0x0025,
8624 	[BEND_IDX(-10)] = 0x0125,
8625 	[BEND_IDX(-15)] = 0x0125,
8626 	[BEND_IDX(-20)] = 0x0225,
8627 	[BEND_IDX(-25)] = 0x0225,
8628 	[BEND_IDX(-30)] = 0x0325,
8629 	[BEND_IDX(-35)] = 0x0325,
8630 	[BEND_IDX(-40)] = 0x0425,
8631 	[BEND_IDX(-45)] = 0x0425,
8632 	[BEND_IDX(-50)] = 0x0525,
8633 };
8634 
8635 /*
8636  * Bend CLKOUT_DP
8637  * steps -50 to 50 inclusive, in steps of 5
8638  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
8639  * change in clock period = -(steps / 10) * 5.787 ps
8640  */
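/*
 * Worked example: steps = -20 gives BEND_IDX(-20) = (50 - 20) / 5 = 6, so
 * SSCDIVINTPHASE is programmed with 0x0225 from the table above and the
 * clock period grows by -(-20 / 10) * 5.787 ps = +11.574 ps.
 */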
8641 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
8642 {
8643 	uint32_t tmp;
8644 	int idx = BEND_IDX(steps);
8645 
8646 	if (WARN_ON(steps % 5 != 0))
8647 		return;
8648 
8649 	if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
8650 		return;
8651 
8652 	mutex_lock(&dev_priv->sb_lock);
8653 
8654 	if (steps % 10 != 0)
8655 		tmp = 0xAAAAAAAB;
8656 	else
8657 		tmp = 0x00000000;
8658 	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
8659 
8660 	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
8661 	tmp &= 0xffff0000;
8662 	tmp |= sscdivintphase[idx];
8663 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
8664 
8665 	mutex_unlock(&dev_priv->sb_lock);
8666 }
8667 
8668 #undef BEND_IDX
8669 
8670 static void lpt_init_pch_refclk(struct drm_device *dev)
8671 {
8672 	struct intel_encoder *encoder;
8673 	bool has_vga = false;
8674 
8675 	for_each_intel_encoder(dev, encoder) {
8676 		switch (encoder->type) {
8677 		case INTEL_OUTPUT_ANALOG:
8678 			has_vga = true;
8679 			break;
8680 		default:
8681 			break;
8682 		}
8683 	}
8684 
8685 	if (has_vga) {
8686 		lpt_bend_clkout_dp(to_i915(dev), 0);
8687 		lpt_enable_clkout_dp(dev, true, true);
8688 	} else {
8689 		lpt_disable_clkout_dp(dev);
8690 	}
8691 }
8692 
8693 /*
8694  * Initialize reference clocks when the driver loads
8695  */
8696 void intel_init_pch_refclk(struct drm_device *dev)
8697 {
8698 	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
8699 		ironlake_init_pch_refclk(dev);
8700 	else if (HAS_PCH_LPT(dev))
8701 		lpt_init_pch_refclk(dev);
8702 }
8703 
8704 static void ironlake_set_pipeconf(struct drm_crtc *crtc)
8705 {
8706 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
8707 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8708 	int pipe = intel_crtc->pipe;
8709 	uint32_t val;
8710 
8711 	val = 0;
8712 
8713 	switch (intel_crtc->config->pipe_bpp) {
8714 	case 18:
8715 		val |= PIPECONF_6BPC;
8716 		break;
8717 	case 24:
8718 		val |= PIPECONF_8BPC;
8719 		break;
8720 	case 30:
8721 		val |= PIPECONF_10BPC;
8722 		break;
8723 	case 36:
8724 		val |= PIPECONF_12BPC;
8725 		break;
8726 	default:
8727 		/* Case prevented by intel_choose_pipe_bpp_dither. */
8728 		BUG();
8729 	}
8730 
8731 	if (intel_crtc->config->dither)
8732 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8733 
8734 	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8735 		val |= PIPECONF_INTERLACED_ILK;
8736 	else
8737 		val |= PIPECONF_PROGRESSIVE;
8738 
8739 	if (intel_crtc->config->limited_color_range)
8740 		val |= PIPECONF_COLOR_RANGE_SELECT;
8741 
8742 	I915_WRITE(PIPECONF(pipe), val);
8743 	POSTING_READ(PIPECONF(pipe));
8744 }
8745 
8746 static void haswell_set_pipeconf(struct drm_crtc *crtc)
8747 {
8748 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
8749 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8750 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
8751 	u32 val = 0;
8752 
8753 	if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
8754 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8755 
8756 	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8757 		val |= PIPECONF_INTERLACED_ILK;
8758 	else
8759 		val |= PIPECONF_PROGRESSIVE;
8760 
8761 	I915_WRITE(PIPECONF(cpu_transcoder), val);
8762 	POSTING_READ(PIPECONF(cpu_transcoder));
8763 }
8764 
8765 static void haswell_set_pipemisc(struct drm_crtc *crtc)
8766 {
8767 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
8768 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8769 
8770 	if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
8771 		u32 val = 0;
8772 
8773 		switch (intel_crtc->config->pipe_bpp) {
8774 		case 18:
8775 			val |= PIPEMISC_DITHER_6_BPC;
8776 			break;
8777 		case 24:
8778 			val |= PIPEMISC_DITHER_8_BPC;
8779 			break;
8780 		case 30:
8781 			val |= PIPEMISC_DITHER_10_BPC;
8782 			break;
8783 		case 36:
8784 			val |= PIPEMISC_DITHER_12_BPC;
8785 			break;
8786 		default:
8787 			/* Case prevented by pipe_config_set_bpp. */
8788 			BUG();
8789 		}
8790 
8791 		if (intel_crtc->config->dither)
8792 			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8793 
8794 		I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
8795 	}
8796 }
8797 
8798 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
8799 {
8800 	/*
8801 	 * Account for spread spectrum to avoid
8802 	 * oversubscribing the link. Max center spread
8803 	 * is 2.5%; use 5% for safety's sake.
8804 	 */
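	/*
	 * Illustrative numbers: a 148500 kHz, 24 bpp stream on a 270000 kHz
	 * link needs DIV_ROUND_UP(148500 * 24 * 21 / 20, 270000 * 8) = 2 lanes.
	 */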
8805 	u32 bps = target_clock * bpp * 21 / 20;
8806 	return DIV_ROUND_UP(bps, link_bw * 8);
8807 }
8808 
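/*
 * FP "CB tune" is applied when the feedback divider M is small relative to
 * N (m < factor * n); the factor is picked per output type in
 * ironlake_compute_dpll() below.
 */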
8809 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
8810 {
8811 	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
8812 }
8813 
8814 static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
8815 				  struct intel_crtc_state *crtc_state,
8816 				  intel_clock_t *reduced_clock)
8817 {
8818 	struct drm_crtc *crtc = &intel_crtc->base;
8819 	struct drm_device *dev = crtc->dev;
8820 	struct drm_i915_private *dev_priv = dev->dev_private;
8821 	struct drm_atomic_state *state = crtc_state->base.state;
8822 	struct drm_connector *connector;
8823 	struct drm_connector_state *connector_state;
8824 	struct intel_encoder *encoder;
8825 	u32 dpll, fp, fp2;
8826 	int factor, i;
8827 	bool is_lvds = false, is_sdvo = false;
8828 
8829 	for_each_connector_in_state(state, connector, connector_state, i) {
8830 		if (connector_state->crtc != crtc_state->base.crtc)
8831 			continue;
8832 
8833 		encoder = to_intel_encoder(connector_state->best_encoder);
8834 
8835 		switch (encoder->type) {
8836 		case INTEL_OUTPUT_LVDS:
8837 			is_lvds = true;
8838 			break;
8839 		case INTEL_OUTPUT_SDVO:
8840 		case INTEL_OUTPUT_HDMI:
8841 			is_sdvo = true;
8842 			break;
8843 		default:
8844 			break;
8845 		}
8846 	}
8847 
8848 	/* Enable autotuning of the PLL clock (if permissible) */
8849 	factor = 21;
8850 	if (is_lvds) {
8851 		if ((intel_panel_use_ssc(dev_priv) &&
8852 		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
8853 		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
8854 			factor = 25;
8855 	} else if (crtc_state->sdvo_tv_clock)
8856 		factor = 20;
8857 
8858 	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
8859 
8860 	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
8861 		fp |= FP_CB_TUNE;
8862 
8863 	if (reduced_clock) {
8864 		fp2 = i9xx_dpll_compute_fp(reduced_clock);
8865 
8866 		if (reduced_clock->m < factor * reduced_clock->n)
8867 			fp2 |= FP_CB_TUNE;
8868 	} else {
8869 		fp2 = fp;
8870 	}
8871 
8872 	dpll = 0;
8873 
8874 	if (is_lvds)
8875 		dpll |= DPLLB_MODE_LVDS;
8876 	else
8877 		dpll |= DPLLB_MODE_DAC_SERIAL;
8878 
8879 	dpll |= (crtc_state->pixel_multiplier - 1)
8880 		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
8881 
8882 	if (is_sdvo)
8883 		dpll |= DPLL_SDVO_HIGH_SPEED;
8884 	if (crtc_state->has_dp_encoder)
8885 		dpll |= DPLL_SDVO_HIGH_SPEED;
8886 
8887 	/* compute bitmask from p1 value */
8888 	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8889 	/* also FPA1 */
8890 	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
8891 
8892 	switch (crtc_state->dpll.p2) {
8893 	case 5:
8894 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
8895 		break;
8896 	case 7:
8897 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
8898 		break;
8899 	case 10:
8900 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
8901 		break;
8902 	case 14:
8903 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
8904 		break;
8905 	}
8906 
8907 	if (is_lvds && intel_panel_use_ssc(dev_priv))
8908 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
8909 	else
8910 		dpll |= PLL_REF_INPUT_DREFCLK;
8911 
8912 	dpll |= DPLL_VCO_ENABLE;
8913 
8914 	crtc_state->dpll_hw_state.dpll = dpll;
8915 	crtc_state->dpll_hw_state.fp0 = fp;
8916 	crtc_state->dpll_hw_state.fp1 = fp2;
8917 }
8918 
8919 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
8920 				       struct intel_crtc_state *crtc_state)
8921 {
8922 	struct drm_device *dev = crtc->base.dev;
8923 	struct drm_i915_private *dev_priv = dev->dev_private;
8924 	intel_clock_t reduced_clock;
8925 	bool has_reduced_clock = false;
8926 	struct intel_shared_dpll *pll;
8927 	const intel_limit_t *limit;
8928 	int refclk = 120000;
8929 
8930 	memset(&crtc_state->dpll_hw_state, 0,
8931 	       sizeof(crtc_state->dpll_hw_state));
8932 
8933 	crtc->lowfreq_avail = false;
8934 
8935 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
8936 	if (!crtc_state->has_pch_encoder)
8937 		return 0;
8938 
8939 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8940 		if (intel_panel_use_ssc(dev_priv)) {
8941 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
8942 				      dev_priv->vbt.lvds_ssc_freq);
8943 			refclk = dev_priv->vbt.lvds_ssc_freq;
8944 		}
8945 
8946 		if (intel_is_dual_link_lvds(dev)) {
8947 			if (refclk == 100000)
8948 				limit = &intel_limits_ironlake_dual_lvds_100m;
8949 			else
8950 				limit = &intel_limits_ironlake_dual_lvds;
8951 		} else {
8952 			if (refclk == 100000)
8953 				limit = &intel_limits_ironlake_single_lvds_100m;
8954 			else
8955 				limit = &intel_limits_ironlake_single_lvds;
8956 		}
8957 	} else {
8958 		limit = &intel_limits_ironlake_dac;
8959 	}
8960 
8961 	if (!crtc_state->clock_set &&
8962 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8963 				refclk, NULL, &crtc_state->dpll)) {
8964 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8965 		return -EINVAL;
8966 	}
8967 
8968 	ironlake_compute_dpll(crtc, crtc_state,
8969 			      has_reduced_clock ? &reduced_clock : NULL);
8970 
8971 	pll = intel_get_shared_dpll(crtc, crtc_state, NULL);
8972 	if (pll == NULL) {
8973 		DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
8974 				 pipe_name(crtc->pipe));
8975 		return -EINVAL;
8976 	}
8977 
8978 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8979 	    has_reduced_clock)
8980 		crtc->lowfreq_avail = true;
8981 
8982 	return 0;
8983 }
8984 
8985 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
8986 					 struct intel_link_m_n *m_n)
8987 {
8988 	struct drm_device *dev = crtc->base.dev;
8989 	struct drm_i915_private *dev_priv = dev->dev_private;
8990 	enum i915_pipe pipe = crtc->pipe;
8991 
8992 	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
8993 	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
8994 	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
8995 		& ~TU_SIZE_MASK;
8996 	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
8997 	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
8998 		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
8999 }
9000 
9001 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
9002 					 enum transcoder transcoder,
9003 					 struct intel_link_m_n *m_n,
9004 					 struct intel_link_m_n *m2_n2)
9005 {
9006 	struct drm_device *dev = crtc->base.dev;
9007 	struct drm_i915_private *dev_priv = dev->dev_private;
9008 	enum i915_pipe pipe = crtc->pipe;
9009 
9010 	if (INTEL_INFO(dev)->gen >= 5) {
9011 		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
9012 		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
9013 		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
9014 			& ~TU_SIZE_MASK;
9015 		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
9016 		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
9017 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9018 		/* Read the M2_N2 registers only on gen < 8, where they exist,
9019 		 * and only if DRRS is supported, so that the registers are not
9020 		 * read unnecessarily.
9021 		 */
9022 		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
9023 			crtc->config->has_drrs) {
9024 			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
9025 			m2_n2->link_n =	I915_READ(PIPE_LINK_N2(transcoder));
9026 			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
9027 			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
9028 					& ~TU_SIZE_MASK;
9029 			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
9030 					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9031 		}
9032 	} else {
9033 		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
9034 		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
9035 		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
9036 			& ~TU_SIZE_MASK;
9037 		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
9038 		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
9039 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9040 	}
9041 }
9042 
9043 void intel_dp_get_m_n(struct intel_crtc *crtc,
9044 		      struct intel_crtc_state *pipe_config)
9045 {
9046 	if (pipe_config->has_pch_encoder)
9047 		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9048 	else
9049 		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9050 					     &pipe_config->dp_m_n,
9051 					     &pipe_config->dp_m2_n2);
9052 }
9053 
9054 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
9055 					struct intel_crtc_state *pipe_config)
9056 {
9057 	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9058 				     &pipe_config->fdi_m_n, NULL);
9059 }
9060 
9061 static void skylake_get_pfit_config(struct intel_crtc *crtc,
9062 				    struct intel_crtc_state *pipe_config)
9063 {
9064 	struct drm_device *dev = crtc->base.dev;
9065 	struct drm_i915_private *dev_priv = dev->dev_private;
9066 	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9067 	uint32_t ps_ctrl = 0;
9068 	int id = -1;
9069 	int i;
9070 
9071 	/* find scaler attached to this pipe */
9072 	for (i = 0; i < crtc->num_scalers; i++) {
9073 		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9074 		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9075 			id = i;
9076 			pipe_config->pch_pfit.enabled = true;
9077 			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9078 			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9079 			break;
9080 		}
9081 	}
9082 
9083 	scaler_state->scaler_id = id;
9084 	if (id >= 0) {
9085 		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9086 	} else {
9087 		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9088 	}
9089 }
9090 
9091 static void
9092 skylake_get_initial_plane_config(struct intel_crtc *crtc,
9093 				 struct intel_initial_plane_config *plane_config)
9094 {
9095 	struct drm_device *dev = crtc->base.dev;
9096 	struct drm_i915_private *dev_priv = dev->dev_private;
9097 	u32 val, base, offset, stride_mult, tiling;
9098 	int pipe = crtc->pipe;
9099 	int fourcc, pixel_format;
9100 	unsigned int aligned_height;
9101 	struct drm_framebuffer *fb;
9102 	struct intel_framebuffer *intel_fb;
9103 
9104 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9105 	if (!intel_fb) {
9106 		DRM_DEBUG_KMS("failed to alloc fb\n");
9107 		return;
9108 	}
9109 
9110 	fb = &intel_fb->base;
9111 
9112 	val = I915_READ(PLANE_CTL(pipe, 0));
9113 	if (!(val & PLANE_CTL_ENABLE))
9114 		goto error;
9115 
9116 	pixel_format = val & PLANE_CTL_FORMAT_MASK;
9117 	fourcc = skl_format_to_fourcc(pixel_format,
9118 				      val & PLANE_CTL_ORDER_RGBX,
9119 				      val & PLANE_CTL_ALPHA_MASK);
9120 	fb->pixel_format = fourcc;
9121 	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
9122 
9123 	tiling = val & PLANE_CTL_TILED_MASK;
9124 	switch (tiling) {
9125 	case PLANE_CTL_TILED_LINEAR:
9126 		fb->modifier[0] = DRM_FORMAT_MOD_NONE;
9127 		break;
9128 	case PLANE_CTL_TILED_X:
9129 		plane_config->tiling = I915_TILING_X;
9130 		fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
9131 		break;
9132 	case PLANE_CTL_TILED_Y:
9133 		fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
9134 		break;
9135 	case PLANE_CTL_TILED_YF:
9136 		fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
9137 		break;
9138 	default:
9139 		MISSING_CASE(tiling);
9140 		goto error;
9141 	}
9142 
9143 	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
9144 	plane_config->base = base;
9145 
9146 	offset = I915_READ(PLANE_OFFSET(pipe, 0));
9147 
9148 	val = I915_READ(PLANE_SIZE(pipe, 0));
9149 	fb->height = ((val >> 16) & 0xfff) + 1;
9150 	fb->width = ((val >> 0) & 0x1fff) + 1;
9151 
9152 	val = I915_READ(PLANE_STRIDE(pipe, 0));
9153 	stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
9154 						fb->pixel_format);
9155 	fb->pitches[0] = (val & 0x3ff) * stride_mult;
9156 
9157 	aligned_height = intel_fb_align_height(dev, fb->height,
9158 					       fb->pixel_format,
9159 					       fb->modifier[0]);
9160 
9161 	plane_config->size = fb->pitches[0] * aligned_height;
9162 
9163 	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9164 		      pipe_name(pipe), fb->width, fb->height,
9165 		      fb->bits_per_pixel, base, fb->pitches[0],
9166 		      plane_config->size);
9167 
9168 	plane_config->fb = intel_fb;
9169 	return;
9170 
9171 error:
9172 	kfree(fb);
9173 }
9174 
9175 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
9176 				     struct intel_crtc_state *pipe_config)
9177 {
9178 	struct drm_device *dev = crtc->base.dev;
9179 	struct drm_i915_private *dev_priv = dev->dev_private;
9180 	uint32_t tmp;
9181 
9182 	tmp = I915_READ(PF_CTL(crtc->pipe));
9183 
9184 	if (tmp & PF_ENABLE) {
9185 		pipe_config->pch_pfit.enabled = true;
9186 		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9187 		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
9188 
9189 		/* We currently do not free assignments of panel fitters on
9190 		 * ivb/hsw (since we don't use the higher upscaling modes which
9191 		 * differentiate them), so just WARN about this case for now. */
9192 		if (IS_GEN7(dev)) {
9193 			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9194 				PF_PIPE_SEL_IVB(crtc->pipe));
9195 		}
9196 	}
9197 }
9198 
9199 static void
9200 ironlake_get_initial_plane_config(struct intel_crtc *crtc,
9201 				  struct intel_initial_plane_config *plane_config)
9202 {
9203 	struct drm_device *dev = crtc->base.dev;
9204 	struct drm_i915_private *dev_priv = dev->dev_private;
9205 	u32 val, base, offset;
9206 	int pipe = crtc->pipe;
9207 	int fourcc, pixel_format;
9208 	unsigned int aligned_height;
9209 	struct drm_framebuffer *fb;
9210 	struct intel_framebuffer *intel_fb;
9211 
9212 	val = I915_READ(DSPCNTR(pipe));
9213 	if (!(val & DISPLAY_PLANE_ENABLE))
9214 		return;
9215 
9216 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9217 	if (!intel_fb) {
9218 		DRM_DEBUG_KMS("failed to alloc fb\n");
9219 		return;
9220 	}
9221 
9222 	fb = &intel_fb->base;
9223 
9224 	if (INTEL_INFO(dev)->gen >= 4) {
9225 		if (val & DISPPLANE_TILED) {
9226 			plane_config->tiling = I915_TILING_X;
9227 			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
9228 		}
9229 	}
9230 
9231 	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
9232 	fourcc = i9xx_format_to_fourcc(pixel_format);
9233 	fb->pixel_format = fourcc;
9234 	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
9235 
9236 	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
9237 	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
9238 		offset = I915_READ(DSPOFFSET(pipe));
9239 	} else {
9240 		if (plane_config->tiling)
9241 			offset = I915_READ(DSPTILEOFF(pipe));
9242 		else
9243 			offset = I915_READ(DSPLINOFF(pipe));
9244 	}
9245 	plane_config->base = base;
9246 
9247 	val = I915_READ(PIPESRC(pipe));
9248 	fb->width = ((val >> 16) & 0xfff) + 1;
9249 	fb->height = ((val >> 0) & 0xfff) + 1;
9250 
9251 	val = I915_READ(DSPSTRIDE(pipe));
9252 	fb->pitches[0] = val & 0xffffffc0;
9253 
9254 	aligned_height = intel_fb_align_height(dev, fb->height,
9255 					       fb->pixel_format,
9256 					       fb->modifier[0]);
9257 
9258 	plane_config->size = fb->pitches[0] * aligned_height;
9259 
9260 	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9261 		      pipe_name(pipe), fb->width, fb->height,
9262 		      fb->bits_per_pixel, base, fb->pitches[0],
9263 		      plane_config->size);
9264 
9265 	plane_config->fb = intel_fb;
9266 }
9267 
9268 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9269 				     struct intel_crtc_state *pipe_config)
9270 {
9271 	struct drm_device *dev = crtc->base.dev;
9272 	struct drm_i915_private *dev_priv = dev->dev_private;
9273 	enum intel_display_power_domain power_domain;
9274 	uint32_t tmp;
9275 	bool ret;
9276 
9277 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9278 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9279 		return false;
9280 
9281 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9282 	pipe_config->shared_dpll = NULL;
9283 
9284 	ret = false;
9285 	tmp = I915_READ(PIPECONF(crtc->pipe));
9286 	if (!(tmp & PIPECONF_ENABLE))
9287 		goto out;
9288 
9289 	switch (tmp & PIPECONF_BPC_MASK) {
9290 	case PIPECONF_6BPC:
9291 		pipe_config->pipe_bpp = 18;
9292 		break;
9293 	case PIPECONF_8BPC:
9294 		pipe_config->pipe_bpp = 24;
9295 		break;
9296 	case PIPECONF_10BPC:
9297 		pipe_config->pipe_bpp = 30;
9298 		break;
9299 	case PIPECONF_12BPC:
9300 		pipe_config->pipe_bpp = 36;
9301 		break;
9302 	default:
9303 		break;
9304 	}
9305 
9306 	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
9307 		pipe_config->limited_color_range = true;
9308 
9309 	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
9310 		struct intel_shared_dpll *pll;
9311 		enum intel_dpll_id pll_id;
9312 
9313 		pipe_config->has_pch_encoder = true;
9314 
9315 		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
9316 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9317 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
9318 
9319 		ironlake_get_fdi_m_n_config(crtc, pipe_config);
9320 
9321 		if (HAS_PCH_IBX(dev_priv)) {
9322 			pll_id = (enum intel_dpll_id) crtc->pipe;
9323 		} else {
9324 			tmp = I915_READ(PCH_DPLL_SEL);
9325 			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
9326 				pll_id = DPLL_ID_PCH_PLL_B;
9327 			else
9328 				pll_id = DPLL_ID_PCH_PLL_A;
9329 		}
9330 
9331 		pipe_config->shared_dpll =
9332 			intel_get_shared_dpll_by_id(dev_priv, pll_id);
9333 		pll = pipe_config->shared_dpll;
9334 
9335 		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
9336 						 &pipe_config->dpll_hw_state));
9337 
9338 		tmp = pipe_config->dpll_hw_state.dpll;
9339 		pipe_config->pixel_multiplier =
9340 			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
9341 			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
9342 
9343 		ironlake_pch_clock_get(crtc, pipe_config);
9344 	} else {
9345 		pipe_config->pixel_multiplier = 1;
9346 	}
9347 
9348 	intel_get_pipe_timings(crtc, pipe_config);
9349 	intel_get_pipe_src_size(crtc, pipe_config);
9350 
9351 	ironlake_get_pfit_config(crtc, pipe_config);
9352 
9353 	ret = true;
9354 
9355 out:
9356 	intel_display_power_put(dev_priv, power_domain);
9357 
9358 	return ret;
9359 }
9360 
9361 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
9362 {
9363 	struct drm_device *dev = dev_priv->dev;
9364 	struct intel_crtc *crtc;
9365 
9366 	for_each_intel_crtc(dev, crtc)
9367 		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
9368 		     pipe_name(crtc->pipe));
9369 
9370 	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
9371 	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
9372 	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
9373 	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
9374 	I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
9375 	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
9376 	     "CPU PWM1 enabled\n");
9377 	if (IS_HASWELL(dev))
9378 		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
9379 		     "CPU PWM2 enabled\n");
9380 	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
9381 	     "PCH PWM1 enabled\n");
9382 	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
9383 	     "Utility pin enabled\n");
9384 	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
9385 
9386 	/*
9387 	 * In theory we can still leave IRQs enabled, as long as only the HPD
9388 	 * interrupts remain enabled. We used to check for that, but since it's
9389 	 * gen-specific and since we only disable LCPLL after we fully disable
9390 	 * the interrupts, the check below should be enough.
9391 	 */
9392 	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
9393 }
9394 
9395 static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9396 {
9397 	struct drm_device *dev = dev_priv->dev;
9398 
9399 	if (IS_HASWELL(dev))
9400 		return I915_READ(D_COMP_HSW);
9401 	else
9402 		return I915_READ(D_COMP_BDW);
9403 }
9404 
9405 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
9406 {
9407 	struct drm_device *dev = dev_priv->dev;
9408 
9409 	if (IS_HASWELL(dev)) {
9410 		mutex_lock(&dev_priv->rps.hw_lock);
9411 		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
9412 					    val))
9413 			DRM_ERROR("Failed to write to D_COMP\n");
9414 		mutex_unlock(&dev_priv->rps.hw_lock);
9415 	} else {
9416 		I915_WRITE(D_COMP_BDW, val);
9417 		POSTING_READ(D_COMP_BDW);
9418 	}
9419 }
9420 
9421 /*
9422  * This function implements pieces of two sequences from BSpec:
9423  * - Sequence for display software to disable LCPLL
9424  * - Sequence for display software to allow package C8+
9425  * The steps implemented here are just the steps that actually touch the LCPLL
9426  * register. Callers should take care of disabling all the display engine
9427  * functions, doing the mode unset, fixing interrupts, etc.
9428  */
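/* The inverse of this sequence is implemented by hsw_restore_lcpll() below. */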
9429 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
9430 			      bool switch_to_fclk, bool allow_power_down)
9431 {
9432 	uint32_t val;
9433 
9434 	assert_can_disable_lcpll(dev_priv);
9435 
9436 	val = I915_READ(LCPLL_CTL);
9437 
9438 	if (switch_to_fclk) {
9439 		val |= LCPLL_CD_SOURCE_FCLK;
9440 		I915_WRITE(LCPLL_CTL, val);
9441 
9442 		if (wait_for_us(I915_READ(LCPLL_CTL) &
9443 				LCPLL_CD_SOURCE_FCLK_DONE, 1))
9444 			DRM_ERROR("Switching to FCLK failed\n");
9445 
9446 		val = I915_READ(LCPLL_CTL);
9447 	}
9448 
9449 	val |= LCPLL_PLL_DISABLE;
9450 	I915_WRITE(LCPLL_CTL, val);
9451 	POSTING_READ(LCPLL_CTL);
9452 
9453 	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
9454 		DRM_ERROR("LCPLL still locked\n");
9455 
9456 	val = hsw_read_dcomp(dev_priv);
9457 	val |= D_COMP_COMP_DISABLE;
9458 	hsw_write_dcomp(dev_priv, val);
9459 	ndelay(100);
9460 
9461 	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
9462 		     1))
9463 		DRM_ERROR("D_COMP RCOMP still in progress\n");
9464 
9465 	if (allow_power_down) {
9466 		val = I915_READ(LCPLL_CTL);
9467 		val |= LCPLL_POWER_DOWN_ALLOW;
9468 		I915_WRITE(LCPLL_CTL, val);
9469 		POSTING_READ(LCPLL_CTL);
9470 	}
9471 }
9472 
9473 /*
9474  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
9475  * source.
9476  */
9477 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
9478 {
9479 	uint32_t val;
9480 
9481 	val = I915_READ(LCPLL_CTL);
9482 
9483 	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
9484 		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
9485 		return;
9486 
9487 	/*
9488 	 * Make sure we're not in PC8 state before disabling PC8, otherwise
9489 	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
9490 	 */
9491 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
9492 
9493 	if (val & LCPLL_POWER_DOWN_ALLOW) {
9494 		val &= ~LCPLL_POWER_DOWN_ALLOW;
9495 		I915_WRITE(LCPLL_CTL, val);
9496 		POSTING_READ(LCPLL_CTL);
9497 	}
9498 
9499 	val = hsw_read_dcomp(dev_priv);
9500 	val |= D_COMP_COMP_FORCE;
9501 	val &= ~D_COMP_COMP_DISABLE;
9502 	hsw_write_dcomp(dev_priv, val);
9503 
9504 	val = I915_READ(LCPLL_CTL);
9505 	val &= ~LCPLL_PLL_DISABLE;
9506 	I915_WRITE(LCPLL_CTL, val);
9507 
9508 	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
9509 		DRM_ERROR("LCPLL not locked yet\n");
9510 
9511 	if (val & LCPLL_CD_SOURCE_FCLK) {
9512 		val = I915_READ(LCPLL_CTL);
9513 		val &= ~LCPLL_CD_SOURCE_FCLK;
9514 		I915_WRITE(LCPLL_CTL, val);
9515 
9516 		if (wait_for_us((I915_READ(LCPLL_CTL) &
9517 				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9518 			DRM_ERROR("Switching back to LCPLL failed\n");
9519 	}
9520 
9521 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
9522 	intel_update_cdclk(dev_priv->dev);
9523 }
9524 
9525 /*
9526  * Package states C8 and deeper are really deep PC states that can only be
9527  * reached when all the devices on the system allow it, so even if the graphics
9528  * device allows PC8+, it doesn't mean the system will actually get to these
9529  * states. Our driver only allows PC8+ when going into runtime PM.
9530  *
9531  * The requirements for PC8+ are that all the outputs are disabled, the power
9532  * well is disabled and most interrupts are disabled, and these are also
9533  * requirements for runtime PM. Once these conditions are met, we manually
9534  * handle the rest: we disable the interrupts and clocks, and switch the
9535  * LCPLL refclk to Fclk. If we're in PC8+ and we get a non-hotplug interrupt,
9536  * we can hard hang the machine.
9537  *
9538  * When we really reach PC8 or deeper states (not just when we allow it) we lose
9539  * the state of some registers, so when we come back from PC8+ we need to
9540  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9541  * need to take care of the registers kept by RC6. Notice that this happens even
9542  * if we don't put the device in PCI D3 state (which is what currently happens
9543  * because of the runtime PM support).
9544  *
9545  * For more, read "Display Sequences for Package C8" on the hardware
9546  * documentation.
9547  */
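/*
 * In code terms the PC8 entry path below boils down to:
 *   lpt_disable_clkout_dp(dev);
 *   hsw_disable_lcpll(dev_priv, true, true);
 * plus the LP partition level tweak, and hsw_disable_pc8() walks the same
 * steps in reverse.
 */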
9548 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
9549 {
9550 	struct drm_device *dev = dev_priv->dev;
9551 	uint32_t val;
9552 
9553 	DRM_DEBUG_KMS("Enabling package C8+\n");
9554 
9555 	if (HAS_PCH_LPT_LP(dev)) {
9556 		val = I915_READ(SOUTH_DSPCLK_GATE_D);
9557 		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
9558 		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9559 	}
9560 
9561 	lpt_disable_clkout_dp(dev);
9562 	hsw_disable_lcpll(dev_priv, true, true);
9563 }
9564 
9565 void hsw_disable_pc8(struct drm_i915_private *dev_priv)
9566 {
9567 	struct drm_device *dev = dev_priv->dev;
9568 	uint32_t val;
9569 
9570 	DRM_DEBUG_KMS("Disabling package C8+\n");
9571 
9572 	hsw_restore_lcpll(dev_priv);
9573 	lpt_init_pch_refclk(dev);
9574 
9575 	if (HAS_PCH_LPT_LP(dev)) {
9576 		val = I915_READ(SOUTH_DSPCLK_GATE_D);
9577 		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
9578 		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9579 	}
9580 }
9581 
9582 static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9583 {
9584 	struct drm_device *dev = old_state->dev;
9585 	struct intel_atomic_state *old_intel_state =
9586 		to_intel_atomic_state(old_state);
9587 	unsigned int req_cdclk = old_intel_state->dev_cdclk;
9588 
9589 	broxton_set_cdclk(to_i915(dev), req_cdclk);
9590 }
9591 
9592 /* compute the max pixel rate for the new configuration */
9593 static int ilk_max_pixel_rate(struct drm_atomic_state *state)
9594 {
9595 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
9596 	struct drm_i915_private *dev_priv = state->dev->dev_private;
9597 	struct drm_crtc *crtc;
9598 	struct drm_crtc_state *cstate;
9599 	struct intel_crtc_state *crtc_state;
9600 	unsigned max_pixel_rate = 0, i;
9601 	enum i915_pipe pipe;
9602 
9603 	memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
9604 	       sizeof(intel_state->min_pixclk));
9605 
9606 	for_each_crtc_in_state(state, crtc, cstate, i) {
9607 		int pixel_rate;
9608 
9609 		crtc_state = to_intel_crtc_state(cstate);
9610 		if (!crtc_state->base.enable) {
9611 			intel_state->min_pixclk[i] = 0;
9612 			continue;
9613 		}
9614 
9615 		pixel_rate = ilk_pipe_pixel_rate(crtc_state);
9616 
9617 		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
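		/* (e.g. a 400000 kHz pixel rate claims
		 * DIV_ROUND_UP(400000 * 100, 95) = 421053 kHz of cdclk) */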
9618 		if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
9619 			pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
9620 
9621 		intel_state->min_pixclk[i] = pixel_rate;
9622 	}
9623 
9624 	for_each_pipe(dev_priv, pipe)
9625 		max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate);
9626 
9627 	return max_pixel_rate;
9628 }
9629 
9630 static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
9631 {
9632 	struct drm_i915_private *dev_priv = dev->dev_private;
9633 	uint32_t val, data;
9634 	int ret;
9635 
9636 	if (WARN((I915_READ(LCPLL_CTL) &
9637 		  (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
9638 		   LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
9639 		   LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
9640 		   LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
9641 		 "trying to change cdclk frequency with cdclk not enabled\n"))
9642 		return;
9643 
9644 	mutex_lock(&dev_priv->rps.hw_lock);
9645 	ret = sandybridge_pcode_write(dev_priv,
9646 				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
9647 	mutex_unlock(&dev_priv->rps.hw_lock);
9648 	if (ret) {
9649 		DRM_ERROR("failed to inform pcode about cdclk change\n");
9650 		return;
9651 	}
9652 
9653 	val = I915_READ(LCPLL_CTL);
9654 	val |= LCPLL_CD_SOURCE_FCLK;
9655 	I915_WRITE(LCPLL_CTL, val);
9656 
9657 	if (wait_for_us(I915_READ(LCPLL_CTL) &
9658 			LCPLL_CD_SOURCE_FCLK_DONE, 1))
9659 		DRM_ERROR("Switching to FCLK failed\n");
9660 
9661 	val = I915_READ(LCPLL_CTL);
9662 	val &= ~LCPLL_CLK_FREQ_MASK;
9663 
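	/* "data" is the frequency index later handed to pcode via
	 * HSW_PCODE_DE_WRITE_FREQ_REQ. */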
9664 	switch (cdclk) {
9665 	case 450000:
9666 		val |= LCPLL_CLK_FREQ_450;
9667 		data = 0;
9668 		break;
9669 	case 540000:
9670 		val |= LCPLL_CLK_FREQ_54O_BDW;
9671 		data = 1;
9672 		break;
9673 	case 337500:
9674 		val |= LCPLL_CLK_FREQ_337_5_BDW;
9675 		data = 2;
9676 		break;
9677 	case 675000:
9678 		val |= LCPLL_CLK_FREQ_675_BDW;
9679 		data = 3;
9680 		break;
9681 	default:
9682 		WARN(1, "invalid cdclk frequency\n");
9683 		return;
9684 	}
9685 
9686 	I915_WRITE(LCPLL_CTL, val);
9687 
9688 	val = I915_READ(LCPLL_CTL);
9689 	val &= ~LCPLL_CD_SOURCE_FCLK;
9690 	I915_WRITE(LCPLL_CTL, val);
9691 
9692 	if (wait_for_us((I915_READ(LCPLL_CTL) &
9693 			LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9694 		DRM_ERROR("Switching back to LCPLL failed\n");
9695 
9696 	mutex_lock(&dev_priv->rps.hw_lock);
9697 	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
9698 	mutex_unlock(&dev_priv->rps.hw_lock);
9699 
9700 	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
9701 
9702 	intel_update_cdclk(dev);
9703 
9704 	WARN(cdclk != dev_priv->cdclk_freq,
9705 	     "cdclk requested %d kHz but got %d kHz\n",
9706 	     cdclk, dev_priv->cdclk_freq);
9707 }
9708 
9709 static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
9710 {
9711 	struct drm_i915_private *dev_priv = to_i915(state->dev);
9712 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
9713 	int max_pixclk = ilk_max_pixel_rate(state);
9714 	int cdclk;
9715 
9716 	/*
9717 	 * FIXME should also account for plane ratio
9718 	 * once 64bpp pixel formats are supported.
9719 	 */
9720 	if (max_pixclk > 540000)
9721 		cdclk = 675000;
9722 	else if (max_pixclk > 450000)
9723 		cdclk = 540000;
9724 	else if (max_pixclk > 337500)
9725 		cdclk = 450000;
9726 	else
9727 		cdclk = 337500;
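	/* e.g. a single 4k@60 pipe (~533 MHz) lands in the 540000 kHz bucket */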
9728 
9729 	if (cdclk > dev_priv->max_cdclk_freq) {
9730 		DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
9731 			      cdclk, dev_priv->max_cdclk_freq);
9732 		return -EINVAL;
9733 	}
9734 
9735 	intel_state->cdclk = intel_state->dev_cdclk = cdclk;
9736 	if (!intel_state->active_crtcs)
9737 		intel_state->dev_cdclk = 337500;
9738 
9739 	return 0;
9740 }
9741 
9742 static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9743 {
9744 	struct drm_device *dev = old_state->dev;
9745 	struct intel_atomic_state *old_intel_state =
9746 		to_intel_atomic_state(old_state);
9747 	unsigned req_cdclk = old_intel_state->dev_cdclk;
9748 
9749 	broadwell_set_cdclk(dev, req_cdclk);
9750 }
9751 
9752 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9753 				      struct intel_crtc_state *crtc_state)
9754 {
9755 	struct intel_encoder *intel_encoder =
9756 		intel_ddi_get_crtc_new_encoder(crtc_state);
9757 
9758 	if (intel_encoder->type != INTEL_OUTPUT_DSI) {
9759 		if (!intel_ddi_pll_select(crtc, crtc_state))
9760 			return -EINVAL;
9761 	}
9762 
9763 	crtc->lowfreq_avail = false;
9764 
9765 	return 0;
9766 }
9767 
9768 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9769 				enum port port,
9770 				struct intel_crtc_state *pipe_config)
9771 {
9772 	enum intel_dpll_id id;
9773 
9774 	switch (port) {
9775 	case PORT_A:
9776 		pipe_config->ddi_pll_sel = SKL_DPLL0;
9777 		id = DPLL_ID_SKL_DPLL0;
9778 		break;
9779 	case PORT_B:
9780 		pipe_config->ddi_pll_sel = SKL_DPLL1;
9781 		id = DPLL_ID_SKL_DPLL1;
9782 		break;
9783 	case PORT_C:
9784 		pipe_config->ddi_pll_sel = SKL_DPLL2;
9785 		id = DPLL_ID_SKL_DPLL2;
9786 		break;
9787 	default:
9788 		DRM_ERROR("Incorrect port type\n");
9789 		return;
9790 	}
9791 
9792 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9793 }
9794 
9795 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
9796 				enum port port,
9797 				struct intel_crtc_state *pipe_config)
9798 {
9799 	enum intel_dpll_id id;
9800 	u32 temp;
9801 
9802 	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
9803 	pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
9804 
9805 	switch (pipe_config->ddi_pll_sel) {
9806 	case SKL_DPLL0:
9807 		id = DPLL_ID_SKL_DPLL0;
9808 		break;
9809 	case SKL_DPLL1:
9810 		id = DPLL_ID_SKL_DPLL1;
9811 		break;
9812 	case SKL_DPLL2:
9813 		id = DPLL_ID_SKL_DPLL2;
9814 		break;
9815 	case SKL_DPLL3:
9816 		id = DPLL_ID_SKL_DPLL3;
9817 		break;
9818 	default:
9819 		MISSING_CASE(pipe_config->ddi_pll_sel);
9820 		return;
9821 	}
9822 
9823 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9824 }
9825 
9826 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
9827 				enum port port,
9828 				struct intel_crtc_state *pipe_config)
9829 {
9830 	enum intel_dpll_id id;
9831 
9832 	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
9833 
9834 	switch (pipe_config->ddi_pll_sel) {
9835 	case PORT_CLK_SEL_WRPLL1:
9836 		id = DPLL_ID_WRPLL1;
9837 		break;
9838 	case PORT_CLK_SEL_WRPLL2:
9839 		id = DPLL_ID_WRPLL2;
9840 		break;
9841 	case PORT_CLK_SEL_SPLL:
9842 		id = DPLL_ID_SPLL;
9843 		break;
9844 	case PORT_CLK_SEL_LCPLL_810:
9845 		id = DPLL_ID_LCPLL_810;
9846 		break;
9847 	case PORT_CLK_SEL_LCPLL_1350:
9848 		id = DPLL_ID_LCPLL_1350;
9849 		break;
9850 	case PORT_CLK_SEL_LCPLL_2700:
9851 		id = DPLL_ID_LCPLL_2700;
9852 		break;
9853 	default:
9854 		MISSING_CASE(pipe_config->ddi_pll_sel);
9855 		/* fall through */
9856 	case PORT_CLK_SEL_NONE:
9857 		return;
9858 	}
9859 
9860 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9861 }
9862 
9863 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
9864 				     struct intel_crtc_state *pipe_config,
9865 				     unsigned long *power_domain_mask)
9866 {
9867 	struct drm_device *dev = crtc->base.dev;
9868 	struct drm_i915_private *dev_priv = dev->dev_private;
9869 	enum intel_display_power_domain power_domain;
9870 	u32 tmp;
9871 
9872 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9873 
9874 	/*
9875 	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
9876 	 * consistency and less surprising code; it's in the always-on power well).
9877 	 */
9878 	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
9879 	if (tmp & TRANS_DDI_FUNC_ENABLE) {
9880 		enum i915_pipe trans_edp_pipe;
9881 		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
9882 		default:
9883 			WARN(1, "unknown pipe linked to edp transcoder\n");
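			/* fall through */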
9884 		case TRANS_DDI_EDP_INPUT_A_ONOFF:
9885 		case TRANS_DDI_EDP_INPUT_A_ON:
9886 			trans_edp_pipe = PIPE_A;
9887 			break;
9888 		case TRANS_DDI_EDP_INPUT_B_ONOFF:
9889 			trans_edp_pipe = PIPE_B;
9890 			break;
9891 		case TRANS_DDI_EDP_INPUT_C_ONOFF:
9892 			trans_edp_pipe = PIPE_C;
9893 			break;
9894 		}
9895 
9896 		if (trans_edp_pipe == crtc->pipe)
9897 			pipe_config->cpu_transcoder = TRANSCODER_EDP;
9898 	}
9899 
9900 	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
9901 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9902 		return false;
9903 	*power_domain_mask |= BIT(power_domain);
9904 
9905 	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
9906 
9907 	return tmp & PIPECONF_ENABLE;
9908 }
9909 
9910 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
9911 					 struct intel_crtc_state *pipe_config,
9912 					 unsigned long *power_domain_mask)
9913 {
9914 	struct drm_device *dev = crtc->base.dev;
9915 	struct drm_i915_private *dev_priv = dev->dev_private;
9916 	enum intel_display_power_domain power_domain;
9917 	enum port port;
9918 	enum transcoder cpu_transcoder;
9919 	u32 tmp;
9920 
9921 	pipe_config->has_dsi_encoder = false;
9922 
9923 	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
9924 		if (port == PORT_A)
9925 			cpu_transcoder = TRANSCODER_DSI_A;
9926 		else
9927 			cpu_transcoder = TRANSCODER_DSI_C;
9928 
9929 		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
9930 		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9931 			continue;
9932 		*power_domain_mask |= BIT(power_domain);
9933 
9934 		/*
9935 		 * The PLL needs to be enabled with a valid divider
9936 		 * configuration, otherwise accessing DSI registers will hang
9937 		 * the machine. See BSpec North Display Engine
9938 		 * registers/MIPI[BXT]. We can break out here early, since we
9939 		 * need the same DSI PLL to be enabled for both DSI ports.
9940 		 */
9941 		if (!intel_dsi_pll_is_enabled(dev_priv))
9942 			break;
9943 
9944 		/* XXX: this works for video mode only */
9945 		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
9946 		if (!(tmp & DPI_ENABLE))
9947 			continue;
9948 
9949 		tmp = I915_READ(MIPI_CTRL(port));
9950 		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
9951 			continue;
9952 
9953 		pipe_config->cpu_transcoder = cpu_transcoder;
9954 		pipe_config->has_dsi_encoder = true;
9955 		break;
9956 	}
9957 
9958 	return pipe_config->has_dsi_encoder;
9959 }
9960 
9961 static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
9962 				       struct intel_crtc_state *pipe_config)
9963 {
9964 	struct drm_device *dev = crtc->base.dev;
9965 	struct drm_i915_private *dev_priv = dev->dev_private;
9966 	struct intel_shared_dpll *pll;
9967 	enum port port;
9968 	uint32_t tmp;
9969 
9970 	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
9971 
9972 	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
9973 
9974 	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
9975 		skylake_get_ddi_pll(dev_priv, port, pipe_config);
9976 	else if (IS_BROXTON(dev))
9977 		bxt_get_ddi_pll(dev_priv, port, pipe_config);
9978 	else
9979 		haswell_get_ddi_pll(dev_priv, port, pipe_config);
9980 
9981 	pll = pipe_config->shared_dpll;
9982 	if (pll) {
9983 		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
9984 						 &pipe_config->dpll_hw_state));
9985 	}
9986 
9987 	/*
9988 	 * Haswell has only the FDI/PCH transcoder A, which is connected to
9989 	 * DDI E. So just check whether this pipe is wired to DDI E and whether
9990 	 * the PCH transcoder is on.
9991 	 */
9992 	if (INTEL_INFO(dev)->gen < 9 &&
9993 	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
9994 		pipe_config->has_pch_encoder = true;
9995 
9996 		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
9997 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9998 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
9999 
10000 		ironlake_get_fdi_m_n_config(crtc, pipe_config);
10001 	}
10002 }
10003 
10004 static bool haswell_get_pipe_config(struct intel_crtc *crtc,
10005 				    struct intel_crtc_state *pipe_config)
10006 {
10007 	struct drm_device *dev = crtc->base.dev;
10008 	struct drm_i915_private *dev_priv = dev->dev_private;
10009 	enum intel_display_power_domain power_domain;
10010 	unsigned long power_domain_mask;
10011 	bool active;
10012 
10013 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10014 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
10015 		return false;
10016 	power_domain_mask = BIT(power_domain);
10017 
10018 	pipe_config->shared_dpll = NULL;
10019 
10020 	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
10021 
10022 	if (IS_BROXTON(dev_priv)) {
10023 		bxt_get_dsi_transcoder_state(crtc, pipe_config,
10024 					     &power_domain_mask);
10025 		WARN_ON(active && pipe_config->has_dsi_encoder);
10026 		if (pipe_config->has_dsi_encoder)
10027 			active = true;
10028 	}
10029 
10030 	if (!active)
10031 		goto out;
10032 
10033 	if (!pipe_config->has_dsi_encoder) {
10034 		haswell_get_ddi_port_state(crtc, pipe_config);
10035 		intel_get_pipe_timings(crtc, pipe_config);
10036 	}
10037 
10038 	intel_get_pipe_src_size(crtc, pipe_config);
10039 
10040 	pipe_config->gamma_mode =
10041 		I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
10042 
10043 	if (INTEL_INFO(dev)->gen >= 9) {
10044 		skl_init_scalers(dev, crtc, pipe_config);
10045 
10046 		/* default to no scaler in use; the pfit readout
10047 		 * below may claim one */
10048 		pipe_config->scaler_state.scaler_id = -1;
10049 		pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
10050 	}
10051 
10052 	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10053 	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
10054 		power_domain_mask |= BIT(power_domain);
10055 		if (INTEL_INFO(dev)->gen >= 9)
10056 			skylake_get_pfit_config(crtc, pipe_config);
10057 		else
10058 			ironlake_get_pfit_config(crtc, pipe_config);
10059 	}
10060 
10061 	if (IS_HASWELL(dev))
10062 		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
10063 			(I915_READ(IPS_CTL) & IPS_ENABLE);
10064 
10065 	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
10066 	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
10067 		pipe_config->pixel_multiplier =
10068 			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
10069 	} else {
10070 		pipe_config->pixel_multiplier = 1;
10071 	}
10072 
10073 out:
10074 	for_each_power_domain(power_domain, power_domain_mask)
10075 		intel_display_power_put(dev_priv, power_domain);
10076 
10077 	return active;
10078 }
10079 
10080 static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
10081 			       const struct intel_plane_state *plane_state)
10082 {
10083 	struct drm_device *dev = crtc->dev;
10084 	struct drm_i915_private *dev_priv = dev->dev_private;
10085 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10086 	uint32_t cntl = 0, size = 0;
10087 
10088 	if (plane_state && plane_state->visible) {
10089 		unsigned int width = plane_state->base.crtc_w;
10090 		unsigned int height = plane_state->base.crtc_h;
10091 		unsigned int stride = roundup_pow_of_two(width) * 4;
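		/* e.g. a 100 pixel wide ARGB cursor gets a
		 * roundup_pow_of_two(100) * 4 = 512 byte stride */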
10092 
10093 		switch (stride) {
10094 		default:
10095 			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
10096 				  width, stride);
10097 			stride = 256;
10098 			/* fallthrough */
10099 		case 256:
10100 		case 512:
10101 		case 1024:
10102 		case 2048:
10103 			break;
10104 		}
10105 
10106 		cntl |= CURSOR_ENABLE |
10107 			CURSOR_GAMMA_ENABLE |
10108 			CURSOR_FORMAT_ARGB |
10109 			CURSOR_STRIDE(stride);
10110 
10111 		size = (height << 12) | width;
10112 	}
10113 
10114 	if (intel_crtc->cursor_cntl != 0 &&
10115 	    (intel_crtc->cursor_base != base ||
10116 	     intel_crtc->cursor_size != size ||
10117 	     intel_crtc->cursor_cntl != cntl)) {
10118 		/* On these chipsets we can only modify the base/size/stride
10119 		 * whilst the cursor is disabled.
10120 		 */
10121 		I915_WRITE(CURCNTR(PIPE_A), 0);
10122 		POSTING_READ(CURCNTR(PIPE_A));
10123 		intel_crtc->cursor_cntl = 0;
10124 	}
10125 
10126 	if (intel_crtc->cursor_base != base) {
10127 		I915_WRITE(CURBASE(PIPE_A), base);
10128 		intel_crtc->cursor_base = base;
10129 	}
10130 
10131 	if (intel_crtc->cursor_size != size) {
10132 		I915_WRITE(CURSIZE, size);
10133 		intel_crtc->cursor_size = size;
10134 	}
10135 
10136 	if (intel_crtc->cursor_cntl != cntl) {
10137 		I915_WRITE(CURCNTR(PIPE_A), cntl);
10138 		POSTING_READ(CURCNTR(PIPE_A));
10139 		intel_crtc->cursor_cntl = cntl;
10140 	}
10141 }
10142 
10143 static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
10144 			       const struct intel_plane_state *plane_state)
10145 {
10146 	struct drm_device *dev = crtc->dev;
10147 	struct drm_i915_private *dev_priv = dev->dev_private;
10148 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10149 	int pipe = intel_crtc->pipe;
10150 	uint32_t cntl = 0;
10151 
10152 	if (plane_state && plane_state->visible) {
10153 		cntl = MCURSOR_GAMMA_ENABLE;
10154 		switch (plane_state->base.crtc_w) {
10155 			case 64:
10156 				cntl |= CURSOR_MODE_64_ARGB_AX;
10157 				break;
10158 			case 128:
10159 				cntl |= CURSOR_MODE_128_ARGB_AX;
10160 				break;
10161 			case 256:
10162 				cntl |= CURSOR_MODE_256_ARGB_AX;
10163 				break;
10164 			default:
10165 				MISSING_CASE(plane_state->base.crtc_w);
10166 				return;
10167 		}
10168 		cntl |= pipe << 28; /* Connect to correct pipe */
10169 
10170 		if (HAS_DDI(dev))
10171 			cntl |= CURSOR_PIPE_CSC_ENABLE;
10172 
10173 		if (plane_state->base.rotation == BIT(DRM_ROTATE_180))
10174 			cntl |= CURSOR_ROTATE_180;
10175 	}
10176 
10177 	if (intel_crtc->cursor_cntl != cntl) {
10178 		I915_WRITE(CURCNTR(pipe), cntl);
10179 		POSTING_READ(CURCNTR(pipe));
10180 		intel_crtc->cursor_cntl = cntl;
10181 	}
10182 
10183 	/* and commit changes on next vblank */
10184 	I915_WRITE(CURBASE(pipe), base);
10185 	POSTING_READ(CURBASE(pipe));
10186 
10187 	intel_crtc->cursor_base = base;
10188 }
10189 
10190 /* If no part of the cursor is visible on the framebuffer, the GPU may hang... */
10191 static void intel_crtc_update_cursor(struct drm_crtc *crtc,
10192 				     const struct intel_plane_state *plane_state)
10193 {
10194 	struct drm_device *dev = crtc->dev;
10195 	struct drm_i915_private *dev_priv = dev->dev_private;
10196 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10197 	int pipe = intel_crtc->pipe;
10198 	u32 base = intel_crtc->cursor_addr;
10199 	u32 pos = 0;
10200 
10201 	if (plane_state) {
10202 		int x = plane_state->base.crtc_x;
10203 		int y = plane_state->base.crtc_y;
10204 
10205 		if (x < 0) {
10206 			pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10207 			x = -x;
10208 		}
10209 		pos |= x << CURSOR_X_SHIFT;
10210 
10211 		if (y < 0) {
10212 			pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10213 			y = -y;
10214 		}
10215 		pos |= y << CURSOR_Y_SHIFT;
10216 
10217 		/* ILK+ do this automagically */
10218 		if (HAS_GMCH_DISPLAY(dev) &&
10219 		    plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
10220 			base += (plane_state->base.crtc_h *
10221 				 plane_state->base.crtc_w - 1) * 4;
10222 		}
10223 	}
10224 
10225 	I915_WRITE(CURPOS(pipe), pos);
10226 
10227 	if (IS_845G(dev) || IS_I865G(dev))
10228 		i845_update_cursor(crtc, base, plane_state);
10229 	else
10230 		i9xx_update_cursor(crtc, base, plane_state);
10231 }
10232 
10233 static bool cursor_size_ok(struct drm_device *dev,
10234 			   uint32_t width, uint32_t height)
10235 {
10236 	if (width == 0 || height == 0)
10237 		return false;
10238 
10239 	/*
10240 	 * 845g/865g are special in that they are only limited by
10241 	 * the width of their cursors; the height is arbitrary up to
10242 	 * the precision of the register. Everything else requires
10243 	 * square cursors, limited to a few power-of-two sizes.
10244 	 */
10245 	if (IS_845G(dev) || IS_I865G(dev)) {
10246 		if ((width & 63) != 0)
10247 			return false;
10248 
10249 		if (width > (IS_845G(dev) ? 64 : 512))
10250 			return false;
10251 
10252 		if (height > 1023)
10253 			return false;
10254 	} else {
10255 		switch (width | height) {
10256 		case 256:
10257 		case 128:
10258 			if (IS_GEN2(dev))
10259 				return false;
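			/* fall through */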
10260 		case 64:
10261 			break;
10262 		default:
10263 			return false;
10264 		}
10265 	}
10266 
10267 	return true;
10268 }
10269 
10270 /* VESA 640x480x72Hz mode to set on the pipe */
10271 static struct drm_display_mode load_detect_mode = {
10272 	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
10273 		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
10274 };
10275 
10276 struct drm_framebuffer *
10277 __intel_framebuffer_create(struct drm_device *dev,
10278 			   struct drm_mode_fb_cmd2 *mode_cmd,
10279 			   struct drm_i915_gem_object *obj)
10280 {
10281 	struct intel_framebuffer *intel_fb;
10282 	int ret;
10283 
10284 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10285 	if (!intel_fb)
10286 		return ERR_PTR(-ENOMEM);
10287 
10288 	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
10289 	if (ret)
10290 		goto err;
10291 
10292 	return &intel_fb->base;
10293 
10294 err:
10295 	kfree(intel_fb);
10296 	return ERR_PTR(ret);
10297 }
10298 
10299 static struct drm_framebuffer *
10300 intel_framebuffer_create(struct drm_device *dev,
10301 			 struct drm_mode_fb_cmd2 *mode_cmd,
10302 			 struct drm_i915_gem_object *obj)
10303 {
10304 	struct drm_framebuffer *fb;
10305 	int ret;
10306 
10307 	ret = i915_mutex_lock_interruptible(dev);
10308 	if (ret)
10309 		return ERR_PTR(ret);
10310 	fb = __intel_framebuffer_create(dev, mode_cmd, obj);
10311 	mutex_unlock(&dev->struct_mutex);
10312 
10313 	return fb;
10314 }
10315 
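/*
 * Example: a 1366 pixel wide mode at 32 bpp needs
 * DIV_ROUND_UP(1366 * 32, 8) = 5464 bytes per line, which is then
 * rounded up to the next 64 byte boundary, 5504 bytes.
 */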
10316 static u32
10317 intel_framebuffer_pitch_for_width(int width, int bpp)
10318 {
10319 	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
10320 	return ALIGN(pitch, 64);
10321 }
10322 
10323 static u32
10324 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
10325 {
10326 	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
10327 	return PAGE_ALIGN(pitch * mode->vdisplay);
10328 }
10329 
10330 static struct drm_framebuffer *
10331 intel_framebuffer_create_for_mode(struct drm_device *dev,
10332 				  struct drm_display_mode *mode,
10333 				  int depth, int bpp)
10334 {
10335 	struct drm_framebuffer *fb;
10336 	struct drm_i915_gem_object *obj;
10337 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
10338 
10339 	obj = i915_gem_alloc_object(dev,
10340 				    intel_framebuffer_size_for_mode(mode, bpp));
10341 	if (obj == NULL)
10342 		return ERR_PTR(-ENOMEM);
10343 
10344 	mode_cmd.width = mode->hdisplay;
10345 	mode_cmd.height = mode->vdisplay;
10346 	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
10347 								bpp);
10348 	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
10349 
10350 	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
10351 	if (IS_ERR(fb))
10352 		drm_gem_object_unreference_unlocked(&obj->base);
10353 
10354 	return fb;
10355 }
10356 
10357 static struct drm_framebuffer *
10358 mode_fits_in_fbdev(struct drm_device *dev,
10359 		   struct drm_display_mode *mode)
10360 {
10361 #ifdef CONFIG_DRM_FBDEV_EMULATION
10362 	struct drm_i915_private *dev_priv = dev->dev_private;
10363 	struct drm_i915_gem_object *obj;
10364 	struct drm_framebuffer *fb;
10365 
10366 	if (!dev_priv->fbdev)
10367 		return NULL;
10368 
10369 	if (!dev_priv->fbdev->fb)
10370 		return NULL;
10371 
10372 	obj = dev_priv->fbdev->fb->obj;
10373 	BUG_ON(!obj);
10374 
10375 	fb = &dev_priv->fbdev->fb->base;
10376 	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
10377 							       fb->bits_per_pixel))
10378 		return NULL;
10379 
10380 	if (obj->base.size < mode->vdisplay * fb->pitches[0])
10381 		return NULL;
10382 
10383 	drm_framebuffer_reference(fb);
10384 	return fb;
10385 #else
10386 	return NULL;
10387 #endif
10388 }
10389 
10390 static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
10391 					   struct drm_crtc *crtc,
10392 					   struct drm_display_mode *mode,
10393 					   struct drm_framebuffer *fb,
10394 					   int x, int y)
10395 {
10396 	struct drm_plane_state *plane_state;
10397 	int hdisplay, vdisplay;
10398 	int ret;
10399 
10400 	plane_state = drm_atomic_get_plane_state(state, crtc->primary);
10401 	if (IS_ERR(plane_state))
10402 		return PTR_ERR(plane_state);
10403 
10404 	if (mode)
10405 		drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
10406 	else
10407 		hdisplay = vdisplay = 0;
10408 
10409 	ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
10410 	if (ret)
10411 		return ret;
10412 	drm_atomic_set_fb_for_plane(plane_state, fb);
10413 	plane_state->crtc_x = 0;
10414 	plane_state->crtc_y = 0;
10415 	plane_state->crtc_w = hdisplay;
10416 	plane_state->crtc_h = vdisplay;
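	/* Plane src coordinates and sizes are in 16.16 fixed point. */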
10417 	plane_state->src_x = x << 16;
10418 	plane_state->src_y = y << 16;
10419 	plane_state->src_w = hdisplay << 16;
10420 	plane_state->src_h = vdisplay << 16;
10421 
10422 	return 0;
10423 }
10424 
10425 bool intel_get_load_detect_pipe(struct drm_connector *connector,
10426 				struct drm_display_mode *mode,
10427 				struct intel_load_detect_pipe *old,
10428 				struct drm_modeset_acquire_ctx *ctx)
10429 {
10430 	struct intel_crtc *intel_crtc;
10431 	struct intel_encoder *intel_encoder =
10432 		intel_attached_encoder(connector);
10433 	struct drm_crtc *possible_crtc;
10434 	struct drm_encoder *encoder = &intel_encoder->base;
10435 	struct drm_crtc *crtc = NULL;
10436 	struct drm_device *dev = encoder->dev;
10437 	struct drm_framebuffer *fb;
10438 	struct drm_mode_config *config = &dev->mode_config;
10439 	struct drm_atomic_state *state = NULL, *restore_state = NULL;
10440 	struct drm_connector_state *connector_state;
10441 	struct intel_crtc_state *crtc_state;
10442 	int ret, i = -1;
10443 
10444 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10445 		      connector->base.id, connector->name,
10446 		      encoder->base.id, encoder->name);
10447 
10448 	old->restore_state = NULL;
10449 
10450 retry:
10451 	ret = drm_modeset_lock(&config->connection_mutex, ctx);
10452 	if (ret)
10453 		goto fail;
10454 
10455 	/*
10456 	 * Algorithm gets a little messy:
10457 	 *
10458 	 *   - if the connector already has an assigned crtc, use it (but make
10459 	 *     sure it's on first)
10460 	 *
10461 	 *   - try to find the first unused crtc that can drive this connector,
10462 	 *     and use that if we find one
10463 	 */
10464 
10465 	/* See if we already have a CRTC for this connector */
10466 	if (connector->state->crtc) {
10467 		crtc = connector->state->crtc;
10468 
10469 		ret = drm_modeset_lock(&crtc->mutex, ctx);
10470 		if (ret)
10471 			goto fail;
10472 
10473 		/* Make sure the crtc and connector are running */
10474 		goto found;
10475 	}
10476 
10477 	/* Find an unused one (if possible) */
10478 	for_each_crtc(dev, possible_crtc) {
10479 		i++;
10480 		if (!(encoder->possible_crtcs & (1 << i)))
10481 			continue;
10482 
10483 		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
10484 		if (ret)
10485 			goto fail;
10486 
10487 		if (possible_crtc->state->enable) {
10488 			drm_modeset_unlock(&possible_crtc->mutex);
10489 			continue;
10490 		}
10491 
10492 		crtc = possible_crtc;
10493 		break;
10494 	}
10495 
10496 	/*
10497 	 * If we didn't find an unused CRTC, don't use any.
10498 	 */
10499 	if (!crtc) {
10500 		DRM_DEBUG_KMS("no pipe available for load-detect\n");
10501 		goto fail;
10502 	}
10503 
10504 found:
10505 	intel_crtc = to_intel_crtc(crtc);
10506 
10507 	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
10508 	if (ret)
10509 		goto fail;
10510 
10511 	state = drm_atomic_state_alloc(dev);
10512 	restore_state = drm_atomic_state_alloc(dev);
10513 	if (!state || !restore_state) {
10514 		ret = -ENOMEM;
10515 		goto fail;
10516 	}
10517 
10518 	state->acquire_ctx = ctx;
10519 	restore_state->acquire_ctx = ctx;
10520 
10521 	connector_state = drm_atomic_get_connector_state(state, connector);
10522 	if (IS_ERR(connector_state)) {
10523 		ret = PTR_ERR(connector_state);
10524 		goto fail;
10525 	}
10526 
10527 	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
10528 	if (ret)
10529 		goto fail;
10530 
10531 	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
10532 	if (IS_ERR(crtc_state)) {
10533 		ret = PTR_ERR(crtc_state);
10534 		goto fail;
10535 	}
10536 
10537 	crtc_state->base.active = crtc_state->base.enable = true;
10538 
10539 	if (!mode)
10540 		mode = &load_detect_mode;
10541 
10542 	/* We need a framebuffer large enough to accommodate all accesses
10543 	 * that the plane may generate whilst we perform load detection.
10544 	 * We cannot rely on the fbcon either being present (we get called
10545 	 * during its initialisation to detect all boot displays, or it may
10546 	 * not even exist) or being large enough to satisfy the
10547 	 * requested mode.
10548 	 */
10549 	fb = mode_fits_in_fbdev(dev, mode);
10550 	if (fb == NULL) {
10551 		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
10552 		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
10553 	} else
10554 		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
10555 	if (IS_ERR(fb)) {
10556 		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
10557 		goto fail;
10558 	}
10559 
10560 	ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
10561 	if (ret)
10562 		goto fail;
10563 
10564 	drm_framebuffer_unreference(fb);
10565 
10566 	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
10567 	if (ret)
10568 		goto fail;
10569 
10570 	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
10571 	if (!ret)
10572 		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
10573 	if (!ret)
10574 		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
10575 	if (ret) {
10576 		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
10577 		goto fail;
10578 	}
10579 
10580 	ret = drm_atomic_commit(state);
10581 	if (ret) {
10582 		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
10583 		goto fail;
10584 	}
10585 
10586 	old->restore_state = restore_state;
10587 
10588 	/* let the connector get through one full cycle before testing */
10589 	intel_wait_for_vblank(dev, intel_crtc->pipe);
10590 	return true;
10591 
10592 fail:
10593 	drm_atomic_state_free(state);
10594 	drm_atomic_state_free(restore_state);
10595 	restore_state = state = NULL;
10596 
10597 	if (ret == -EDEADLK) {
10598 		drm_modeset_backoff(ctx);
10599 		goto retry;
10600 	}
10601 
10602 	return false;
10603 }
10604 
10605 void intel_release_load_detect_pipe(struct drm_connector *connector,
10606 				    struct intel_load_detect_pipe *old,
10607 				    struct drm_modeset_acquire_ctx *ctx)
10608 {
10609 	struct intel_encoder *intel_encoder =
10610 		intel_attached_encoder(connector);
10611 	struct drm_encoder *encoder = &intel_encoder->base;
10612 	struct drm_atomic_state *state = old->restore_state;
10613 	int ret;
10614 
10615 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10616 		      connector->base.id, connector->name,
10617 		      encoder->base.id, encoder->name);
10618 
10619 	if (!state)
10620 		return;
10621 
10622 	ret = drm_atomic_commit(state);
10623 	if (ret) {
10624 		DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
10625 		drm_atomic_state_free(state);
10626 	}
10627 }
10628 
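/* Look up the DPLL reference clock frequency, in kHz. */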
10629 static int i9xx_pll_refclk(struct drm_device *dev,
10630 			   const struct intel_crtc_state *pipe_config)
10631 {
10632 	struct drm_i915_private *dev_priv = dev->dev_private;
10633 	u32 dpll = pipe_config->dpll_hw_state.dpll;
10634 
10635 	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
10636 		return dev_priv->vbt.lvds_ssc_freq;
10637 	else if (HAS_PCH_SPLIT(dev))
10638 		return 120000;
10639 	else if (!IS_GEN2(dev))
10640 		return 96000;
10641 	else
10642 		return 48000;
10643 }
10644 
10645 /* Returns the clock of the currently programmed mode of the given pipe. */
10646 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
10647 				struct intel_crtc_state *pipe_config)
10648 {
10649 	struct drm_device *dev = crtc->base.dev;
10650 	struct drm_i915_private *dev_priv = dev->dev_private;
10651 	int pipe = pipe_config->cpu_transcoder;
10652 	u32 dpll = pipe_config->dpll_hw_state.dpll;
10653 	u32 fp;
10654 	intel_clock_t clock;
10655 	int port_clock;
10656 	int refclk = i9xx_pll_refclk(dev, pipe_config);
10657 
10658 	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
10659 		fp = pipe_config->dpll_hw_state.fp0;
10660 	else
10661 		fp = pipe_config->dpll_hw_state.fp1;
10662 
10663 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
10664 	if (IS_PINEVIEW(dev)) {
10665 		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
10666 		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
10667 	} else {
10668 		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
10669 		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
10670 	}
10671 
10672 	if (!IS_GEN2(dev)) {
10673 		if (IS_PINEVIEW(dev))
10674 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
10675 				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
10676 		else
10677 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
10678 			       DPLL_FPA01_P1_POST_DIV_SHIFT);
10679 
10680 		switch (dpll & DPLL_MODE_MASK) {
10681 		case DPLLB_MODE_DAC_SERIAL:
10682 			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
10683 				5 : 10;
10684 			break;
10685 		case DPLLB_MODE_LVDS:
10686 			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
10687 				7 : 14;
10688 			break;
10689 		default:
10690 			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
10691 				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
10692 			return;
10693 		}
10694 
10695 		if (IS_PINEVIEW(dev))
10696 			port_clock = pnv_calc_dpll_params(refclk, &clock);
10697 		else
10698 			port_clock = i9xx_calc_dpll_params(refclk, &clock);
10699 	} else {
10700 		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
10701 		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
10702 
10703 		if (is_lvds) {
10704 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
10705 				       DPLL_FPA01_P1_POST_DIV_SHIFT);
10706 
10707 			if (lvds & LVDS_CLKB_POWER_UP)
10708 				clock.p2 = 7;
10709 			else
10710 				clock.p2 = 14;
10711 		} else {
10712 			if (dpll & PLL_P1_DIVIDE_BY_TWO)
10713 				clock.p1 = 2;
10714 			else {
10715 				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
10716 					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
10717 			}
10718 			if (dpll & PLL_P2_DIVIDE_BY_4)
10719 				clock.p2 = 4;
10720 			else
10721 				clock.p2 = 2;
10722 		}
10723 
10724 		port_clock = i9xx_calc_dpll_params(refclk, &clock);
10725 	}
10726 
10727 	/*
10728 	 * This value includes pixel_multiplier. We will use
10729 	 * port_clock to compute adjusted_mode.crtc_clock in the
10730 	 * encoder's get_config() function.
10731 	 */
10732 	pipe_config->port_clock = port_clock;
10733 }
10734 
10735 int intel_dotclock_calculate(int link_freq,
10736 			     const struct intel_link_m_n *m_n)
10737 {
10738 	/*
10739 	 * The calculation for the data clock is:
10740 	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
10741 	 * But we want to avoid losing precision if possible, so:
10742 	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
10743 	 *
10744 	 * and, using the link m/n values, the dot clock is simply:
10745 	 * dot_clock = (m * link_clock) / n
10746 	 */
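	/*
	 * Worked example with illustrative (reduced) m/n values:
	 * link_m = 22, link_n = 40 on a 270000 kHz link give
	 * 22 * 270000 / 40 = 148500 kHz, a 1920x1080@60 dot clock.
	 */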
10747 
10748 	if (!m_n->link_n)
10749 		return 0;
10750 
10751 	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
10752 }
10753 
10754 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
10755 				   struct intel_crtc_state *pipe_config)
10756 {
10757 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10758 
10759 	/* read out port_clock from the DPLL */
10760 	i9xx_crtc_clock_get(crtc, pipe_config);
10761 
10762 	/*
10763 	 * In case there is an active pipe without active ports,
10764 	 * we may need some idea for the dotclock anyway.
10765 	 * Calculate one based on the FDI configuration.
10766 	 */
10767 	pipe_config->base.adjusted_mode.crtc_clock =
10768 		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
10769 					 &pipe_config->fdi_m_n);
10770 }
10771 
10772 /** Returns the currently programmed mode of the given pipe. */
10773 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
10774 					     struct drm_crtc *crtc)
10775 {
10776 	struct drm_i915_private *dev_priv = dev->dev_private;
10777 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10778 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
10779 	struct drm_display_mode *mode;
10780 	struct intel_crtc_state *pipe_config;
10781 	int htot = I915_READ(HTOTAL(cpu_transcoder));
10782 	int hsync = I915_READ(HSYNC(cpu_transcoder));
10783 	int vtot = I915_READ(VTOTAL(cpu_transcoder));
10784 	int vsync = I915_READ(VSYNC(cpu_transcoder));
10785 	enum i915_pipe pipe = intel_crtc->pipe;
10786 
10787 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
10788 	if (!mode)
10789 		return NULL;
10790 
10791 	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
10792 	if (!pipe_config) {
10793 		kfree(mode);
10794 		return NULL;
10795 	}
10796 
10797 	/*
10798 	 * Construct a pipe_config sufficient for getting the clock info
10799 	 * back out of crtc_clock_get.
10800 	 *
10801 	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
10802 	 * to use a real value here instead.
10803 	 */
10804 	pipe_config->cpu_transcoder = (enum transcoder) pipe;
10805 	pipe_config->pixel_multiplier = 1;
10806 	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
10807 	pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
10808 	pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
10809 	i9xx_crtc_clock_get(intel_crtc, pipe_config);
10810 
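	/*
	 * The timing registers store each value minus one, with the
	 * active/start count in the low 16 bits and the total/end count
	 * in the high 16 bits, hence the masks, shifts and +1 below.
	 */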
10811 	mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
10812 	mode->hdisplay = (htot & 0xffff) + 1;
10813 	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
10814 	mode->hsync_start = (hsync & 0xffff) + 1;
10815 	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
10816 	mode->vdisplay = (vtot & 0xffff) + 1;
10817 	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
10818 	mode->vsync_start = (vsync & 0xffff) + 1;
10819 	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
10820 
10821 	drm_mode_set_name(mode);
10822 
10823 	kfree(pipe_config);
10824 
10825 	return mode;
10826 }
10827 
10828 void intel_mark_busy(struct drm_device *dev)
10829 {
10830 	struct drm_i915_private *dev_priv = dev->dev_private;
10831 
10832 	if (dev_priv->mm.busy)
10833 		return;
10834 
10835 	intel_runtime_pm_get(dev_priv);
10836 	i915_update_gfx_val(dev_priv);
10837 	if (INTEL_INFO(dev)->gen >= 6)
10838 		gen6_rps_busy(dev_priv);
10839 	dev_priv->mm.busy = true;
10840 }
10841 
10842 void intel_mark_idle(struct drm_device *dev)
10843 {
10844 	struct drm_i915_private *dev_priv = dev->dev_private;
10845 
10846 	if (!dev_priv->mm.busy)
10847 		return;
10848 
10849 	dev_priv->mm.busy = false;
10850 
10851 	if (INTEL_INFO(dev)->gen >= 6)
10852 		gen6_rps_idle(dev->dev_private);
10853 
10854 	intel_runtime_pm_put(dev_priv);
10855 }
10856 
10857 static void intel_crtc_destroy(struct drm_crtc *crtc)
10858 {
10859 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10860 	struct drm_device *dev = crtc->dev;
10861 	struct intel_unpin_work *work;
10862 
10863 	spin_lock_irq(&dev->event_lock);
10864 	work = intel_crtc->unpin_work;
10865 	intel_crtc->unpin_work = NULL;
10866 	spin_unlock_irq(&dev->event_lock);
10867 
10868 	if (work) {
10869 		cancel_work_sync(&work->work);
10870 		kfree(work);
10871 	}
10872 
10873 	drm_crtc_cleanup(crtc);
10874 
10875 	kfree(intel_crtc);
10876 }
10877 
10878 static void intel_unpin_work_fn(struct work_struct *__work)
10879 {
10880 	struct intel_unpin_work *work =
10881 		container_of(__work, struct intel_unpin_work, work);
10882 	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
10883 	struct drm_device *dev = crtc->base.dev;
10884 	struct drm_plane *primary = crtc->base.primary;
10885 
10886 	mutex_lock(&dev->struct_mutex);
10887 	intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
10888 	drm_gem_object_unreference(&work->pending_flip_obj->base);
10889 
10890 	if (work->flip_queued_req)
10891 		i915_gem_request_assign(&work->flip_queued_req, NULL);
10892 	mutex_unlock(&dev->struct_mutex);
10893 
10894 	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
10895 	intel_fbc_post_update(crtc);
10896 	drm_framebuffer_unreference(work->old_fb);
10897 
10898 	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
10899 	atomic_dec(&crtc->unpin_work_count);
10900 
10901 	kfree(work);
10902 }
10903 
10904 static void do_intel_finish_page_flip(struct drm_device *dev,
10905 				      struct drm_crtc *crtc)
10906 {
10907 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10908 	struct intel_unpin_work *work;
10909 	unsigned long flags;
10910 
10911 	/* Ignore early vblank irqs */
10912 	if (intel_crtc == NULL)
10913 		return;
10914 
10915 	/*
10916 	 * This is called both by irq handlers and the reset code (to complete
10917 	 * lost pageflips) so needs the full irqsave spinlocks.
10918 	 */
10919 	spin_lock_irqsave(&dev->event_lock, flags);
10920 	work = intel_crtc->unpin_work;
10921 
10922 	/* Ensure we don't miss a work->pending update ... */
10923 	smp_rmb();
10924 
10925 	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
10926 		spin_unlock_irqrestore(&dev->event_lock, flags);
10927 		return;
10928 	}
10929 
10930 	page_flip_completed(intel_crtc);
10931 
10932 	spin_unlock_irqrestore(&dev->event_lock, flags);
10933 }
10934 
10935 void intel_finish_page_flip(struct drm_device *dev, int pipe)
10936 {
10937 	struct drm_i915_private *dev_priv = dev->dev_private;
10938 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
10939 
10940 	do_intel_finish_page_flip(dev, crtc);
10941 }
10942 
10943 void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
10944 {
10945 	struct drm_i915_private *dev_priv = dev->dev_private;
10946 	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
10947 
10948 	do_intel_finish_page_flip(dev, crtc);
10949 }
10950 
10951 /* Is 'a' after or equal to 'b'? */
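/* The unsigned subtraction wraps modulo 2^32, so the comparison stays
 * valid across counter wraparound while the counts are within 2^31. */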
10952 static bool g4x_flip_count_after_eq(u32 a, u32 b)
10953 {
10954 	return !((a - b) & 0x80000000);
10955 }
10956 
10957 static bool page_flip_finished(struct intel_crtc *crtc)
10958 {
10959 	struct drm_device *dev = crtc->base.dev;
10960 	struct drm_i915_private *dev_priv = dev->dev_private;
10961 	unsigned reset_counter;
10962 
10963 	reset_counter = i915_reset_counter(&dev_priv->gpu_error);
10964 	if (crtc->reset_counter != reset_counter)
10965 		return true;
10966 
10967 	/*
10968 	 * The relevant registers don't exist on pre-ctg.
10969 	 * As the flip done interrupt doesn't trigger for mmio
10970 	 * flips on gmch platforms, a flip count check isn't
10971 	 * really needed there. But since ctg has the registers,
10972 	 * include it in the check anyway.
10973 	 */
10974 	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
10975 		return true;
10976 
10977 	/*
10978 	 * BDW signals flip done immediately if the plane
10979 	 * is disabled, even if the plane enable is already
10980 	 * armed to occur at the next vblank :(
10981 	 */
10982 
10983 	/*
10984 	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
10985 	 * used the same base address. In that case the mmio flip might
10986 	 * have completed, but the CS hasn't even executed the flip yet.
10987 	 *
10988 	 * A flip count check isn't enough as the CS might have updated
10989 	 * the base address just after start of vblank, but before we
10990 	 * managed to process the interrupt. This means we'd complete the
10991 	 * CS flip too soon.
10992 	 *
10993 	 * Combining both checks should get us a good enough result. It may
10994 	 * still happen that the CS flip has been executed, but has not
10995 	 * yet actually completed. But in case the base address is the same
10996 	 * anyway, we don't really care.
10997 	 */
10998 	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
10999 		crtc->unpin_work->gtt_offset &&
11000 		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
11001 				    crtc->unpin_work->flip_count);
11002 }
11003 
11004 void intel_prepare_page_flip(struct drm_device *dev, int plane)
11005 {
11006 	struct drm_i915_private *dev_priv = dev->dev_private;
11007 	struct intel_crtc *intel_crtc =
11008 		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
11009 	unsigned long flags;
11010 
11011 
11012 	/*
11013 	 * This is called both by irq handlers and the reset code (to complete
11014 	 * lost pageflips) so needs the full irqsave spinlocks.
11015 	 *
11016 	 * NB: An MMIO update of the plane base pointer will also
11017 	 * generate a page-flip completion irq, i.e. every modeset
11018 	 * is also accompanied by a spurious intel_prepare_page_flip().
11019 	 */
11020 	spin_lock_irqsave(&dev->event_lock, flags);
11021 	if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
11022 		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
11023 	spin_unlock_irqrestore(&dev->event_lock, flags);
11024 }
11025 
11026 static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
11027 {
11028 	/* Ensure that the work item is consistent when activating it ... */
11029 	smp_wmb();
11030 	atomic_set(&work->pending, INTEL_FLIP_PENDING);
11031 	/* and that it is marked active as soon as the irq could fire. */
11032 	smp_wmb();
11033 }
11034 
11035 static int intel_gen2_queue_flip(struct drm_device *dev,
11036 				 struct drm_crtc *crtc,
11037 				 struct drm_framebuffer *fb,
11038 				 struct drm_i915_gem_object *obj,
11039 				 struct drm_i915_gem_request *req,
11040 				 uint32_t flags)
11041 {
11042 	struct intel_engine_cs *engine = req->engine;
11043 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11044 	u32 flip_mask;
11045 	int ret;
11046 
11047 	ret = intel_ring_begin(req, 6);
11048 	if (ret)
11049 		return ret;
11050 
11051 	/* Can't queue multiple flips, so wait for the previous
11052 	 * one to finish before executing the next.
11053 	 */
11054 	if (intel_crtc->plane)
11055 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
11056 	else
11057 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
11058 	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
11059 	intel_ring_emit(engine, MI_NOOP);
11060 	intel_ring_emit(engine, MI_DISPLAY_FLIP |
11061 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11062 	intel_ring_emit(engine, fb->pitches[0]);
11063 	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
11064 	intel_ring_emit(engine, 0); /* aux display base address, unused */
11065 
11066 	intel_mark_page_flip_active(intel_crtc->unpin_work);
11067 	return 0;
11068 }
11069 
11070 static int intel_gen3_queue_flip(struct drm_device *dev,
11071 				 struct drm_crtc *crtc,
11072 				 struct drm_framebuffer *fb,
11073 				 struct drm_i915_gem_object *obj,
11074 				 struct drm_i915_gem_request *req,
11075 				 uint32_t flags)
11076 {
11077 	struct intel_engine_cs *engine = req->engine;
11078 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11079 	u32 flip_mask;
11080 	int ret;
11081 
11082 	ret = intel_ring_begin(req, 6);
11083 	if (ret)
11084 		return ret;
11085 
11086 	if (intel_crtc->plane)
11087 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
11088 	else
11089 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
11090 	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
11091 	intel_ring_emit(engine, MI_NOOP);
11092 	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
11093 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11094 	intel_ring_emit(engine, fb->pitches[0]);
11095 	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
11096 	intel_ring_emit(engine, MI_NOOP);
11097 
11098 	intel_mark_page_flip_active(intel_crtc->unpin_work);
11099 	return 0;
11100 }
11101 
11102 static int intel_gen4_queue_flip(struct drm_device *dev,
11103 				 struct drm_crtc *crtc,
11104 				 struct drm_framebuffer *fb,
11105 				 struct drm_i915_gem_object *obj,
11106 				 struct drm_i915_gem_request *req,
11107 				 uint32_t flags)
11108 {
11109 	struct intel_engine_cs *engine = req->engine;
11110 	struct drm_i915_private *dev_priv = dev->dev_private;
11111 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11112 	uint32_t pf, pipesrc;
11113 	int ret;
11114 
11115 	ret = intel_ring_begin(req, 4);
11116 	if (ret)
11117 		return ret;
11118 
11119 	/* i965+ uses the linear or tiled offsets from the
11120 	 * Display Registers (which do not change across a page-flip)
11121 	 * so we need only reprogram the base address.
11122 	 */
11123 	intel_ring_emit(engine, MI_DISPLAY_FLIP |
11124 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11125 	intel_ring_emit(engine, fb->pitches[0]);
11126 	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset |
11127 			obj->tiling_mode);
11128 
11129 	/* XXX Enabling the panel-fitter across page-flip is so far
11130 	 * untested on non-native modes, so ignore it for now.
11131 	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
11132 	 */
11133 	pf = 0;
11134 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11135 	intel_ring_emit(engine, pf | pipesrc);
11136 
11137 	intel_mark_page_flip_active(intel_crtc->unpin_work);
11138 	return 0;
11139 }
11140 
11141 static int intel_gen6_queue_flip(struct drm_device *dev,
11142 				 struct drm_crtc *crtc,
11143 				 struct drm_framebuffer *fb,
11144 				 struct drm_i915_gem_object *obj,
11145 				 struct drm_i915_gem_request *req,
11146 				 uint32_t flags)
11147 {
11148 	struct intel_engine_cs *engine = req->engine;
11149 	struct drm_i915_private *dev_priv = dev->dev_private;
11150 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11151 	uint32_t pf, pipesrc;
11152 	int ret;
11153 
11154 	ret = intel_ring_begin(req, 4);
11155 	if (ret)
11156 		return ret;
11157 
11158 	intel_ring_emit(engine, MI_DISPLAY_FLIP |
11159 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11160 	intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
11161 	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
11162 
11163 	/* Contrary to the suggestions in the documentation,
11164 	 * "Enable Panel Fitter" does not seem to be required when page
11165 	 * flipping with a non-native mode and, worse, causes a normal
11166 	 * modeset to fail.
11167 	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
11168 	 */
11169 	pf = 0;
11170 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11171 	intel_ring_emit(engine, pf | pipesrc);
11172 
11173 	intel_mark_page_flip_active(intel_crtc->unpin_work);
11174 	return 0;
11175 }
11176 
11177 static int intel_gen7_queue_flip(struct drm_device *dev,
11178 				 struct drm_crtc *crtc,
11179 				 struct drm_framebuffer *fb,
11180 				 struct drm_i915_gem_object *obj,
11181 				 struct drm_i915_gem_request *req,
11182 				 uint32_t flags)
11183 {
11184 	struct intel_engine_cs *engine = req->engine;
11185 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11186 	uint32_t plane_bit = 0;
11187 	int len, ret;
11188 
11189 	switch (intel_crtc->plane) {
11190 	case PLANE_A:
11191 		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
11192 		break;
11193 	case PLANE_B:
11194 		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
11195 		break;
11196 	case PLANE_C:
11197 		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
11198 		break;
11199 	default:
11200 		WARN_ONCE(1, "unknown plane in flip command\n");
11201 		return -ENODEV;
11202 	}
11203 
11204 	len = 4;
11205 	if (engine->id == RCS) {
11206 		len += 6;
11207 		/*
11208 		 * On Gen 8, SRM takes an extra dword to accommodate
11209 		 * 48-bit addresses, and we need a NOOP for the batch size to
11210 		 * stay even.
11211 		 */
11212 		if (IS_GEN8(dev))
11213 			len += 2;
11214 	}
11215 
11216 	/*
11217 	 * BSpec MI_DISPLAY_FLIP for IVB:
11218 	 * "The full packet must be contained within the same cache line."
11219 	 *
11220 	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
11221 	 * cacheline, if we ever start emitting more commands before
11222 	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
11223 	 * then do the cacheline alignment, and finally emit the
11224 	 * MI_DISPLAY_FLIP.
11225 	 */
11226 	ret = intel_ring_cacheline_align(req);
11227 	if (ret)
11228 		return ret;
11229 
11230 	ret = intel_ring_begin(req, len);
11231 	if (ret)
11232 		return ret;
11233 
11234 	/* Unmask the flip-done completion message. Note that the bspec says that
11235 	 * we should do this for both the BCS and RCS, and that we must not unmask
11236 	 * more than one flip event at any time (or ensure that one flip message
11237 	 * can be sent by waiting for flip-done prior to queueing new flips).
11238 	 * Experimentation says that BCS works despite DERRMR masking all
11239 	 * flip-done completion events and that unmasking all planes at once
11240 	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
11241 	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
11242 	 */
11243 	if (engine->id == RCS) {
11244 		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
11245 		intel_ring_emit_reg(engine, DERRMR);
11246 		intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
11247 					  DERRMR_PIPEB_PRI_FLIP_DONE |
11248 					  DERRMR_PIPEC_PRI_FLIP_DONE));
11249 		if (IS_GEN8(dev))
11250 			intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 |
11251 					      MI_SRM_LRM_GLOBAL_GTT);
11252 		else
11253 			intel_ring_emit(engine, MI_STORE_REGISTER_MEM |
11254 					      MI_SRM_LRM_GLOBAL_GTT);
11255 		intel_ring_emit_reg(engine, DERRMR);
11256 		intel_ring_emit(engine, engine->scratch.gtt_offset + 256);
11257 		if (IS_GEN8(dev)) {
11258 			intel_ring_emit(engine, 0);
11259 			intel_ring_emit(engine, MI_NOOP);
11260 		}
11261 	}
11262 
11263 	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
11264 	intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
11265 	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
11266 	intel_ring_emit(engine, (MI_NOOP));
11267 
11268 	intel_mark_page_flip_active(intel_crtc->unpin_work);
11269 	return 0;
11270 }
11271 
11272 static bool use_mmio_flip(struct intel_engine_cs *engine,
11273 			  struct drm_i915_gem_object *obj)
11274 {
11275 	/*
11276 	 * This is not used on older platforms, because the lack of a
11277 	 * flip done interrupt forces us to use CS flips there. Older
11278 	 * platforms derive flip done using some clever
11279 	 * tricks involving the flip_pending status bits and vblank irqs.
11280 	 * So using MMIO flips there would disrupt this mechanism.
11281 	 */
11282 
11283 	if (engine == NULL)
11284 		return true;
11285 
11286 	if (INTEL_INFO(engine->dev)->gen < 5)
11287 		return false;
11288 
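	/* i915.use_mmio_flip: < 0 never, > 0 always, 0 auto-select */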
11289 	if (i915.use_mmio_flip < 0)
11290 		return false;
11291 	else if (i915.use_mmio_flip > 0)
11292 		return true;
11293 	else if (i915.enable_execlists)
11294 		return true;
11295 #if 0
11296 	else if (obj->base.dma_buf &&
11297 		 !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv,
11298 						       false))
11299 		return true;
11300 #endif
11301 	else
11302 		return engine != i915_gem_request_get_engine(obj->last_write_req);
11303 }
11304 
11305 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
11306 			     unsigned int rotation,
11307 			     struct intel_unpin_work *work)
11308 {
11309 	struct drm_device *dev = intel_crtc->base.dev;
11310 	struct drm_i915_private *dev_priv = dev->dev_private;
11311 	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
11312 	const enum i915_pipe pipe = intel_crtc->pipe;
11313 	u32 ctl, stride, tile_height;
11314 
11315 	ctl = I915_READ(PLANE_CTL(pipe, 0));
11316 	ctl &= ~PLANE_CTL_TILED_MASK;
11317 	switch (fb->modifier[0]) {
11318 	case DRM_FORMAT_MOD_NONE:
11319 		break;
11320 	case I915_FORMAT_MOD_X_TILED:
11321 		ctl |= PLANE_CTL_TILED_X;
11322 		break;
11323 	case I915_FORMAT_MOD_Y_TILED:
11324 		ctl |= PLANE_CTL_TILED_Y;
11325 		break;
11326 	case I915_FORMAT_MOD_Yf_TILED:
11327 		ctl |= PLANE_CTL_TILED_YF;
11328 		break;
11329 	default:
11330 		MISSING_CASE(fb->modifier[0]);
11331 	}
11332 
11333 	/*
11334 	 * The stride is expressed either in chunks of 64 bytes for
11335 	 * linear buffers or in number of tiles for tiled buffers.
11336 	 */
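	/* e.g. a linear buffer (64 byte units) with a 4096 byte pitch gives stride = 64 */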
11337 	if (intel_rotation_90_or_270(rotation)) {
11338 		/* stride = Surface height in tiles */
11339 		tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0);
11340 		stride = DIV_ROUND_UP(fb->height, tile_height);
11341 	} else {
11342 		stride = fb->pitches[0] /
11343 			intel_fb_stride_alignment(dev_priv, fb->modifier[0],
11344 						  fb->pixel_format);
11345 	}
11346 
11347 	/*
11348 	 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
11349 	 * PLANE_SURF updates, so the update is then guaranteed to be atomic.
11350 	 */
11351 	I915_WRITE(PLANE_CTL(pipe, 0), ctl);
11352 	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
11353 
11354 	I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
11355 	POSTING_READ(PLANE_SURF(pipe, 0));
11356 }
11357 
11358 static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
11359 			     struct intel_unpin_work *work)
11360 {
11361 	struct drm_device *dev = intel_crtc->base.dev;
11362 	struct drm_i915_private *dev_priv = dev->dev_private;
11363 	struct intel_framebuffer *intel_fb =
11364 		to_intel_framebuffer(intel_crtc->base.primary->fb);
11365 	struct drm_i915_gem_object *obj = intel_fb->obj;
11366 	i915_reg_t reg = DSPCNTR(intel_crtc->plane);
11367 	u32 dspcntr;
11368 
11369 	dspcntr = I915_READ(reg);
11370 
11371 	if (obj->tiling_mode != I915_TILING_NONE)
11372 		dspcntr |= DISPPLANE_TILED;
11373 	else
11374 		dspcntr &= ~DISPPLANE_TILED;
11375 
11376 	I915_WRITE(reg, dspcntr);
11377 
11378 	I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
11379 	POSTING_READ(DSPSURF(intel_crtc->plane));
11380 }
11381 
11382 /*
11383  * XXX: This is the temporary way to update the plane registers until we get
11384  * around to using the usual plane update functions for MMIO flips
11385  */
11386 static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
11387 {
11388 	struct intel_crtc *crtc = mmio_flip->crtc;
11389 	struct intel_unpin_work *work;
11390 
11391 	spin_lock_irq(&crtc->base.dev->event_lock);
11392 	work = crtc->unpin_work;
11393 	spin_unlock_irq(&crtc->base.dev->event_lock);
11394 	if (work == NULL)
11395 		return;
11396 
11397 	intel_mark_page_flip_active(work);
11398 
11399 	intel_pipe_update_start(crtc);
11400 
11401 	if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
11402 		skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
11403 	else
11404 		/* use_mmio_flip() restricts MMIO flips to ilk+ */
11405 		ilk_do_mmio_flip(crtc, work);
11406 
11407 	intel_pipe_update_end(crtc);
11408 }
11409 
11410 static void intel_mmio_flip_work_func(struct work_struct *work)
11411 {
11412 	struct intel_mmio_flip *mmio_flip =
11413 		container_of(work, struct intel_mmio_flip, work);
11414 #if 0
11415 	struct intel_framebuffer *intel_fb =
11416 		to_intel_framebuffer(mmio_flip->crtc->base.primary->fb);
11417 	struct drm_i915_gem_object *obj = intel_fb->obj;
11418 #endif
11419 
11420 	if (mmio_flip->req) {
11421 		WARN_ON(__i915_wait_request(mmio_flip->req,
11422 					    false, NULL,
11423 					    &mmio_flip->i915->rps.mmioflips));
11424 		i915_gem_request_unreference__unlocked(mmio_flip->req);
11425 	}
11426 
11427 	/* For a framebuffer backed by a dmabuf, wait for the fence */
11428 #if 0
11429 	if (obj->base.dma_buf)
11430 		WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
11431 							    false, false,
11432 							    MAX_SCHEDULE_TIMEOUT) < 0);
11433 #endif
11434 
11435 	intel_do_mmio_flip(mmio_flip);
11436 	kfree(mmio_flip);
11437 }
11438 
11439 static int intel_queue_mmio_flip(struct drm_device *dev,
11440 				 struct drm_crtc *crtc,
11441 				 struct drm_i915_gem_object *obj)
11442 {
11443 	struct intel_mmio_flip *mmio_flip;
11444 
11445 	mmio_flip = kmalloc(sizeof(*mmio_flip), M_DRM, M_WAITOK);
11446 	if (mmio_flip == NULL)
11447 		return -ENOMEM;
11448 
11449 	mmio_flip->i915 = to_i915(dev);
11450 	mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
11451 	mmio_flip->crtc = to_intel_crtc(crtc);
11452 	mmio_flip->rotation = crtc->primary->state->rotation;
11453 
11454 	INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
11455 	schedule_work(&mmio_flip->work);
11456 
11457 	return 0;
11458 }
11459 
11460 static int intel_default_queue_flip(struct drm_device *dev,
11461 				    struct drm_crtc *crtc,
11462 				    struct drm_framebuffer *fb,
11463 				    struct drm_i915_gem_object *obj,
11464 				    struct drm_i915_gem_request *req,
11465 				    uint32_t flags)
11466 {
11467 	return -ENODEV;
11468 }
11469 
11470 static bool __intel_pageflip_stall_check(struct drm_device *dev,
11471 					 struct drm_crtc *crtc)
11472 {
11473 	struct drm_i915_private *dev_priv = dev->dev_private;
11474 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11475 	struct intel_unpin_work *work = intel_crtc->unpin_work;
11476 	u32 addr;
11477 
11478 	if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
11479 		return true;
11480 
11481 	if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
11482 		return false;
11483 
11484 	if (!work->enable_stall_check)
11485 		return false;
11486 
11487 	if (work->flip_ready_vblank == 0) {
11488 		if (work->flip_queued_req &&
11489 		    !i915_gem_request_completed(work->flip_queued_req, true))
11490 			return false;
11491 
11492 		work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
11493 	}
11494 
11495 	if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
11496 		return false;
11497 
11498 	/* Potential stall - if we see that the flip has happened,
11499 	 * assume a missed interrupt. */
11500 	if (INTEL_INFO(dev)->gen >= 4)
11501 		addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
11502 	else
11503 		addr = I915_READ(DSPADDR(intel_crtc->plane));
11504 
11505 	/* There is a potential issue here with a false positive after a flip
11506 	 * to the same address. We could address this by checking for a
11507 	 * non-incrementing frame counter.
11508 	 */
11509 	return addr == work->gtt_offset;
11510 }
11511 
11512 void intel_check_page_flip(struct drm_device *dev, int pipe)
11513 {
11514 	struct drm_i915_private *dev_priv = dev->dev_private;
11515 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11516 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11517 	struct intel_unpin_work *work;
11518 
11519 //	WARN_ON(!in_interrupt());
11520 
11521 	if (crtc == NULL)
11522 		return;
11523 
11524 	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
11525 	work = intel_crtc->unpin_work;
11526 	if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
11527 		WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
11528 			 work->flip_queued_vblank, drm_vblank_count(dev, pipe));
11529 		page_flip_completed(intel_crtc);
11530 		work = NULL;
11531 	}
11532 	if (work != NULL &&
11533 	    drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
11534 		intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
11535 	lockmgr(&dev->event_lock, LK_RELEASE);
11536 }
11537 
11538 static int intel_crtc_page_flip(struct drm_crtc *crtc,
11539 				struct drm_framebuffer *fb,
11540 				struct drm_pending_vblank_event *event,
11541 				uint32_t page_flip_flags)
11542 {
11543 	struct drm_device *dev = crtc->dev;
11544 	struct drm_i915_private *dev_priv = dev->dev_private;
11545 	struct drm_framebuffer *old_fb = crtc->primary->fb;
11546 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11547 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11548 	struct drm_plane *primary = crtc->primary;
11549 	enum i915_pipe pipe = intel_crtc->pipe;
11550 	struct intel_unpin_work *work;
11551 	struct intel_engine_cs *engine;
11552 	bool mmio_flip;
11553 	struct drm_i915_gem_request *request = NULL;
11554 	int ret;
11555 
11556 	/*
11557 	 * drm_mode_page_flip_ioctl() should already catch this, but double
11558 	 * check to be safe.  In the future we may enable pageflipping from
11559 	 * a disabled primary plane.
11560 	 */
11561 	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
11562 		return -EBUSY;
11563 
11564 	/* Can't change pixel format via MI display flips. */
11565 	if (fb->pixel_format != crtc->primary->fb->pixel_format)
11566 		return -EINVAL;
11567 
11568 	/*
11569 	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
11570 	 * Note that pitch changes could also affect these registers.
11571 	 */
11572 	if (INTEL_INFO(dev)->gen > 3 &&
11573 	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
11574 	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
11575 		return -EINVAL;
11576 
11577 	if (i915_terminally_wedged(&dev_priv->gpu_error))
11578 		goto out_hang;
11579 
11580 	work = kzalloc(sizeof(*work), GFP_KERNEL);
11581 	if (work == NULL)
11582 		return -ENOMEM;
11583 
11584 	work->event = event;
11585 	work->crtc = crtc;
11586 	work->old_fb = old_fb;
11587 	INIT_WORK(&work->work, intel_unpin_work_fn);
11588 
11589 	ret = drm_crtc_vblank_get(crtc);
11590 	if (ret)
11591 		goto free_work;
11592 
11593 	/* We borrow the event spin lock for protecting unpin_work */
11594 	spin_lock_irq(&dev->event_lock);
11595 	if (intel_crtc->unpin_work) {
11596 		/* Before declaring the flip queue wedged, check if
11597 		 * the hardware completed the operation behind our backs.
11598 		 */
11599 		if (__intel_pageflip_stall_check(dev, crtc)) {
11600 			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
11601 			page_flip_completed(intel_crtc);
11602 		} else {
11603 			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
11604 			spin_unlock_irq(&dev->event_lock);
11605 
11606 			drm_crtc_vblank_put(crtc);
11607 			kfree(work);
11608 			return -EBUSY;
11609 		}
11610 	}
11611 	intel_crtc->unpin_work = work;
11612 	spin_unlock_irq(&dev->event_lock);
11613 
11614 	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
11615 		flush_workqueue(dev_priv->wq);
11616 
11617 	/* Reference the objects for the scheduled work. */
11618 	drm_framebuffer_reference(work->old_fb);
11619 	drm_gem_object_reference(&obj->base);
11620 
11621 	crtc->primary->fb = fb;
11622 	update_state_fb(crtc->primary);
11623 	intel_fbc_pre_update(intel_crtc);
11624 
11625 	work->pending_flip_obj = obj;
11626 
11627 	ret = i915_mutex_lock_interruptible(dev);
11628 	if (ret)
11629 		goto cleanup;
11630 
11631 	intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
11632 	if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) {
11633 		ret = -EIO;
11634 		goto cleanup;
11635 	}
11636 
11637 	atomic_inc(&intel_crtc->unpin_work_count);
11638 
11639 	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
11640 		work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
11641 
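	/*
	 * Select an engine that can execute MI_DISPLAY_FLIP on this
	 * platform; on gen7+ prefer the engine that last wrote the
	 * object, so the flip needs no cross-engine synchronisation.
	 */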
11642 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
11643 		engine = &dev_priv->engine[BCS];
11644 		if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
11645 			/* vlv: DISPLAY_FLIP fails to change tiling */
11646 			engine = NULL;
11647 	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
11648 		engine = &dev_priv->engine[BCS];
11649 	} else if (INTEL_INFO(dev)->gen >= 7) {
11650 		engine = i915_gem_request_get_engine(obj->last_write_req);
11651 		if (engine == NULL || engine->id != RCS)
11652 			engine = &dev_priv->engine[BCS];
11653 	} else {
11654 		engine = &dev_priv->engine[RCS];
11655 	}
11656 
11657 	mmio_flip = use_mmio_flip(engine, obj);
11658 
11659 	/* When using CS flips, we want to emit semaphores between rings.
11660 	 * However, when using mmio flips we will create a task to do the
11661 	 * synchronisation, so all we want here is to pin the framebuffer
11662 	 * into the display plane and skip any waits.
11663 	 */
11664 	if (!mmio_flip) {
11665 		ret = i915_gem_object_sync(obj, engine, &request);
11666 		if (ret)
11667 			goto cleanup_pending;
11668 	}
11669 
11670 	ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
11671 	if (ret)
11672 		goto cleanup_pending;
11673 
11674 	work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
11675 						  obj, 0);
11676 	work->gtt_offset += intel_crtc->dspaddr_offset;
11677 
11678 	if (mmio_flip) {
11679 		ret = intel_queue_mmio_flip(dev, crtc, obj);
11680 		if (ret)
11681 			goto cleanup_unpin;
11682 
11683 		i915_gem_request_assign(&work->flip_queued_req,
11684 					obj->last_write_req);
11685 	} else {
11686 		if (!request) {
11687 			request = i915_gem_request_alloc(engine, NULL);
11688 			if (IS_ERR(request)) {
11689 				ret = PTR_ERR(request);
11690 				goto cleanup_unpin;
11691 			}
11692 		}
11693 
11694 		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
11695 						   page_flip_flags);
11696 		if (ret)
11697 			goto cleanup_unpin;
11698 
11699 		i915_gem_request_assign(&work->flip_queued_req, request);
11700 	}
11701 
11702 	if (request)
11703 		i915_add_request_no_flush(request);
11704 
11705 	work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
11706 	work->enable_stall_check = true;
11707 
11708 	i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
11709 			  to_intel_plane(primary)->frontbuffer_bit);
11710 	mutex_unlock(&dev->struct_mutex);
11711 
11712 	intel_frontbuffer_flip_prepare(dev,
11713 				       to_intel_plane(primary)->frontbuffer_bit);
11714 
11715 	trace_i915_flip_request(intel_crtc->plane, obj);
11716 
11717 	return 0;
11718 
11719 cleanup_unpin:
11720 	intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
11721 cleanup_pending:
11722 	if (!IS_ERR_OR_NULL(request))
11723 		i915_add_request_no_flush(request);
11724 	atomic_dec(&intel_crtc->unpin_work_count);
11725 	mutex_unlock(&dev->struct_mutex);
11726 cleanup:
11727 	crtc->primary->fb = old_fb;
11728 	update_state_fb(crtc->primary);
11729 
11730 	drm_gem_object_unreference_unlocked(&obj->base);
11731 	drm_framebuffer_unreference(work->old_fb);
11732 
11733 	spin_lock_irq(&dev->event_lock);
11734 	intel_crtc->unpin_work = NULL;
11735 	spin_unlock_irq(&dev->event_lock);
11736 
11737 	drm_crtc_vblank_put(crtc);
11738 free_work:
11739 	kfree(work);
11740 
11741 	if (ret == -EIO) {
11742 		struct drm_atomic_state *state;
11743 		struct drm_plane_state *plane_state;
11744 
11745 out_hang:
11746 		state = drm_atomic_state_alloc(dev);
11747 		if (!state)
11748 			return -ENOMEM;
11749 		state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
11750 
11751 retry:
11752 		plane_state = drm_atomic_get_plane_state(state, primary);
11753 		ret = PTR_ERR_OR_ZERO(plane_state);
11754 		if (!ret) {
11755 			drm_atomic_set_fb_for_plane(plane_state, fb);
11756 
11757 			ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
11758 			if (!ret)
11759 				ret = drm_atomic_commit(state);
11760 		}
11761 
11762 		if (ret == -EDEADLK) {
11763 			drm_modeset_backoff(state->acquire_ctx);
11764 			drm_atomic_state_clear(state);
11765 			goto retry;
11766 		}
11767 
11768 		if (ret)
11769 			drm_atomic_state_free(state);
11770 
11771 		if (ret == 0 && event) {
11772 			spin_lock_irq(&dev->event_lock);
11773 			drm_crtc_send_vblank_event(crtc, event);
11774 			spin_unlock_irq(&dev->event_lock);
11775 		}
11776 	}
11777 	return ret;
11778 }
11779 
11780 
11781 /**
11782  * intel_wm_need_update - Check whether watermarks need updating
11783  * @plane: drm plane
11784  * @state: new plane state
11785  *
11786  * Check current plane state versus the new one to determine whether
11787  * watermarks need to be recalculated.
11788  *
11789  * Returns true if the watermarks need recalculating, false otherwise.
11790  */
11791 static bool intel_wm_need_update(struct drm_plane *plane,
11792 				 struct drm_plane_state *state)
11793 {
11794 	struct intel_plane_state *new = to_intel_plane_state(state);
11795 	struct intel_plane_state *cur = to_intel_plane_state(plane->state);
11796 
11797 	/* Update watermarks on visibility, tiling, rotation or size changes. */
11798 	if (new->visible != cur->visible)
11799 		return true;
11800 
11801 	if (!cur->base.fb || !new->base.fb)
11802 		return false;
11803 
11804 	if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
11805 	    cur->base.rotation != new->base.rotation ||
11806 	    drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
11807 	    drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
11808 	    drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
11809 	    drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
11810 		return true;
11811 
11812 	return false;
11813 }
11814 
11815 static bool needs_scaling(struct intel_plane_state *state)
11816 {
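	/* src is 16.16 fixed point, dst is integer pixels */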
11817 	int src_w = drm_rect_width(&state->src) >> 16;
11818 	int src_h = drm_rect_height(&state->src) >> 16;
11819 	int dst_w = drm_rect_width(&state->dst);
11820 	int dst_h = drm_rect_height(&state->dst);
11821 
11822 	return (src_w != dst_w || src_h != dst_h);
11823 }
11824 
11825 int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11826 				    struct drm_plane_state *plane_state)
11827 {
11828 	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
11829 	struct drm_crtc *crtc = crtc_state->crtc;
11830 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11831 	struct drm_plane *plane = plane_state->plane;
11832 	struct drm_device *dev = crtc->dev;
11833 	struct drm_i915_private *dev_priv = to_i915(dev);
11834 	struct intel_plane_state *old_plane_state =
11835 		to_intel_plane_state(plane->state);
11836 	int idx = intel_crtc->base.base.id, ret;
11837 	bool mode_changed = needs_modeset(crtc_state);
11838 	bool was_crtc_enabled = crtc->state->active;
11839 	bool is_crtc_enabled = crtc_state->active;
11840 	bool turn_off, turn_on, visible, was_visible;
11841 	struct drm_framebuffer *fb = plane_state->fb;
11842 
11843 	if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
11844 	    plane->type != DRM_PLANE_TYPE_CURSOR) {
11845 		ret = skl_update_scaler_plane(
11846 			to_intel_crtc_state(crtc_state),
11847 			to_intel_plane_state(plane_state));
11848 		if (ret)
11849 			return ret;
11850 	}
11851 
11852 	was_visible = old_plane_state->visible;
11853 	visible = to_intel_plane_state(plane_state)->visible;
11854 
11855 	if (!was_crtc_enabled && WARN_ON(was_visible))
11856 		was_visible = false;
11857 
11858 	/*
11859 	 * Visibility is calculated as if the crtc was on, but
11860 	 * after scaler setup everything depends on it being off
11861 	 * when the crtc isn't active.
11862 	 */
11863 	if (!is_crtc_enabled)
11864 		to_intel_plane_state(plane_state)->visible = visible = false;
11865 
11866 	if (!was_visible && !visible)
11867 		return 0;
11868 
11869 	if (fb != old_plane_state->base.fb)
11870 		pipe_config->fb_changed = true;
11871 
11872 	turn_off = was_visible && (!visible || mode_changed);
11873 	turn_on = visible && (!was_visible || mode_changed);
11874 
11875 	DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
11876 			 plane->base.id, fb ? fb->base.id : -1);
11877 
11878 	DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
11879 			 plane->base.id, was_visible, visible,
11880 			 turn_off, turn_on, mode_changed);
11881 
11882 	if (turn_on) {
11883 		pipe_config->update_wm_pre = true;
11884 
11885 		/* must disable cxsr around plane enable/disable */
11886 		if (plane->type != DRM_PLANE_TYPE_CURSOR)
11887 			pipe_config->disable_cxsr = true;
11888 	} else if (turn_off) {
11889 		pipe_config->update_wm_post = true;
11890 
11891 		/* must disable cxsr around plane enable/disable */
11892 		if (plane->type != DRM_PLANE_TYPE_CURSOR)
11893 			pipe_config->disable_cxsr = true;
11894 	} else if (intel_wm_need_update(plane, plane_state)) {
11895 		/* FIXME bollocks */
11896 		pipe_config->update_wm_pre = true;
11897 		pipe_config->update_wm_post = true;
11898 	}
11899 
11900 	/* Pre-gen9 platforms need two-step watermark updates */
11901 	if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) &&
11902 	    INTEL_INFO(dev)->gen < 9 && dev_priv->display.optimize_watermarks)
11903 		to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true;
11904 
11905 	if (visible || was_visible)
11906 		pipe_config->fb_bits |= to_intel_plane(plane)->frontbuffer_bit;
11907 
11908 	/*
11909 	 * WaCxSRDisabledForSpriteScaling:ivb
11910 	 *
11911 	 * cstate->update_wm was already set above, so this flag will
11912 	 * take effect when we commit and program watermarks.
11913 	 */
11914 	if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev) &&
11915 	    needs_scaling(to_intel_plane_state(plane_state)) &&
11916 	    !needs_scaling(old_plane_state))
11917 		pipe_config->disable_lp_wm = true;
11918 
11919 	return 0;
11920 }
11921 
11922 static bool encoders_cloneable(const struct intel_encoder *a,
11923 			       const struct intel_encoder *b)
11924 {
11925 	/* masks could be asymmetric, so check both ways */
11926 	return a == b || (a->cloneable & (1 << b->type) &&
11927 			  b->cloneable & (1 << a->type));
11928 }
11929 
11930 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11931 					 struct intel_crtc *crtc,
11932 					 struct intel_encoder *encoder)
11933 {
11934 	struct intel_encoder *source_encoder;
11935 	struct drm_connector *connector;
11936 	struct drm_connector_state *connector_state;
11937 	int i;
11938 
11939 	for_each_connector_in_state(state, connector, connector_state, i) {
11940 		if (connector_state->crtc != &crtc->base)
11941 			continue;
11942 
11943 		source_encoder =
11944 			to_intel_encoder(connector_state->best_encoder);
11945 		if (!encoders_cloneable(encoder, source_encoder))
11946 			return false;
11947 	}
11948 
11949 	return true;
11950 }
11951 
11952 static bool check_encoder_cloning(struct drm_atomic_state *state,
11953 				  struct intel_crtc *crtc)
11954 {
11955 	struct intel_encoder *encoder;
11956 	struct drm_connector *connector;
11957 	struct drm_connector_state *connector_state;
11958 	int i;
11959 
11960 	for_each_connector_in_state(state, connector, connector_state, i) {
11961 		if (connector_state->crtc != &crtc->base)
11962 			continue;
11963 
11964 		encoder = to_intel_encoder(connector_state->best_encoder);
11965 		if (!check_single_encoder_cloning(state, crtc, encoder))
11966 			return false;
11967 	}
11968 
11969 	return true;
11970 }
11971 
11972 static int intel_crtc_atomic_check(struct drm_crtc *crtc,
11973 				   struct drm_crtc_state *crtc_state)
11974 {
11975 	struct drm_device *dev = crtc->dev;
11976 	struct drm_i915_private *dev_priv = dev->dev_private;
11977 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11978 	struct intel_crtc_state *pipe_config =
11979 		to_intel_crtc_state(crtc_state);
11980 	struct drm_atomic_state *state = crtc_state->state;
11981 	int ret;
11982 	bool mode_changed = needs_modeset(crtc_state);
11983 
11984 	if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
11985 		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
11986 		return -EINVAL;
11987 	}
11988 
11989 	if (mode_changed && !crtc_state->active)
11990 		pipe_config->update_wm_post = true;
11991 
11992 	if (mode_changed && crtc_state->enable &&
11993 	    dev_priv->display.crtc_compute_clock &&
11994 	    !WARN_ON(pipe_config->shared_dpll)) {
11995 		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
11996 							   pipe_config);
11997 		if (ret)
11998 			return ret;
11999 	}
12000 
12001 	if (crtc_state->color_mgmt_changed) {
12002 		ret = intel_color_check(crtc, crtc_state);
12003 		if (ret)
12004 			return ret;
12005 
12006 		/*
12007 		 * Changing color management on Intel hardware is
12008 		 * handled as part of planes update.
12009 		 */
12010 		crtc_state->planes_changed = true;
12011 	}
12012 
12013 	ret = 0;
12014 	if (dev_priv->display.compute_pipe_wm) {
12015 		ret = dev_priv->display.compute_pipe_wm(pipe_config);
12016 		if (ret) {
12017 			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
12018 			return ret;
12019 		}
12020 	}
12021 
12022 	if (dev_priv->display.compute_intermediate_wm &&
12023 	    !to_intel_atomic_state(state)->skip_intermediate_wm) {
12024 		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
12025 			return 0;
12026 
12027 		/*
12028 		 * Calculate 'intermediate' watermarks that satisfy both the
12029 		 * old state and the new state.  We can program these
12030 		 * immediately.
12031 		 */
12032 		ret = dev_priv->display.compute_intermediate_wm(crtc->dev,
12033 								intel_crtc,
12034 								pipe_config);
12035 		if (ret) {
12036 			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
12037 			return ret;
12038 		}
12039 	} else if (dev_priv->display.compute_intermediate_wm) {
12040 		if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
12041 			pipe_config->wm.intermediate = pipe_config->wm.optimal.ilk;
12042 	}
12043 
12044 	if (INTEL_INFO(dev)->gen >= 9) {
12045 		if (mode_changed)
12046 			ret = skl_update_scaler_crtc(pipe_config);
12047 
12048 		if (!ret)
12049 			ret = intel_atomic_setup_scalers(dev, intel_crtc,
12050 							 pipe_config);
12051 	}
12052 
12053 	return ret;
12054 }
12055 
12056 static const struct drm_crtc_helper_funcs intel_helper_funcs = {
12057 	.mode_set_base_atomic = intel_pipe_set_base_atomic,
12058 	.atomic_begin = intel_begin_crtc_commit,
12059 	.atomic_flush = intel_finish_crtc_commit,
12060 	.atomic_check = intel_crtc_atomic_check,
12061 };
12062 
12063 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
12064 {
12065 	struct intel_connector *connector;
12066 
12067 	for_each_intel_connector(dev, connector) {
12068 		if (connector->base.state->crtc)
12069 			drm_connector_unreference(&connector->base);
12070 
12071 		if (connector->base.encoder) {
12072 			connector->base.state->best_encoder =
12073 				connector->base.encoder;
12074 			connector->base.state->crtc =
12075 				connector->base.encoder->crtc;
12076 
12077 			drm_connector_reference(&connector->base);
12078 		} else {
12079 			connector->base.state->best_encoder = NULL;
12080 			connector->base.state->crtc = NULL;
12081 		}
12082 	}
12083 }
12084 
12085 static void
12086 connected_sink_compute_bpp(struct intel_connector *connector,
12087 			   struct intel_crtc_state *pipe_config)
12088 {
12089 	int bpp = pipe_config->pipe_bpp;
12090 
12091 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constraints\n",
12092 		connector->base.base.id,
12093 		connector->base.name);
12094 
12095 	/* Don't use an invalid EDID bpc value */
12096 	if (connector->base.display_info.bpc &&
12097 	    connector->base.display_info.bpc * 3 < bpp) {
12098 		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
12099 			      bpp, connector->base.display_info.bpc*3);
12100 		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
12101 	}
12102 
12103 	/* Clamp bpp to 8 on screens without EDID 1.4 */
12104 	if (connector->base.display_info.bpc == 0 && bpp > 24) {
12105 		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
12106 			      bpp);
12107 		pipe_config->pipe_bpp = 24;
12108 	}
12109 }
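
/*
 * Minimal sketch of the two clamps above, not driver code; the helper
 * name is invented. A sink reporting 6 bpc clamps a 24 bpp pipe to
 * 18 bpp, and a sink reporting no bpc at all (pre EDID 1.4) is capped
 * at 24 bpp.
 */
#if 0
static int demo_clamp_pipe_bpp(int pipe_bpp, int sink_bpc)
{
	if (sink_bpc && sink_bpc * 3 < pipe_bpp)
		return sink_bpc * 3;	/* EDID-limited */
	if (sink_bpc == 0 && pipe_bpp > 24)
		return 24;		/* unknown bpc: assume 8 bpc */
	return pipe_bpp;
}
#endif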
12110 
12111 static int
12112 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
12113 			  struct intel_crtc_state *pipe_config)
12114 {
12115 	struct drm_device *dev = crtc->base.dev;
12116 	struct drm_atomic_state *state;
12117 	struct drm_connector *connector;
12118 	struct drm_connector_state *connector_state;
12119 	int bpp, i;
12120 
12121 	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
12122 		bpp = 10*3;
12123 	else if (INTEL_INFO(dev)->gen >= 5)
12124 		bpp = 12*3;
12125 	else
12126 		bpp = 8*3;
12127 
12128 
12129 	pipe_config->pipe_bpp = bpp;
12130 
12131 	state = pipe_config->base.state;
12132 
12133 	/* Clamp display bpp to EDID value */
12134 	for_each_connector_in_state(state, connector, connector_state, i) {
12135 		if (connector_state->crtc != &crtc->base)
12136 			continue;
12137 
12138 		connected_sink_compute_bpp(to_intel_connector(connector),
12139 					   pipe_config);
12140 	}
12141 
12142 	return bpp;
12143 }
12144 
12145 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
12146 {
12147 	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
12148 			"type: 0x%x flags: 0x%x\n",
12149 		mode->crtc_clock,
12150 		mode->crtc_hdisplay, mode->crtc_hsync_start,
12151 		mode->crtc_hsync_end, mode->crtc_htotal,
12152 		mode->crtc_vdisplay, mode->crtc_vsync_start,
12153 		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
12154 }
12155 
12156 static void intel_dump_pipe_config(struct intel_crtc *crtc,
12157 				   struct intel_crtc_state *pipe_config,
12158 				   const char *context)
12159 {
12160 	struct drm_device *dev = crtc->base.dev;
12161 	struct drm_plane *plane;
12162 	struct intel_plane *intel_plane;
12163 	struct intel_plane_state *state;
12164 	struct drm_framebuffer *fb;
12165 
12166 	DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
12167 		      context, pipe_config, pipe_name(crtc->pipe));
12168 
12169 	DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
12170 	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
12171 		      pipe_config->pipe_bpp, pipe_config->dither);
12172 	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
12173 		      pipe_config->has_pch_encoder,
12174 		      pipe_config->fdi_lanes,
12175 		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
12176 		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
12177 		      pipe_config->fdi_m_n.tu);
12178 	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
12179 		      pipe_config->has_dp_encoder,
12180 		      pipe_config->lane_count,
12181 		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
12182 		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
12183 		      pipe_config->dp_m_n.tu);
12184 
12185 	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
12186 		      pipe_config->has_dp_encoder,
12187 		      pipe_config->lane_count,
12188 		      pipe_config->dp_m2_n2.gmch_m,
12189 		      pipe_config->dp_m2_n2.gmch_n,
12190 		      pipe_config->dp_m2_n2.link_m,
12191 		      pipe_config->dp_m2_n2.link_n,
12192 		      pipe_config->dp_m2_n2.tu);
12193 
12194 	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
12195 		      pipe_config->has_audio,
12196 		      pipe_config->has_infoframe);
12197 
12198 	DRM_DEBUG_KMS("requested mode:\n");
12199 	drm_mode_debug_printmodeline(&pipe_config->base.mode);
12200 	DRM_DEBUG_KMS("adjusted mode:\n");
12201 	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
12202 	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
12203 	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
12204 	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
12205 		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
12206 	DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
12207 		      crtc->num_scalers,
12208 		      pipe_config->scaler_state.scaler_users,
12209 		      pipe_config->scaler_state.scaler_id);
12210 	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
12211 		      pipe_config->gmch_pfit.control,
12212 		      pipe_config->gmch_pfit.pgm_ratios,
12213 		      pipe_config->gmch_pfit.lvds_border_bits);
12214 	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
12215 		      pipe_config->pch_pfit.pos,
12216 		      pipe_config->pch_pfit.size,
12217 		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
12218 	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
12219 	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
12220 
12221 	if (IS_BROXTON(dev)) {
12222 		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
12223 			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
12224 			      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
12225 			      pipe_config->ddi_pll_sel,
12226 			      pipe_config->dpll_hw_state.ebb0,
12227 			      pipe_config->dpll_hw_state.ebb4,
12228 			      pipe_config->dpll_hw_state.pll0,
12229 			      pipe_config->dpll_hw_state.pll1,
12230 			      pipe_config->dpll_hw_state.pll2,
12231 			      pipe_config->dpll_hw_state.pll3,
12232 			      pipe_config->dpll_hw_state.pll6,
12233 			      pipe_config->dpll_hw_state.pll8,
12234 			      pipe_config->dpll_hw_state.pll9,
12235 			      pipe_config->dpll_hw_state.pll10,
12236 			      pipe_config->dpll_hw_state.pcsdw12);
12237 	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
12238 		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
12239 			      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
12240 			      pipe_config->ddi_pll_sel,
12241 			      pipe_config->dpll_hw_state.ctrl1,
12242 			      pipe_config->dpll_hw_state.cfgcr1,
12243 			      pipe_config->dpll_hw_state.cfgcr2);
12244 	} else if (HAS_DDI(dev)) {
12245 		DRM_DEBUG_KMS("ddi_pll_sel: 0x%x; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
12246 			      pipe_config->ddi_pll_sel,
12247 			      pipe_config->dpll_hw_state.wrpll,
12248 			      pipe_config->dpll_hw_state.spll);
12249 	} else {
12250 		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
12251 			      "fp0: 0x%x, fp1: 0x%x\n",
12252 			      pipe_config->dpll_hw_state.dpll,
12253 			      pipe_config->dpll_hw_state.dpll_md,
12254 			      pipe_config->dpll_hw_state.fp0,
12255 			      pipe_config->dpll_hw_state.fp1);
12256 	}
12257 
12258 	DRM_DEBUG_KMS("planes on this crtc\n");
12259 	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
12260 		intel_plane = to_intel_plane(plane);
12261 		if (intel_plane->pipe != crtc->pipe)
12262 			continue;
12263 
12264 		state = to_intel_plane_state(plane->state);
12265 		fb = state->base.fb;
12266 		if (!fb) {
12267 			DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
12268 				"disabled, scaler_id = %d\n",
12269 				plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
12270 				plane->base.id, intel_plane->pipe,
12271 				(crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
12272 				drm_plane_index(plane), state->scaler_id);
12273 			continue;
12274 		}
12275 
12276 		DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled\n",
12277 			plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
12278 			plane->base.id, intel_plane->pipe,
12279 			crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
12280 			drm_plane_index(plane));
12281 		DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x\n",
12282 			fb->base.id, fb->width, fb->height, fb->pixel_format);
12283 		DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
12284 			state->scaler_id,
12285 			state->src.x1 >> 16, state->src.y1 >> 16,
12286 			drm_rect_width(&state->src) >> 16,
12287 			drm_rect_height(&state->src) >> 16,
12288 			state->dst.x1, state->dst.y1,
12289 			drm_rect_width(&state->dst), drm_rect_height(&state->dst));
12290 	}
12291 }
12292 
12293 static bool check_digital_port_conflicts(struct drm_atomic_state *state)
12294 {
12295 	struct drm_device *dev = state->dev;
12296 	struct drm_connector *connector;
12297 	unsigned int used_ports = 0;
12298 
12299 	/*
12300 	 * Walk the connector list instead of the encoder
12301 	 * list to detect the problem on ddi platforms
12302 	 * where there's just one encoder per digital port.
12303 	 */
12304 	drm_for_each_connector(connector, dev) {
12305 		struct drm_connector_state *connector_state;
12306 		struct intel_encoder *encoder;
12307 
12308 		connector_state = drm_atomic_get_existing_connector_state(state, connector);
12309 		if (!connector_state)
12310 			connector_state = connector->state;
12311 
12312 		if (!connector_state->best_encoder)
12313 			continue;
12314 
12315 		encoder = to_intel_encoder(connector_state->best_encoder);
12316 
12317 		WARN_ON(!connector_state->crtc);
12318 
12319 		switch (encoder->type) {
12320 			unsigned int port_mask;
12321 		case INTEL_OUTPUT_UNKNOWN:
12322 			if (WARN_ON(!HAS_DDI(dev)))
12323 			if (WARN_ON(!HAS_DDI(dev)))
12324 				break;
			/* fall through: on DDI, UNKNOWN sits on a digital port */
12325 		case INTEL_OUTPUT_HDMI:
12326 		case INTEL_OUTPUT_EDP:
12327 			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
12328 
12329 			/* the same port mustn't appear more than once */
12330 			if (used_ports & port_mask)
12331 				return false;
12332 
12333 			used_ports |= port_mask;
12334 		default:
12335 			break;
12336 		}
12337 	}
12338 
12339 	return true;
12340 }
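
/*
 * Standalone rendering of the duplicate-port test above, not driver
 * code; ports[] is invented for the example. The first connector to
 * claim a port sets its bit in used_ports, and any later claim of the
 * same bit is a conflict.
 */
#if 0
#include <stdbool.h>

static bool demo_ports_conflict(const int *ports, int n)
{
	unsigned int used_ports = 0;

	for (int i = 0; i < n; i++) {
		unsigned int port_mask = 1u << ports[i];

		if (used_ports & port_mask)
			return true;	/* same digital port used twice */
		used_ports |= port_mask;
	}

	return false;
}
#endif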
12341 
12342 static void
12343 clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
12344 {
12345 	struct drm_crtc_state tmp_state;
12346 	struct intel_crtc_scaler_state scaler_state;
12347 	struct intel_dpll_hw_state dpll_hw_state;
12348 	struct intel_shared_dpll *shared_dpll;
12349 	uint32_t ddi_pll_sel;
12350 	bool force_thru;
12351 
12352 	/* FIXME: before the switch to atomic started, a new pipe_config was
12353 	 * kzalloc'd. Code that depends on any field being zero should be
12354 	 * fixed, so that the crtc_state can be safely duplicated. For now,
12355 	 * only fields that are known not to cause problems are preserved. */
12356 
12357 	tmp_state = crtc_state->base;
12358 	scaler_state = crtc_state->scaler_state;
12359 	shared_dpll = crtc_state->shared_dpll;
12360 	dpll_hw_state = crtc_state->dpll_hw_state;
12361 	ddi_pll_sel = crtc_state->ddi_pll_sel;
12362 	force_thru = crtc_state->pch_pfit.force_thru;
12363 
12364 	memset(crtc_state, 0, sizeof(*crtc_state));
12365 
12366 	crtc_state->base = tmp_state;
12367 	crtc_state->scaler_state = scaler_state;
12368 	crtc_state->shared_dpll = shared_dpll;
12369 	crtc_state->dpll_hw_state = dpll_hw_state;
12370 	crtc_state->ddi_pll_sel = ddi_pll_sel;
12371 	crtc_state->pch_pfit.force_thru = force_thru;
12372 }
12373 
12374 static int
12375 intel_modeset_pipe_config(struct drm_crtc *crtc,
12376 			  struct intel_crtc_state *pipe_config)
12377 {
12378 	struct drm_atomic_state *state = pipe_config->base.state;
12379 	struct intel_encoder *encoder;
12380 	struct drm_connector *connector;
12381 	struct drm_connector_state *connector_state;
12382 	int base_bpp, ret = -EINVAL;
12383 	int i;
12384 	bool retry = true;
12385 
12386 	clear_intel_crtc_state(pipe_config);
12387 
12388 	pipe_config->cpu_transcoder =
12389 		(enum transcoder) to_intel_crtc(crtc)->pipe;
12390 
12391 	/*
12392 	 * Sanitize sync polarity flags based on requested ones. If neither
12393 	 * positive nor negative polarity is requested, treat this as meaning
12394 	 * negative polarity.
12395 	 */
12396 	if (!(pipe_config->base.adjusted_mode.flags &
12397 	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
12398 		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
12399 
12400 	if (!(pipe_config->base.adjusted_mode.flags &
12401 	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
12402 		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
12403 
12404 	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
12405 					     pipe_config);
12406 	if (base_bpp < 0)
12407 		goto fail;
12408 
12409 	/*
12410 	 * Determine the real pipe dimensions. Note that stereo modes can
12411 	 * increase the actual pipe size due to the frame doubling and
12412 	 * insertion of additional space for blanks between the frames. This
12413 	 * is stored in the crtc timings. We use the requested mode to do this
12414 	 * computation to clearly distinguish it from the adjusted mode, which
12415 	 * can be changed by the connectors in the below retry loop.
12416 	 */
12417 	drm_crtc_get_hv_timing(&pipe_config->base.mode,
12418 			       &pipe_config->pipe_src_w,
12419 			       &pipe_config->pipe_src_h);
12420 
12421 encoder_retry:
12422 	/* Ensure the port clock defaults are reset when retrying. */
12423 	pipe_config->port_clock = 0;
12424 	pipe_config->pixel_multiplier = 1;
12425 
12426 	/* Fill in default crtc timings, allow encoders to overwrite them. */
12427 	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
12428 			      CRTC_STEREO_DOUBLE);
12429 
12430 	/* Pass our mode to the connectors and the CRTC to give them a chance to
12431 	 * adjust it according to limitations or connector properties, and also
12432 	 * a chance to reject the mode entirely.
12433 	 */
12434 	for_each_connector_in_state(state, connector, connector_state, i) {
12435 		if (connector_state->crtc != crtc)
12436 			continue;
12437 
12438 		encoder = to_intel_encoder(connector_state->best_encoder);
12439 
12440 		if (!encoder->compute_config(encoder, pipe_config)) {
12441 			DRM_DEBUG_KMS("Encoder config failure\n");
12442 			goto fail;
12443 		}
12444 	}
12445 
12446 	/* Set default port clock if not overwritten by the encoder. Needs to be
12447 	 * done afterwards in case the encoder adjusts the mode. */
12448 	if (!pipe_config->port_clock)
12449 		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
12450 			* pipe_config->pixel_multiplier;
12451 
12452 	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
12453 	if (ret < 0) {
12454 		DRM_DEBUG_KMS("CRTC fixup failed\n");
12455 		goto fail;
12456 	}
12457 
12458 	if (ret == RETRY) {
12459 		if (WARN(!retry, "loop in pipe configuration computation\n")) {
12460 			ret = -EINVAL;
12461 			goto fail;
12462 		}
12463 
12464 		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
12465 		retry = false;
12466 		goto encoder_retry;
12467 	}
12468 
12469 	/* Dithering seems not to pass bits through correctly when it should, so
12470 	 * only enable it on 6bpc panels. */
12471 	pipe_config->dither = pipe_config->pipe_bpp == 6*3;
12472 	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
12473 		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
12474 
12475 fail:
12476 	return ret;
12477 }
12478 
12479 static void
12480 intel_modeset_update_crtc_state(struct drm_atomic_state *state)
12481 {
12482 	struct drm_crtc *crtc;
12483 	struct drm_crtc_state *crtc_state;
12484 	int i;
12485 
12486 	/* Double check state. */
12487 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
12488 		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
12489 
12490 		/* Update hwmode for vblank functions */
12491 		if (crtc->state->active)
12492 			crtc->hwmode = crtc->state->adjusted_mode;
12493 		else
12494 			crtc->hwmode.crtc_clock = 0;
12495 
12496 		/*
12497 		 * Update legacy state to satisfy fbc code. This can
12498 		 * be removed when fbc uses the atomic state.
12499 		 */
12500 		if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
12501 			struct drm_plane_state *plane_state = crtc->primary->state;
12502 
12503 			crtc->primary->fb = plane_state->fb;
12504 			crtc->x = plane_state->src_x >> 16;
12505 			crtc->y = plane_state->src_y >> 16;
12506 		}
12507 	}
12508 }
12509 
12510 static bool intel_fuzzy_clock_check(int clock1, int clock2)
12511 {
12512 	int diff;
12513 
12514 	if (clock1 == clock2)
12515 		return true;
12516 
12517 	if (!clock1 || !clock2)
12518 		return false;
12519 
12520 	diff = abs(clock1 - clock2);
12521 
12522 	if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
12523 		return true;
12524 
12525 	return false;
12526 }
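
/*
 * Worked example for the tolerance above (editor's numbers): with
 * clock1 = 100000 and clock2 = 104000, diff = 4000 and
 * (4000 + 204000) * 100 / 204000 = 101 < 105, so the clocks match.
 * With clock2 = 112000 the expression evaluates to 105 and the check
 * fails. The allowed difference is thus just under 5% of the two
 * clocks' sum, i.e. roughly 10% of their average.
 */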
12527 
12528 #define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
12529 	list_for_each_entry((intel_crtc), \
12530 			    &(dev)->mode_config.crtc_list, \
12531 			    base.head) \
12532 		for_each_if (mask & (1 <<(intel_crtc)->pipe))
12533 
12534 static bool
12535 intel_compare_m_n(unsigned int m, unsigned int n,
12536 		  unsigned int m2, unsigned int n2,
12537 		  bool exact)
12538 {
12539 	if (m == m2 && n == n2)
12540 		return true;
12541 
12542 	if (exact || !m || !n || !m2 || !n2)
12543 		return false;
12544 
12545 	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12546 
12547 	if (n > n2) {
12548 		while (n > n2) {
12549 			m2 <<= 1;
12550 			n2 <<= 1;
12551 		}
12552 	} else if (n < n2) {
12553 		while (n < n2) {
12554 			m <<= 1;
12555 			n <<= 1;
12556 		}
12557 	}
12558 
12559 	if (n != n2)
12560 		return false;
12561 
12562 	return intel_fuzzy_clock_check(m, m2);
12563 }
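
/*
 * Worked example, not driver code (values invented): comparing
 * m/n = 6/4 against m2/n2 = 3/2, the pair with the smaller denominator
 * is doubled once to 6/4, the denominators now agree, and the
 * numerators are fuzzily compared (6 vs 6 -> match). Comparing 5/3
 * against 3/2, 3/2 doubles to 6/4, the denominators 3 and 4 never
 * meet, and the ratios are declared different: the inexact path only
 * matches when the denominators are related by a power of two, a
 * cheap approximation in lieu of exact rational comparison.
 */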
12564 
12565 static bool
12566 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12567 		       struct intel_link_m_n *m2_n2,
12568 		       bool adjust)
12569 {
12570 	if (m_n->tu == m2_n2->tu &&
12571 	    intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12572 			      m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
12573 	    intel_compare_m_n(m_n->link_m, m_n->link_n,
12574 			      m2_n2->link_m, m2_n2->link_n, !adjust)) {
12575 		if (adjust)
12576 			*m2_n2 = *m_n;
12577 
12578 		return true;
12579 	}
12580 
12581 	return false;
12582 }
12583 
12584 static bool
12585 intel_pipe_config_compare(struct drm_device *dev,
12586 			  struct intel_crtc_state *current_config,
12587 			  struct intel_crtc_state *pipe_config,
12588 			  bool adjust)
12589 {
12590 	bool ret = true;
12591 
12592 #define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
12593 	do { \
12594 		if (!adjust) \
12595 			DRM_ERROR(fmt, ##__VA_ARGS__); \
12596 		else \
12597 			DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
12598 	} while (0)
12599 
12600 #define PIPE_CONF_CHECK_X(name)	\
12601 	if (current_config->name != pipe_config->name) { \
12602 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12603 			  "(expected 0x%08x, found 0x%08x)\n", \
12604 			  current_config->name, \
12605 			  pipe_config->name); \
12606 		ret = false; \
12607 	}
12608 
12609 #define PIPE_CONF_CHECK_I(name)	\
12610 	if (current_config->name != pipe_config->name) { \
12611 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12612 			  "(expected %i, found %i)\n", \
12613 			  current_config->name, \
12614 			  pipe_config->name); \
12615 		ret = false; \
12616 	}
12617 
12618 #define PIPE_CONF_CHECK_P(name)	\
12619 	if (current_config->name != pipe_config->name) { \
12620 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12621 			  "(expected %p, found %p)\n", \
12622 			  current_config->name, \
12623 			  pipe_config->name); \
12624 		ret = false; \
12625 	}
12626 
12627 #define PIPE_CONF_CHECK_M_N(name) \
12628 	if (!intel_compare_link_m_n(&current_config->name, \
12629 				    &pipe_config->name,\
12630 				    adjust)) { \
12631 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12632 			  "(expected tu %i gmch %i/%i link %i/%i, " \
12633 			  "found tu %i, gmch %i/%i link %i/%i)\n", \
12634 			  current_config->name.tu, \
12635 			  current_config->name.gmch_m, \
12636 			  current_config->name.gmch_n, \
12637 			  current_config->name.link_m, \
12638 			  current_config->name.link_n, \
12639 			  pipe_config->name.tu, \
12640 			  pipe_config->name.gmch_m, \
12641 			  pipe_config->name.gmch_n, \
12642 			  pipe_config->name.link_m, \
12643 			  pipe_config->name.link_n); \
12644 		ret = false; \
12645 	}
12646 
12647 /* This is required for BDW+ where there is only one set of registers for
12648  * switching between high and low refresh rates (RR).
12649  * This macro can be used whenever a comparison has to be made between one
12650  * hw state and multiple sw state variables.
12651  */
12652 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
12653 	if (!intel_compare_link_m_n(&current_config->name, \
12654 				    &pipe_config->name, adjust) && \
12655 	    !intel_compare_link_m_n(&current_config->alt_name, \
12656 				    &pipe_config->name, adjust)) { \
12657 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12658 			  "(expected tu %i gmch %i/%i link %i/%i, " \
12659 			  "or tu %i gmch %i/%i link %i/%i, " \
12660 			  "found tu %i, gmch %i/%i link %i/%i)\n", \
12661 			  current_config->name.tu, \
12662 			  current_config->name.gmch_m, \
12663 			  current_config->name.gmch_n, \
12664 			  current_config->name.link_m, \
12665 			  current_config->name.link_n, \
12666 			  current_config->alt_name.tu, \
12667 			  current_config->alt_name.gmch_m, \
12668 			  current_config->alt_name.gmch_n, \
12669 			  current_config->alt_name.link_m, \
12670 			  current_config->alt_name.link_n, \
12671 			  pipe_config->name.tu, \
12672 			  pipe_config->name.gmch_m, \
12673 			  pipe_config->name.gmch_n, \
12674 			  pipe_config->name.link_m, \
12675 			  pipe_config->name.link_n); \
12676 		ret = false; \
12677 	}
12678 
12679 #define PIPE_CONF_CHECK_FLAGS(name, mask)	\
12680 	if ((current_config->name ^ pipe_config->name) & (mask)) { \
12681 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
12682 			  "(expected %i, found %i)\n", \
12683 			  current_config->name & (mask), \
12684 			  pipe_config->name & (mask)); \
12685 		ret = false; \
12686 	}
12687 
12688 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
12689 	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
12690 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12691 			  "(expected %i, found %i)\n", \
12692 			  current_config->name, \
12693 			  pipe_config->name); \
12694 		ret = false; \
12695 	}
12696 
12697 #define PIPE_CONF_QUIRK(quirk)	\
12698 	((current_config->quirks | pipe_config->quirks) & (quirk))
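
/*
 * For orientation (editor's expansion, not generated code):
 * PIPE_CONF_CHECK_I(lane_count) expands to roughly
 *
 *	if (current_config->lane_count != pipe_config->lane_count) {
 *		INTEL_ERR_OR_DBG_KMS("mismatch in lane_count "
 *			  "(expected %i, found %i)\n",
 *			  current_config->lane_count,
 *			  pipe_config->lane_count);
 *		ret = false;
 *	}
 *
 * so every check both logs (as an error, or only at debug level when
 * adjust is set) and accumulates its verdict into ret.
 */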
12699 
12700 	PIPE_CONF_CHECK_I(cpu_transcoder);
12701 
12702 	PIPE_CONF_CHECK_I(has_pch_encoder);
12703 	PIPE_CONF_CHECK_I(fdi_lanes);
12704 	PIPE_CONF_CHECK_M_N(fdi_m_n);
12705 
12706 	PIPE_CONF_CHECK_I(has_dp_encoder);
12707 	PIPE_CONF_CHECK_I(lane_count);
12708 
12709 	if (INTEL_INFO(dev)->gen < 8) {
12710 		PIPE_CONF_CHECK_M_N(dp_m_n);
12711 
12712 		if (current_config->has_drrs)
12713 			PIPE_CONF_CHECK_M_N(dp_m2_n2);
12714 	} else {
12715 		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
	}
12716 
12717 	PIPE_CONF_CHECK_I(has_dsi_encoder);
12718 
12719 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12720 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12721 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12722 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12723 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12724 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
12725 
12726 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12727 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12728 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12729 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12730 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12731 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
12732 
12733 	PIPE_CONF_CHECK_I(pixel_multiplier);
12734 	PIPE_CONF_CHECK_I(has_hdmi_sink);
12735 	if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
12736 	    IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
12737 		PIPE_CONF_CHECK_I(limited_color_range);
12738 	PIPE_CONF_CHECK_I(has_infoframe);
12739 
12740 	PIPE_CONF_CHECK_I(has_audio);
12741 
12742 	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12743 			      DRM_MODE_FLAG_INTERLACE);
12744 
12745 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
12746 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12747 				      DRM_MODE_FLAG_PHSYNC);
12748 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12749 				      DRM_MODE_FLAG_NHSYNC);
12750 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12751 				      DRM_MODE_FLAG_PVSYNC);
12752 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12753 				      DRM_MODE_FLAG_NVSYNC);
12754 	}
12755 
12756 	PIPE_CONF_CHECK_X(gmch_pfit.control);
12757 	/* pfit ratios are autocomputed by the hw on gen4+ */
12758 	if (INTEL_INFO(dev)->gen < 4)
12759 		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
12760 	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
12761 
12762 	if (!adjust) {
12763 		PIPE_CONF_CHECK_I(pipe_src_w);
12764 		PIPE_CONF_CHECK_I(pipe_src_h);
12765 
12766 		PIPE_CONF_CHECK_I(pch_pfit.enabled);
12767 		if (current_config->pch_pfit.enabled) {
12768 			PIPE_CONF_CHECK_X(pch_pfit.pos);
12769 			PIPE_CONF_CHECK_X(pch_pfit.size);
12770 		}
12771 
12772 		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
12773 	}
12774 
12775 	/* BDW+ doesn't expose a synchronous way to read the IPS state */
12776 	if (IS_HASWELL(dev))
12777 		PIPE_CONF_CHECK_I(ips_enabled);
12778 
12779 	PIPE_CONF_CHECK_I(double_wide);
12780 
12781 	PIPE_CONF_CHECK_X(ddi_pll_sel);
12782 
12783 	PIPE_CONF_CHECK_P(shared_dpll);
12784 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
12785 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
12786 	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12787 	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
12788 	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
12789 	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
12790 	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12791 	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12792 	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
12793 
12794 	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
12795 	PIPE_CONF_CHECK_X(dsi_pll.div);
12796 
12797 	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
12798 		PIPE_CONF_CHECK_I(pipe_bpp);
12799 
12800 	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
12801 	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
12802 
12803 #undef PIPE_CONF_CHECK_X
12804 #undef PIPE_CONF_CHECK_I
12805 #undef PIPE_CONF_CHECK_P
12806 #undef PIPE_CONF_CHECK_FLAGS
12807 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
12808 #undef PIPE_CONF_QUIRK
12809 #undef INTEL_ERR_OR_DBG_KMS
12810 
12811 	return ret;
12812 }
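
/*
 * Usage note (editor's addition): both call sites appear later in this
 * file. verify_crtc_state() passes adjust=false, making any mismatch a
 * DRM_ERROR, while the fastboot path in intel_atomic_check() passes
 * adjust=true, where mismatches are only debug-logged and fuzzily
 * matching m/n values are copied into the new state so that a full
 * modeset can be skipped.
 */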
12813 
12814 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
12815 					   const struct intel_crtc_state *pipe_config)
12816 {
12817 	if (pipe_config->has_pch_encoder) {
12818 		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
12819 							    &pipe_config->fdi_m_n);
12820 		int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
12821 
12822 		/*
12823 		 * FDI already provided one idea for the dotclock.
12824 		 * Yell if the encoder disagrees.
12825 		 */
12826 		WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
12827 		     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
12828 		     fdi_dotclock, dotclock);
12829 	}
12830 }
12831 
12832 static void verify_wm_state(struct drm_crtc *crtc,
12833 			    struct drm_crtc_state *new_state)
12834 {
12835 	struct drm_device *dev = crtc->dev;
12836 	struct drm_i915_private *dev_priv = dev->dev_private;
12837 	struct skl_ddb_allocation hw_ddb, *sw_ddb;
12838 	struct skl_ddb_entry *hw_entry, *sw_entry;
12839 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12840 	const enum i915_pipe pipe = intel_crtc->pipe;
12841 	int plane;
12842 
12843 	if (INTEL_INFO(dev)->gen < 9 || !new_state->active)
12844 		return;
12845 
12846 	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
12847 	sw_ddb = &dev_priv->wm.skl_hw.ddb;
12848 
12849 	/* planes */
12850 	for_each_plane(dev_priv, pipe, plane) {
12851 		hw_entry = &hw_ddb.plane[pipe][plane];
12852 		sw_entry = &sw_ddb->plane[pipe][plane];
12853 
12854 		if (skl_ddb_entry_equal(hw_entry, sw_entry))
12855 			continue;
12856 
12857 		DRM_ERROR("mismatch in DDB state pipe %c plane %d "
12858 			  "(expected (%u,%u), found (%u,%u))\n",
12859 			  pipe_name(pipe), plane + 1,
12860 			  sw_entry->start, sw_entry->end,
12861 			  hw_entry->start, hw_entry->end);
12862 	}
12863 
12864 	/* cursor */
12865 	hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
12866 	sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
12867 
12868 	if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
12869 		DRM_ERROR("mismatch in DDB state pipe %c cursor "
12870 			  "(expected (%u,%u), found (%u,%u))\n",
12871 			  pipe_name(pipe),
12872 			  sw_entry->start, sw_entry->end,
12873 			  hw_entry->start, hw_entry->end);
12874 	}
12875 }
12876 
12877 static void
12878 verify_connector_state(struct drm_device *dev, struct drm_crtc *crtc)
12879 {
12880 	struct drm_connector *connector;
12881 
12882 	drm_for_each_connector(connector, dev) {
12883 		struct drm_encoder *encoder = connector->encoder;
12884 		struct drm_connector_state *state = connector->state;
12885 
12886 		if (state->crtc != crtc)
12887 			continue;
12888 
12889 		intel_connector_verify_state(to_intel_connector(connector));
12890 
12891 		I915_STATE_WARN(state->best_encoder != encoder,
12892 		     "connector's atomic encoder doesn't match legacy encoder\n");
12893 	}
12894 }
12895 
12896 static void
12897 verify_encoder_state(struct drm_device *dev)
12898 {
12899 	struct intel_encoder *encoder;
12900 	struct intel_connector *connector;
12901 
12902 	for_each_intel_encoder(dev, encoder) {
12903 		bool enabled = false;
12904 		enum i915_pipe pipe;
12905 
12906 		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
12907 			      encoder->base.base.id,
12908 			      encoder->base.name);
12909 
12910 		for_each_intel_connector(dev, connector) {
12911 			if (connector->base.state->best_encoder != &encoder->base)
12912 				continue;
12913 			enabled = true;
12914 
12915 			I915_STATE_WARN(connector->base.state->crtc !=
12916 					encoder->base.crtc,
12917 			     "connector's crtc doesn't match encoder crtc\n");
12918 		}
12919 
12920 		I915_STATE_WARN(!!encoder->base.crtc != enabled,
12921 		     "encoder's enabled state mismatch "
12922 		     "(expected %i, found %i)\n",
12923 		     !!encoder->base.crtc, enabled);
12924 
12925 		if (!encoder->base.crtc) {
12926 			bool active;
12927 
12928 			active = encoder->get_hw_state(encoder, &pipe);
12929 			I915_STATE_WARN(active,
12930 			     "encoder detached but still enabled on pipe %c.\n",
12931 			     pipe_name(pipe));
12932 		}
12933 	}
12934 }
12935 
12936 static void
12937 verify_crtc_state(struct drm_crtc *crtc,
12938 		  struct drm_crtc_state *old_crtc_state,
12939 		  struct drm_crtc_state *new_crtc_state)
12940 {
12941 	struct drm_device *dev = crtc->dev;
12942 	struct drm_i915_private *dev_priv = dev->dev_private;
12943 	struct intel_encoder *encoder;
12944 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12945 	struct intel_crtc_state *pipe_config, *sw_config;
12946 	struct drm_atomic_state *old_state;
12947 	bool active;
12948 
12949 	old_state = old_crtc_state->state;
12950 	__drm_atomic_helper_crtc_destroy_state(old_crtc_state);
12951 	pipe_config = to_intel_crtc_state(old_crtc_state);
12952 	memset(pipe_config, 0, sizeof(*pipe_config));
12953 	pipe_config->base.crtc = crtc;
12954 	pipe_config->base.state = old_state;
12955 
12956 	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
12957 
12958 	active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
12959 
12960 	/* hw state is inconsistent with the pipe quirk */
12961 	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
12962 	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
12963 		active = new_crtc_state->active;
12964 
12965 	I915_STATE_WARN(new_crtc_state->active != active,
12966 	     "crtc active state doesn't match with hw state "
12967 	     "(expected %i, found %i)\n", new_crtc_state->active, active);
12968 
12969 	I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
12970 	     "transitional active state does not match atomic hw state "
12971 	     "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);
12972 
12973 	for_each_encoder_on_crtc(dev, crtc, encoder) {
12974 		enum i915_pipe pipe;
12975 
12976 		active = encoder->get_hw_state(encoder, &pipe);
12977 		I915_STATE_WARN(active != new_crtc_state->active,
12978 			"[ENCODER:%i] active %i with crtc active %i\n",
12979 			encoder->base.base.id, active, new_crtc_state->active);
12980 
12981 		I915_STATE_WARN(active && intel_crtc->pipe != pipe,
12982 				"Encoder connected to wrong pipe %c\n",
12983 				pipe_name(pipe));
12984 
12985 		if (active)
12986 			encoder->get_config(encoder, pipe_config);
12987 	}
12988 
12989 	if (!new_crtc_state->active)
12990 		return;
12991 
12992 	intel_pipe_config_sanity_check(dev_priv, pipe_config);
12993 
12994 	sw_config = to_intel_crtc_state(crtc->state);
12995 	if (!intel_pipe_config_compare(dev, sw_config,
12996 				       pipe_config, false)) {
12997 		I915_STATE_WARN(1, "pipe state doesn't match!\n");
12998 		intel_dump_pipe_config(intel_crtc, pipe_config,
12999 				       "[hw state]");
13000 		intel_dump_pipe_config(intel_crtc, sw_config,
13001 				       "[sw state]");
13002 	}
13003 }
13004 
13005 static void
13006 verify_single_dpll_state(struct drm_i915_private *dev_priv,
13007 			 struct intel_shared_dpll *pll,
13008 			 struct drm_crtc *crtc,
13009 			 struct drm_crtc_state *new_state)
13010 {
13011 	struct intel_dpll_hw_state dpll_hw_state;
13012 	unsigned crtc_mask;
13013 	bool active;
13014 
13015 	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
13016 
13017 	DRM_DEBUG_KMS("%s\n", pll->name);
13018 
13019 	active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state);
13020 
13021 	if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) {
13022 		I915_STATE_WARN(!pll->on && pll->active_mask,
13023 		     "pll in active use but not on in sw tracking\n");
13024 		I915_STATE_WARN(pll->on && !pll->active_mask,
13025 		     "pll is on but not used by any active crtc\n");
13026 		I915_STATE_WARN(pll->on != active,
13027 		     "pll on state mismatch (expected %i, found %i)\n",
13028 		     pll->on, active);
13029 	}
13030 
13031 	if (!crtc) {
13032 		I915_STATE_WARN(pll->active_mask & ~pll->config.crtc_mask,
13033 				"more active pll users than references: %x vs %x\n",
13034 				pll->active_mask, pll->config.crtc_mask);
13035 
13036 		return;
13037 	}
13038 
13039 	crtc_mask = 1 << drm_crtc_index(crtc);
13040 
13041 	if (new_state->active)
13042 		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
13043 				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
13044 				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
13045 	else
13046 		I915_STATE_WARN(pll->active_mask & crtc_mask,
13047 				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
13048 				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
13049 
13050 	I915_STATE_WARN(!(pll->config.crtc_mask & crtc_mask),
13051 			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
13052 			crtc_mask, pll->config.crtc_mask);
13053 
13054 	I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state,
13055 					  &dpll_hw_state,
13056 					  sizeof(dpll_hw_state)),
13057 			"pll hw state mismatch\n");
13058 }
13059 
13060 static void
13061 verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
13062 			 struct drm_crtc_state *old_crtc_state,
13063 			 struct drm_crtc_state *new_crtc_state)
13064 {
13065 	struct drm_i915_private *dev_priv = dev->dev_private;
13066 	struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
13067 	struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
13068 
13069 	if (new_state->shared_dpll)
13070 		verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
13071 
13072 	if (old_state->shared_dpll &&
13073 	    old_state->shared_dpll != new_state->shared_dpll) {
13074 		unsigned crtc_mask = 1 << drm_crtc_index(crtc);
13075 		struct intel_shared_dpll *pll = old_state->shared_dpll;
13076 
13077 		I915_STATE_WARN(pll->active_mask & crtc_mask,
13078 				"pll active mismatch (didn't expect pipe %c in active mask)\n",
13079 				pipe_name(drm_crtc_index(crtc)));
13080 		I915_STATE_WARN(pll->config.crtc_mask & crtc_mask,
13081 				"pll enabled crtcs mismatch (found pipe %c in enabled mask)\n",
13082 				pipe_name(drm_crtc_index(crtc)));
13083 	}
13084 }
13085 
13086 static void
13087 intel_modeset_verify_crtc(struct drm_crtc *crtc,
13088 			 struct drm_crtc_state *old_state,
13089 			 struct drm_crtc_state *new_state)
13090 {
13091 	if (!needs_modeset(new_state) &&
13092 	    !to_intel_crtc_state(new_state)->update_pipe)
13093 		return;
13094 
13095 	verify_wm_state(crtc, new_state);
13096 	verify_connector_state(crtc->dev, crtc);
13097 	verify_crtc_state(crtc, old_state, new_state);
13098 	verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
13099 }
13100 
13101 static void
13102 verify_disabled_dpll_state(struct drm_device *dev)
13103 {
13104 	struct drm_i915_private *dev_priv = dev->dev_private;
13105 	int i;
13106 
13107 	for (i = 0; i < dev_priv->num_shared_dpll; i++)
13108 		verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
13109 }
13110 
13111 static void
13112 intel_modeset_verify_disabled(struct drm_device *dev)
13113 {
13114 	verify_encoder_state(dev);
13115 	verify_connector_state(dev, NULL);
13116 	verify_disabled_dpll_state(dev);
13117 }
13118 
13119 static void update_scanline_offset(struct intel_crtc *crtc)
13120 {
13121 	struct drm_device *dev = crtc->base.dev;
13122 
13123 	/*
13124 	 * The scanline counter increments at the leading edge of hsync.
13125 	 *
13126 	 * On most platforms it starts counting from vtotal-1 on the
13127 	 * first active line. That means the scanline counter value is
13128 	 * always one less than what we would expect. Ie. just after
13129 	 * always one less than what we would expect. I.e. just after
13130 	 * last active line), the scanline counter will read vblank_start-1.
13131 	 *
13132 	 * On gen2 the scanline counter starts counting from 1 instead
13133 	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
13134 	 * to keep the value positive), instead of adding one.
13135 	 *
13136 	 * On HSW+ the behaviour of the scanline counter depends on the output
13137 	 * type. For DP ports it behaves like most other platforms, but on HDMI
13138 	 * there's an extra 1 line difference. So we need to add two instead of
13139 	 * one to the value.
13140 	 */
13141 	if (IS_GEN2(dev)) {
13142 		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
13143 		int vtotal;
13144 
13145 		vtotal = adjusted_mode->crtc_vtotal;
13146 		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
13147 			vtotal /= 2;
13148 
13149 		crtc->scanline_offset = vtotal - 1;
13150 	} else if (HAS_DDI(dev) &&
13151 		   intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
13152 		crtc->scanline_offset = 2;
13153 	} else {
13154 		crtc->scanline_offset = 1;
	}
13155 }
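
/*
 * Worked examples for the rules above (editor's numbers): a gen2 mode
 * with crtc_vtotal = 806 gets scanline_offset = 805, and the same mode
 * interlaced uses 806 / 2 - 1 = 402; DDI HDMI outputs use 2 and
 * everything else uses 1. The scanline readout code elsewhere in the
 * driver adds this offset back modulo vtotal, so the reported position
 * lines up across platforms.
 */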
13156 
13157 static void intel_modeset_clear_plls(struct drm_atomic_state *state)
13158 {
13159 	struct drm_device *dev = state->dev;
13160 	struct drm_i915_private *dev_priv = to_i915(dev);
13161 	struct intel_shared_dpll_config *shared_dpll = NULL;
13162 	struct drm_crtc *crtc;
13163 	struct drm_crtc_state *crtc_state;
13164 	int i;
13165 
13166 	if (!dev_priv->display.crtc_compute_clock)
13167 		return;
13168 
13169 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13170 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13171 		struct intel_shared_dpll *old_dpll =
13172 			to_intel_crtc_state(crtc->state)->shared_dpll;
13173 
13174 		if (!needs_modeset(crtc_state))
13175 			continue;
13176 
13177 		to_intel_crtc_state(crtc_state)->shared_dpll = NULL;
13178 
13179 		if (!old_dpll)
13180 			continue;
13181 
13182 		if (!shared_dpll)
13183 			shared_dpll = intel_atomic_get_shared_dpll_state(state);
13184 
13185 		intel_shared_dpll_config_put(shared_dpll, old_dpll, intel_crtc);
13186 	}
13187 }
13188 
13189 /*
13190  * This implements the workaround described in the "notes" section of the mode
13191  * set sequence documentation. When going from no pipes or single pipe to
13192  * multiple pipes, and planes are enabled after the pipe, we need to wait at
13193  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
13194  */
13195 static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
13196 {
13197 	struct drm_crtc_state *crtc_state;
13198 	struct intel_crtc *intel_crtc;
13199 	struct drm_crtc *crtc;
13200 	struct intel_crtc_state *first_crtc_state = NULL;
13201 	struct intel_crtc_state *other_crtc_state = NULL;
13202 	enum i915_pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
13203 	int i;
13204 
13205 	/* look at all crtc's that are going to be enabled in during modeset */
13206 	/* look at all crtcs that are going to be enabled during the modeset */
13207 		intel_crtc = to_intel_crtc(crtc);
13208 
13209 		if (!crtc_state->active || !needs_modeset(crtc_state))
13210 			continue;
13211 
13212 		if (first_crtc_state) {
13213 			other_crtc_state = to_intel_crtc_state(crtc_state);
13214 			break;
13215 		} else {
13216 			first_crtc_state = to_intel_crtc_state(crtc_state);
13217 			first_pipe = intel_crtc->pipe;
13218 		}
13219 	}
13220 
13221 	/* No workaround needed? */
13222 	if (!first_crtc_state)
13223 		return 0;
13224 
13225 	/* w/a possibly needed, check how many crtc's are already enabled. */
13226 	/* w/a possibly needed, check how many crtcs are already enabled. */
13227 		struct intel_crtc_state *pipe_config;
13228 
13229 		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
13230 		if (IS_ERR(pipe_config))
13231 			return PTR_ERR(pipe_config);
13232 
13233 		pipe_config->hsw_workaround_pipe = INVALID_PIPE;
13234 
13235 		if (!pipe_config->base.active ||
13236 		    needs_modeset(&pipe_config->base))
13237 			continue;
13238 
13239 		/* 2 or more enabled crtcs means no need for w/a */
13240 		if (enabled_pipe != INVALID_PIPE)
13241 			return 0;
13242 
13243 		enabled_pipe = intel_crtc->pipe;
13244 	}
13245 
13246 	if (enabled_pipe != INVALID_PIPE)
13247 		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
13248 	else if (other_crtc_state)
13249 		other_crtc_state->hsw_workaround_pipe = first_pipe;
13250 
13251 	return 0;
13252 }
13253 
13254 static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13255 {
13256 	struct drm_crtc *crtc;
13257 	struct drm_crtc_state *crtc_state;
13258 	int ret = 0;
13259 
13260 	/* add all active pipes to the state */
13261 	for_each_crtc(state->dev, crtc) {
13262 		crtc_state = drm_atomic_get_crtc_state(state, crtc);
13263 		if (IS_ERR(crtc_state))
13264 			return PTR_ERR(crtc_state);
13265 
13266 		if (!crtc_state->active || needs_modeset(crtc_state))
13267 			continue;
13268 
13269 		crtc_state->mode_changed = true;
13270 
13271 		ret = drm_atomic_add_affected_connectors(state, crtc);
13272 		if (ret)
13273 			break;
13274 
13275 		ret = drm_atomic_add_affected_planes(state, crtc);
13276 		if (ret)
13277 			break;
13278 	}
13279 
13280 	return ret;
13281 }
13282 
13283 static int intel_modeset_checks(struct drm_atomic_state *state)
13284 {
13285 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13286 	struct drm_i915_private *dev_priv = state->dev->dev_private;
13287 	struct drm_crtc *crtc;
13288 	struct drm_crtc_state *crtc_state;
13289 	int ret = 0, i;
13290 
13291 	if (!check_digital_port_conflicts(state)) {
13292 		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
13293 		return -EINVAL;
13294 	}
13295 
13296 	intel_state->modeset = true;
13297 	intel_state->active_crtcs = dev_priv->active_crtcs;
13298 
13299 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13300 		if (crtc_state->active)
13301 			intel_state->active_crtcs |= 1 << i;
13302 		else
13303 			intel_state->active_crtcs &= ~(1 << i);
13304 	}
13305 
13306 	/*
13307 	 * See if the config requires any additional preparation, e.g.
13308 	 * to adjust global state with pipes off.  We need to do this
13309 	 * here so we can get the updated config for the new mode set on
13310 	 * this crtc.  For other crtcs we need to use the
13311 	 * adjusted_mode bits in the crtc directly.
13312 	 */
13313 	if (dev_priv->display.modeset_calc_cdclk) {
13314 		ret = dev_priv->display.modeset_calc_cdclk(state);
13315 
13316 		if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq)
13317 			ret = intel_modeset_all_pipes(state);
13318 
13319 		if (ret < 0)
13320 			return ret;
13321 
13322 		DRM_DEBUG_KMS("New cdclk calculated: atomic %u, actual %u\n",
13323 			      intel_state->cdclk, intel_state->dev_cdclk);
13324 	} else {
13325 		intel_state->cdclk = dev_priv->atomic_cdclk_freq;
	}
13326 
13327 	intel_modeset_clear_plls(state);
13328 
13329 	if (IS_HASWELL(dev_priv))
13330 		return haswell_mode_set_planes_workaround(state);
13331 
13332 	return 0;
13333 }
13334 
13335 /*
13336  * Handle calculation of various watermark data at the end of the atomic check
13337  * phase.  The code here should be run after the per-crtc and per-plane 'check'
13338  * handlers to ensure that all derived state has been updated.
13339  */
13340 static void calc_watermark_data(struct drm_atomic_state *state)
13341 {
13342 	struct drm_device *dev = state->dev;
13343 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13344 	struct drm_crtc *crtc;
13345 	struct drm_crtc_state *cstate;
13346 	struct drm_plane *plane;
13347 	struct drm_plane_state *pstate;
13348 
13349 	/*
13350 	 * Calculate watermark configuration details now that derived
13351 	 * plane/crtc state is all properly updated.
13352 	 */
13353 	drm_for_each_crtc(crtc, dev) {
13354 		cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?:
13355 			crtc->state;
13356 
13357 		if (cstate->active)
13358 			intel_state->wm_config.num_pipes_active++;
13359 	}
13360 	drm_for_each_legacy_plane(plane, dev) {
13361 		pstate = drm_atomic_get_existing_plane_state(state, plane) ?:
13362 			plane->state;
13363 
13364 		if (!to_intel_plane_state(pstate)->visible)
13365 			continue;
13366 
13367 		intel_state->wm_config.sprites_enabled = true;
13368 		if (pstate->crtc_w != pstate->src_w >> 16 ||
13369 		    pstate->crtc_h != pstate->src_h >> 16)
13370 			intel_state->wm_config.sprites_scaled = true;
13371 	}
13372 }
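
/*
 * Illustrative note (editor's addition): plane src coordinates are
 * 16.16 fixed point, hence the >> 16 above. E.g. a plane with
 * src_w = 1920 << 16 scanned out at crtc_w = 960 marks
 * wm_config.sprites_scaled, which the platform watermark code can then
 * handle conservatively.
 */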
13373 
13374 /**
13375  * intel_atomic_check - validate state object
13376  * @dev: drm device
13377  * @state: state to validate
13378  */
13379 static int intel_atomic_check(struct drm_device *dev,
13380 			      struct drm_atomic_state *state)
13381 {
13382 	struct drm_i915_private *dev_priv = to_i915(dev);
13383 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13384 	struct drm_crtc *crtc;
13385 	struct drm_crtc_state *crtc_state;
13386 	int ret, i;
13387 	bool any_ms = false;
13388 
13389 	ret = drm_atomic_helper_check_modeset(dev, state);
13390 	if (ret)
13391 		return ret;
13392 
13393 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13394 		struct intel_crtc_state *pipe_config =
13395 			to_intel_crtc_state(crtc_state);
13396 
13397 		/* Catch I915_MODE_FLAG_INHERITED */
13398 		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
13399 			crtc_state->mode_changed = true;
13400 
13401 		if (!crtc_state->enable) {
13402 			if (needs_modeset(crtc_state))
13403 				any_ms = true;
13404 			continue;
13405 		}
13406 
13407 		if (!needs_modeset(crtc_state))
13408 			continue;
13409 
13410 		/* FIXME: For only active_changed we shouldn't need to do any
13411 		 * state recomputation at all. */
13412 
13413 		ret = drm_atomic_add_affected_connectors(state, crtc);
13414 		if (ret)
13415 			return ret;
13416 
13417 		ret = intel_modeset_pipe_config(crtc, pipe_config);
13418 		if (ret)
13419 			return ret;
13420 
13421 		if (i915.fastboot &&
13422 		    intel_pipe_config_compare(dev,
13423 					to_intel_crtc_state(crtc->state),
13424 					pipe_config, true)) {
13425 			crtc_state->mode_changed = false;
13426 			to_intel_crtc_state(crtc_state)->update_pipe = true;
13427 		}
13428 
13429 		if (needs_modeset(crtc_state)) {
13430 			any_ms = true;
13431 
13432 			ret = drm_atomic_add_affected_planes(state, crtc);
13433 			if (ret)
13434 				return ret;
13435 		}
13436 
13437 		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
13438 				       needs_modeset(crtc_state) ?
13439 				       "[modeset]" : "[fastset]");
13440 	}
13441 
13442 	if (any_ms) {
13443 		ret = intel_modeset_checks(state);
13444 
13445 		if (ret)
13446 			return ret;
13447 	} else {
13448 		intel_state->cdclk = dev_priv->cdclk_freq;
	}
13449 
13450 	ret = drm_atomic_helper_check_planes(dev, state);
13451 	if (ret)
13452 		return ret;
13453 
13454 	intel_fbc_choose_crtc(dev_priv, state);
13455 	calc_watermark_data(state);
13456 
13457 	return 0;
13458 }
13459 
13460 static int intel_atomic_prepare_commit(struct drm_device *dev,
13461 				       struct drm_atomic_state *state,
13462 				       bool nonblock)
13463 {
13464 	struct drm_i915_private *dev_priv = dev->dev_private;
13465 	struct drm_plane_state *plane_state;
13466 	struct drm_crtc_state *crtc_state;
13467 	struct drm_plane *plane;
13468 	struct drm_crtc *crtc;
13469 	int i, ret;
13470 
13471 	if (nonblock) {
13472 		DRM_DEBUG_KMS("i915 does not yet support nonblocking commit\n");
13473 		return -EINVAL;
13474 	}
13475 
13476 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13477 		if (state->legacy_cursor_update)
13478 			continue;
13479 
13480 		ret = intel_crtc_wait_for_pending_flips(crtc);
13481 		if (ret)
13482 			return ret;
13483 
13484 		if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
13485 			flush_workqueue(dev_priv->wq);
13486 	}
13487 
13488 	ret = mutex_lock_interruptible(&dev->struct_mutex);
13489 	if (ret)
13490 		return ret;
13491 
13492 	ret = drm_atomic_helper_prepare_planes(dev, state);
13493 	mutex_unlock(&dev->struct_mutex);
13494 
13495 	if (!ret && !nonblock) {
13496 		for_each_plane_in_state(state, plane, plane_state, i) {
13497 			struct intel_plane_state *intel_plane_state =
13498 				to_intel_plane_state(plane_state);
13499 
13500 			if (!intel_plane_state->wait_req)
13501 				continue;
13502 
13503 			ret = __i915_wait_request(intel_plane_state->wait_req,
13504 						  true, NULL, NULL);
13505 			if (ret) {
13506 				/* Any hang should be swallowed by the wait */
13507 				WARN_ON(ret == -EIO);
13508 				mutex_lock(&dev->struct_mutex);
13509 				drm_atomic_helper_cleanup_planes(dev, state);
13510 				mutex_unlock(&dev->struct_mutex);
13511 				break;
13512 			}
13513 		}
13514 	}
13515 
13516 	return ret;
13517 }
13518 
13519 static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
13520 					  struct drm_i915_private *dev_priv,
13521 					  unsigned crtc_mask)
13522 {
13523 	unsigned last_vblank_count[I915_MAX_PIPES];
13524 	enum i915_pipe pipe;
13525 	int ret;
13526 
13527 	if (!crtc_mask)
13528 		return;
13529 
13530 	for_each_pipe(dev_priv, pipe) {
13531 		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
13532 
13533 		if (!((1 << pipe) & crtc_mask))
13534 			continue;
13535 
13536 		ret = drm_crtc_vblank_get(crtc);
13537 		if (WARN_ON(ret != 0)) {
13538 			crtc_mask &= ~(1 << pipe);
13539 			continue;
13540 		}
13541 
13542 		last_vblank_count[pipe] = drm_crtc_vblank_count(crtc);
13543 	}
13544 
13545 	for_each_pipe(dev_priv, pipe) {
13546 		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
13547 		long lret;
13548 
13549 		if (!((1 << pipe) & crtc_mask))
13550 			continue;
13551 
13552 		lret = wait_event_timeout(dev->vblank[pipe].queue,
13553 				last_vblank_count[pipe] !=
13554 					drm_crtc_vblank_count(crtc),
13555 				msecs_to_jiffies(50));
13556 
13557 		WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));
13558 
13559 		drm_crtc_vblank_put(crtc);
13560 	}
13561 }
13562 
13563 static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
13564 {
13565 	/* fb updated, need to unpin old fb */
13566 	if (crtc_state->fb_changed)
13567 		return true;
13568 
13569 	/* wm changes, need vblank before final wm's */
13570 	if (crtc_state->update_wm_post)
13571 		return true;
13572 
13573 	/*
13574 	 * cxsr is re-enabled after vblank.
13575 	 * This is already handled by crtc_state->update_wm_post,
13576 	 * but added for clarity.
13577 	 */
13578 	if (crtc_state->disable_cxsr)
13579 		return true;
13580 
13581 	return false;
13582 }
13583 
13584 /**
13585  * intel_atomic_commit - commit validated state object
13586  * @dev: DRM device
13587  * @state: the top-level driver state object
13588  * @nonblock: nonblocking commit
13589  *
13590  * This function commits a top-level state object that has been validated
13591  * with drm_atomic_helper_check().
13592  *
13593  * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
13594  * we can only handle plane-related operations and do not yet support
13595  * nonblocking commit.
13596  *
13597  * RETURNS
13598  * Zero for success or -errno.
13599  */
13600 static int intel_atomic_commit(struct drm_device *dev,
13601 			       struct drm_atomic_state *state,
13602 			       bool nonblock)
13603 {
13604 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13605 	struct drm_i915_private *dev_priv = dev->dev_private;
13606 	struct drm_crtc_state *old_crtc_state;
13607 	struct drm_crtc *crtc;
13608 	struct intel_crtc_state *intel_cstate;
13609 	int ret = 0, i;
13610 	bool hw_check = intel_state->modeset;
13611 	unsigned long put_domains[I915_MAX_PIPES] = {};
13612 	unsigned crtc_vblank_mask = 0;
13613 
13614 	ret = intel_atomic_prepare_commit(dev, state, nonblock);
13615 	if (ret) {
13616 		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
13617 		return ret;
13618 	}
13619 
13620 	drm_atomic_helper_swap_state(dev, state);
13621 	dev_priv->wm.config = intel_state->wm_config;
13622 	intel_shared_dpll_commit(state);
13623 
13624 	if (intel_state->modeset) {
13625 		memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
13626 		       sizeof(intel_state->min_pixclk));
13627 		dev_priv->active_crtcs = intel_state->active_crtcs;
13628 		dev_priv->atomic_cdclk_freq = intel_state->cdclk;
13629 
13630 		intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
13631 	}
13632 
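	/*
	 * Take power domain references for every pipe that changes, then
	 * tear down each pipe (planes first) that needs a full modeset.
	 */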
13633 	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
13634 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13635 
13636 		if (needs_modeset(crtc->state) ||
13637 		    to_intel_crtc_state(crtc->state)->update_pipe) {
13638 			hw_check = true;
13639 
13640 			put_domains[to_intel_crtc(crtc)->pipe] =
13641 				modeset_get_crtc_power_domains(crtc,
13642 					to_intel_crtc_state(crtc->state));
13643 		}
13644 
13645 		if (!needs_modeset(crtc->state))
13646 			continue;
13647 
13648 		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
13649 
13650 		if (old_crtc_state->active) {
13651 			intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
13652 			dev_priv->display.crtc_disable(crtc);
13653 			intel_crtc->active = false;
13654 			intel_fbc_disable(intel_crtc);
13655 			intel_disable_shared_dpll(intel_crtc);
13656 
13657 			/*
13658 			 * Underruns don't always raise
13659 			 * interrupts, so check manually.
13660 			 */
13661 			intel_check_cpu_fifo_underruns(dev_priv);
13662 			intel_check_pch_fifo_underruns(dev_priv);
13663 
13664 			if (!crtc->state->active)
13665 				intel_update_watermarks(crtc);
13666 		}
13667 	}
13668 
13669 	/* Only after disabling all output pipelines that will be changed can we
13670 	 * update the output configuration. */
13671 	intel_modeset_update_crtc_state(state);
13672 
13673 	if (intel_state->modeset) {
13674 		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
13675 
13676 		if (dev_priv->display.modeset_commit_cdclk &&
13677 		    intel_state->dev_cdclk != dev_priv->cdclk_freq)
13678 			dev_priv->display.modeset_commit_cdclk(state);
13679 
13680 		intel_modeset_verify_disabled(dev);
13681 	}
13682 
13683 	/* Now enable the clocks, planes, pipes, and connectors that we set up. */
13684 	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
13685 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13686 		bool modeset = needs_modeset(crtc->state);
13687 		struct intel_crtc_state *pipe_config =
13688 			to_intel_crtc_state(crtc->state);
13689 		bool update_pipe = !modeset && pipe_config->update_pipe;
13690 
13691 		if (modeset && crtc->state->active) {
13692 			update_scanline_offset(to_intel_crtc(crtc));
13693 			dev_priv->display.crtc_enable(crtc);
13694 		}
13695 
13696 		if (!modeset)
13697 			intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
13698 
13699 		if (crtc->state->active &&
13700 		    drm_atomic_get_existing_plane_state(state, crtc->primary))
13701 			intel_fbc_enable(intel_crtc);
13702 
13703 		if (crtc->state->active &&
13704 		    (crtc->state->planes_changed || update_pipe))
13705 			drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
13706 
13707 		if (pipe_config->base.active && needs_vblank_wait(pipe_config))
13708 			crtc_vblank_mask |= 1 << i;
13709 	}
13710 
13711 	/* FIXME: add subpixel order */
13712 
13713 	if (!state->legacy_cursor_update)
13714 		intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
13715 
13716 	/*
13717 	 * Now that the vblank has passed, we can go ahead and program the
13718 	 * optimal watermarks on platforms that need two-step watermark
13719 	 * programming.
13720 	 *
13721 	 * TODO: Move this (and other cleanup) to an async worker eventually.
13722 	 */
13723 	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
13724 		intel_cstate = to_intel_crtc_state(crtc->state);
13725 
13726 		if (dev_priv->display.optimize_watermarks)
13727 			dev_priv->display.optimize_watermarks(intel_cstate);
13728 	}
13729 
13730 	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
13731 		intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
13732 
13733 		if (put_domains[i])
13734 			modeset_put_power_domains(dev_priv, put_domains[i]);
13735 
13736 		intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
13737 	}
13738 
13739 	if (intel_state->modeset)
13740 		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
13741 
13742 	mutex_lock(&dev->struct_mutex);
13743 	drm_atomic_helper_cleanup_planes(dev, state);
13744 	mutex_unlock(&dev->struct_mutex);
13745 
13746 	drm_atomic_state_free(state);
13747 
13748 	/* As one of the primary mmio accessors, KMS has a high likelihood
13749 	 * of triggering bugs in unclaimed access. After we finish
13750 	 * modesetting, see if an error has been flagged, and if so
13751 	 * enable debugging for the next modeset - and hope we catch
13752 	 * the culprit.
13753 	 *
13754 	 * XXX note that we assume display power is on at this point.
13755 	 * This might hold true now but we need to add pm helper to check
13756 	 * unclaimed only when the hardware is on, as atomic commits
13757 	 * can happen also when the device is completely off.
13758 	 */
13759 	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
13760 
13761 	return 0;
13762 }
13763 
13764 void intel_crtc_restore_mode(struct drm_crtc *crtc)
13765 {
13766 	struct drm_device *dev = crtc->dev;
13767 	struct drm_atomic_state *state;
13768 	struct drm_crtc_state *crtc_state;
13769 	int ret;
13770 
13771 	state = drm_atomic_state_alloc(dev);
13772 	if (!state) {
13773 		DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory\n",
13774 			      crtc->base.id);
13775 		return;
13776 	}
13777 
13778 	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
13779 
13780 retry:
13781 	crtc_state = drm_atomic_get_crtc_state(state, crtc);
13782 	ret = PTR_ERR_OR_ZERO(crtc_state);
13783 	if (!ret) {
13784 		if (!crtc_state->active)
13785 			goto out;
13786 
13787 		crtc_state->mode_changed = true;
13788 		ret = drm_atomic_commit(state);
13789 	}
13790 
13791 	if (ret == -EDEADLK) {
13792 		drm_atomic_state_clear(state);
13793 		drm_modeset_backoff(state->acquire_ctx);
13794 		goto retry;
13795 	}
13796 
13797 	if (ret)
13798 out:
13799 		drm_atomic_state_free(state);
13800 }
13801 
13802 #undef for_each_intel_crtc_masked
13803 
13804 static const struct drm_crtc_funcs intel_crtc_funcs = {
13805 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
13806 	.set_config = drm_atomic_helper_set_config,
13807 	.set_property = drm_atomic_helper_crtc_set_property,
13808 	.destroy = intel_crtc_destroy,
13809 	.page_flip = intel_crtc_page_flip,
13810 	.atomic_duplicate_state = intel_crtc_duplicate_state,
13811 	.atomic_destroy_state = intel_crtc_destroy_state,
13812 };
13813 
13814 /**
13815  * intel_prepare_plane_fb - Prepare fb for usage on plane
13816  * @plane: drm plane to prepare for
13817  * @fb: framebuffer to prepare for presentation
13818  *
13819  * Prepares a framebuffer for usage on a display plane.  Generally this
13820  * involves pinning the underlying object and updating the frontbuffer tracking
13821  * bits.  Some older platforms need special physical address handling for
13822  * cursor planes.
13823  *
13824  * Must be called with struct_mutex held.
13825  *
13826  * Returns 0 on success, negative error code on failure.
13827  */
13828 int
13829 intel_prepare_plane_fb(struct drm_plane *plane,
13830 		       struct drm_plane_state *new_state)
13831 {
13832 	struct drm_device *dev = plane->dev;
13833 	struct drm_framebuffer *fb = new_state->fb;
13834 	struct intel_plane *intel_plane = to_intel_plane(plane);
13835 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13836 	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
13837 	int ret = 0;
13838 
13839 	if (!obj && !old_obj)
13840 		return 0;
13841 
13842 	if (old_obj) {
13843 		struct drm_crtc_state *crtc_state =
13844 			drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
13845 
13846 		/* Big Hammer, we also need to ensure that any pending
13847 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
13848 		 * current scanout is retired before unpinning the old
13849 		 * framebuffer. Note that we rely on userspace rendering
13850 		 * into the buffer attached to the pipe they are waiting
13851 		 * on. If not, userspace generates a GPU hang with IPEHR
13852 		 * point to the MI_WAIT_FOR_EVENT.
13853 		 * pointing to the MI_WAIT_FOR_EVENT.
13854 		 * This should only fail upon a hung GPU, in which case we
13855 		 * can safely continue.
13856 		 */
13857 		if (needs_modeset(crtc_state))
13858 			ret = i915_gem_object_wait_rendering(old_obj, true);
13859 		if (ret) {
13860 			/* GPU hangs should have been swallowed by the wait */
13861 			WARN_ON(ret == -EIO);
13862 			return ret;
13863 		}
13864 	}
13865 
13866 	/* For framebuffer backed by dmabuf, wait for fence */
13867 #if 0
13868 	if (obj && obj->base.dma_buf) {
13869 		long lret;
13870 
13871 		lret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
13872 							   false, true,
13873 							   MAX_SCHEDULE_TIMEOUT);
13874 		if (lret == -ERESTARTSYS)
13875 			return lret;
13876 
13877 		WARN(lret < 0, "waiting returns %li\n", lret);
13878 	}
13879 #endif
13880 
13881 	if (!obj) {
13882 		ret = 0;
13883 	} else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
13884 	    INTEL_INFO(dev)->cursor_needs_physical) {
13885 		int align = IS_I830(dev) ? 16 * 1024 : 256;
13886 		ret = i915_gem_object_attach_phys(obj, align);
13887 		if (ret)
13888 			DRM_DEBUG_KMS("failed to attach phys object\n");
13889 	} else {
13890 		ret = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
13891 	}
13892 
13893 	if (ret == 0) {
13894 		if (obj) {
13895 			struct intel_plane_state *plane_state =
13896 				to_intel_plane_state(new_state);
13897 
13898 			i915_gem_request_assign(&plane_state->wait_req,
13899 						obj->last_write_req);
13900 		}
13901 
13902 		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13903 	}
13904 
13905 	return ret;
13906 }
13907 
13908 /**
13909  * intel_cleanup_plane_fb - Cleans up an fb after plane use
13910  * @plane: drm plane to clean up for
13911  * @fb: old framebuffer that was on plane
13912  *
13913  * Cleans up a framebuffer that has just been removed from a plane.
13914  *
13915  * Must be called with struct_mutex held.
13916  */
13917 void
13918 intel_cleanup_plane_fb(struct drm_plane *plane,
13919 		       struct drm_plane_state *old_state)
13920 {
13921 	struct drm_device *dev = plane->dev;
13922 	struct intel_plane *intel_plane = to_intel_plane(plane);
13923 	struct intel_plane_state *old_intel_state;
13924 	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
13925 	struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
13926 
13927 	old_intel_state = to_intel_plane_state(old_state);
13928 
13929 	if (!obj && !old_obj)
13930 		return;
13931 
13932 	if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
13933 	    !INTEL_INFO(dev)->cursor_needs_physical))
13934 		intel_unpin_fb_obj(old_state->fb, old_state->rotation);
13935 
13936 	/* prepare_fb aborted? Transfer the frontbuffer bit here instead. */
13937 	if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) ||
13938 	    (obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit)))
13939 		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13940 
13941 	i915_gem_request_assign(&old_intel_state->wait_req, NULL);
13942 }
13943 
13944 int
13945 skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
13946 {
13947 	int max_scale;
13948 	struct drm_device *dev;
13949 	struct drm_i915_private *dev_priv;
13950 	int crtc_clock, cdclk;
13951 
13952 	if (!intel_crtc || !crtc_state->base.enable)
13953 		return DRM_PLANE_HELPER_NO_SCALING;
13954 
13955 	dev = intel_crtc->base.dev;
13956 	dev_priv = dev->dev_private;
13957 	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13958 	cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
13959 
13960 	if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
13961 		return DRM_PLANE_HELPER_NO_SCALING;
13962 
13963 	/*
13964 	 * skl max scale is lower of:
13965 	 *    close to 3 but not 3, -1 is for that purpose
13966 	 *            or
13967 	 *    cdclk/crtc_clock
13968 	 */
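	/*
	 * Worked example (illustrative numbers): cdclk = 337500 kHz and
	 * crtc_clock = 148500 kHz gives (337500 << 8) / 148500 = 581, so
	 * max_scale = 581 << 8 = 148736 in 16.16 fixed point, i.e. a max
	 * downscale of ~2.27x (the 3x cap of 196607 doesn't bite here).
	 */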
13969 	max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));
13970 
13971 	return max_scale;
13972 }
13973 
13974 static int
13975 intel_check_primary_plane(struct drm_plane *plane,
13976 			  struct intel_crtc_state *crtc_state,
13977 			  struct intel_plane_state *state)
13978 {
13979 	struct drm_crtc *crtc = state->base.crtc;
13980 	struct drm_framebuffer *fb = state->base.fb;
13981 	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
13982 	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
13983 	bool can_position = false;
13984 
13985 	if (INTEL_INFO(plane->dev)->gen >= 9) {
13986 		/* use scaler when colorkey is not required */
13987 		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
13988 			min_scale = 1;
13989 			max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
13990 		}
13991 		can_position = true;
13992 	}
13993 
13994 	return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
13995 					     &state->dst, &state->clip,
13996 					     min_scale, max_scale,
13997 					     can_position, true,
13998 					     &state->visible);
13999 }
14000 
14001 static void intel_begin_crtc_commit(struct drm_crtc *crtc,
14002 				    struct drm_crtc_state *old_crtc_state)
14003 {
14004 	struct drm_device *dev = crtc->dev;
14005 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14006 	struct intel_crtc_state *old_intel_state =
14007 		to_intel_crtc_state(old_crtc_state);
14008 	bool modeset = needs_modeset(crtc->state);
14009 
14010 	/* Perform vblank evasion around commit operation */
14011 	intel_pipe_update_start(intel_crtc);
14012 
14013 	if (modeset)
14014 		return;
14015 
14016 	if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
14017 		intel_color_set_csc(crtc->state);
14018 		intel_color_load_luts(crtc->state);
14019 	}
14020 
14021 	if (to_intel_crtc_state(crtc->state)->update_pipe)
14022 		intel_update_pipe_config(intel_crtc, old_intel_state);
14023 	else if (INTEL_INFO(dev)->gen >= 9)
14024 		skl_detach_scalers(intel_crtc);
14025 }
14026 
14027 static void intel_finish_crtc_commit(struct drm_crtc *crtc,
14028 				     struct drm_crtc_state *old_crtc_state)
14029 {
14030 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14031 
14032 	intel_pipe_update_end(intel_crtc);
14033 }
14034 
14035 /**
14036  * intel_plane_destroy - destroy a plane
14037  * @plane: plane to destroy
14038  *
14039  * Common destruction function for all types of planes (primary, cursor,
14040  * sprite).
14041  */
14042 void intel_plane_destroy(struct drm_plane *plane)
14043 {
14044 	struct intel_plane *intel_plane = to_intel_plane(plane);
14045 	drm_plane_cleanup(plane);
14046 	kfree(intel_plane);
14047 }
14048 
14049 const struct drm_plane_funcs intel_plane_funcs = {
14050 	.update_plane = drm_atomic_helper_update_plane,
14051 	.disable_plane = drm_atomic_helper_disable_plane,
14052 	.destroy = intel_plane_destroy,
14053 	.set_property = drm_atomic_helper_plane_set_property,
14054 	.atomic_get_property = intel_plane_atomic_get_property,
14055 	.atomic_set_property = intel_plane_atomic_set_property,
14056 	.atomic_duplicate_state = intel_plane_duplicate_state,
14057 	.atomic_destroy_state = intel_plane_destroy_state,
14059 };
14060 
14061 static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
14062 						    int pipe)
14063 {
14064 	struct intel_plane *primary = NULL;
14065 	struct intel_plane_state *state = NULL;
14066 	const uint32_t *intel_primary_formats;
14067 	unsigned int num_formats;
14068 	int ret;
14069 
14070 	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
14071 	if (!primary)
14072 		goto fail;
14073 
14074 	state = intel_create_plane_state(&primary->base);
14075 	if (!state)
14076 		goto fail;
14077 	primary->base.state = &state->base;
14078 
14079 	primary->can_scale = false;
14080 	primary->max_downscale = 1;
14081 	if (INTEL_INFO(dev)->gen >= 9) {
14082 		primary->can_scale = true;
14083 		state->scaler_id = -1;
14084 	}
14085 	primary->pipe = pipe;
14086 	primary->plane = pipe;
14087 	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
14088 	primary->check_plane = intel_check_primary_plane;
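	/* on gen2/3 only plane A can do FBC; see the pipe/plane swap in intel_crtc_init() */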
14089 	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
14090 		primary->plane = !pipe;
14091 
14092 	if (INTEL_INFO(dev)->gen >= 9) {
14093 		intel_primary_formats = skl_primary_formats;
14094 		num_formats = ARRAY_SIZE(skl_primary_formats);
14095 
14096 		primary->update_plane = skylake_update_primary_plane;
14097 		primary->disable_plane = skylake_disable_primary_plane;
14098 	} else if (HAS_PCH_SPLIT(dev)) {
14099 		intel_primary_formats = i965_primary_formats;
14100 		num_formats = ARRAY_SIZE(i965_primary_formats);
14101 
14102 		primary->update_plane = ironlake_update_primary_plane;
14103 		primary->disable_plane = i9xx_disable_primary_plane;
14104 	} else if (INTEL_INFO(dev)->gen >= 4) {
14105 		intel_primary_formats = i965_primary_formats;
14106 		num_formats = ARRAY_SIZE(i965_primary_formats);
14107 
14108 		primary->update_plane = i9xx_update_primary_plane;
14109 		primary->disable_plane = i9xx_disable_primary_plane;
14110 	} else {
14111 		intel_primary_formats = i8xx_primary_formats;
14112 		num_formats = ARRAY_SIZE(i8xx_primary_formats);
14113 
14114 		primary->update_plane = i9xx_update_primary_plane;
14115 		primary->disable_plane = i9xx_disable_primary_plane;
14116 	}
14117 
14118 	ret = drm_universal_plane_init(dev, &primary->base, 0,
14119 				       &intel_plane_funcs,
14120 				       intel_primary_formats, num_formats,
14121 				       DRM_PLANE_TYPE_PRIMARY, NULL);
14122 	if (ret)
14123 		goto fail;
14124 
14125 	if (INTEL_INFO(dev)->gen >= 4)
14126 		intel_create_rotation_property(dev, primary);
14127 
14128 	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
14129 
14130 	return &primary->base;
14131 
14132 fail:
14133 	kfree(state);
14134 	kfree(primary);
14135 
14136 	return NULL;
14137 }
14138 
14139 void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
14140 {
14141 	if (!dev->mode_config.rotation_property) {
14142 		unsigned long flags = BIT(DRM_ROTATE_0) |
14143 			BIT(DRM_ROTATE_180);
14144 
14145 		if (INTEL_INFO(dev)->gen >= 9)
14146 			flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);
14147 
14148 		dev->mode_config.rotation_property =
14149 			drm_mode_create_rotation_property(dev, flags);
14150 	}
14151 	if (dev->mode_config.rotation_property)
14152 		drm_object_attach_property(&plane->base.base,
14153 				dev->mode_config.rotation_property,
14154 				plane->base.state->rotation);
14155 }
14156 
14157 static int
14158 intel_check_cursor_plane(struct drm_plane *plane,
14159 			 struct intel_crtc_state *crtc_state,
14160 			 struct intel_plane_state *state)
14161 {
14162 	struct drm_crtc *crtc = crtc_state->base.crtc;
14163 	struct drm_framebuffer *fb = state->base.fb;
14164 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14165 	enum i915_pipe pipe = to_intel_plane(plane)->pipe;
14166 	unsigned stride;
14167 	int ret;
14168 
14169 	ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
14170 					    &state->dst, &state->clip,
14171 					    DRM_PLANE_HELPER_NO_SCALING,
14172 					    DRM_PLANE_HELPER_NO_SCALING,
14173 					    true, true, &state->visible);
14174 	if (ret)
14175 		return ret;
14176 
14177 	/* if we want to turn off the cursor, ignore width and height */
14178 	if (!obj)
14179 		return 0;
14180 
14181 	/* Check for which cursor types we support */
14182 	if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
14183 		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
14184 			  state->base.crtc_w, state->base.crtc_h);
14185 		return -EINVAL;
14186 	}
14187 
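	/*
	 * Cursors are ARGB8888 with a power-of-two stride: e.g. a 100x100
	 * cursor is stored with a 128 pixel (512 byte) stride and so needs
	 * a buffer of at least 512 * 100 = 51200 bytes.
	 */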
14188 	stride = roundup_pow_of_two(state->base.crtc_w) * 4;
14189 	if (obj->base.size < stride * state->base.crtc_h) {
14190 		DRM_DEBUG_KMS("buffer is too small\n");
14191 		return -ENOMEM;
14192 	}
14193 
14194 	if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
14195 		DRM_DEBUG_KMS("cursor cannot be tiled\n");
14196 		return -EINVAL;
14197 	}
14198 
14199 	/*
14200 	 * There's something wrong with the cursor on CHV pipe C.
14201 	 * If it straddles the left edge of the screen then
14202 	 * moving it away from the edge or disabling it often
14203 	 * results in a pipe underrun, and often that can lead to
14204 	 * dead pipe (constant underrun reported, and it scans
14205 	 * a dead pipe (constant underrun reported, and it scans
14206 	 * display power well must be turned off and on again.
14207 	 * Refuse to put the cursor into that compromised position.
14208 	 */
14209 	if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
14210 	    state->visible && state->base.crtc_x < 0) {
14211 		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
14212 		return -EINVAL;
14213 	}
14214 
14215 	return 0;
14216 }
14217 
14218 static void
14219 intel_disable_cursor_plane(struct drm_plane *plane,
14220 			   struct drm_crtc *crtc)
14221 {
14222 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14223 
14224 	intel_crtc->cursor_addr = 0;
14225 	intel_crtc_update_cursor(crtc, NULL);
14226 }
14227 
14228 static void
14229 intel_update_cursor_plane(struct drm_plane *plane,
14230 			  const struct intel_crtc_state *crtc_state,
14231 			  const struct intel_plane_state *state)
14232 {
14233 	struct drm_crtc *crtc = crtc_state->base.crtc;
14234 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14235 	struct drm_device *dev = plane->dev;
14236 	struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
14237 	uint32_t addr;
14238 
14239 	if (!obj)
14240 		addr = 0;
14241 	else if (!INTEL_INFO(dev)->cursor_needs_physical)
14242 		addr = i915_gem_obj_ggtt_offset(obj);
14243 	else
14244 		addr = obj->phys_handle->busaddr;
14245 
14246 	intel_crtc->cursor_addr = addr;
14247 	intel_crtc_update_cursor(crtc, state);
14248 }
14249 
14250 static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
14251 						   int pipe)
14252 {
14253 	struct intel_plane *cursor = NULL;
14254 	struct intel_plane_state *state = NULL;
14255 	int ret;
14256 
14257 	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
14258 	if (!cursor)
14259 		goto fail;
14260 
14261 	state = intel_create_plane_state(&cursor->base);
14262 	if (!state)
14263 		goto fail;
14264 	cursor->base.state = &state->base;
14265 
14266 	cursor->can_scale = false;
14267 	cursor->max_downscale = 1;
14268 	cursor->pipe = pipe;
14269 	cursor->plane = pipe;
14270 	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
14271 	cursor->check_plane = intel_check_cursor_plane;
14272 	cursor->update_plane = intel_update_cursor_plane;
14273 	cursor->disable_plane = intel_disable_cursor_plane;
14274 
14275 	ret = drm_universal_plane_init(dev, &cursor->base, 0,
14276 				       &intel_plane_funcs,
14277 				       intel_cursor_formats,
14278 				       ARRAY_SIZE(intel_cursor_formats),
14279 				       DRM_PLANE_TYPE_CURSOR, NULL);
14280 	if (ret)
14281 		goto fail;
14282 
14283 	if (INTEL_INFO(dev)->gen >= 4) {
14284 		if (!dev->mode_config.rotation_property)
14285 			dev->mode_config.rotation_property =
14286 				drm_mode_create_rotation_property(dev,
14287 							BIT(DRM_ROTATE_0) |
14288 							BIT(DRM_ROTATE_180));
14289 		if (dev->mode_config.rotation_property)
14290 			drm_object_attach_property(&cursor->base.base,
14291 				dev->mode_config.rotation_property,
14292 				state->base.rotation);
14293 	}
14294 
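	/* cursors never use a pipe scaler; mark the slot as unused */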
14295 	if (INTEL_INFO(dev)->gen >= 9)
14296 		state->scaler_id = -1;
14297 
14298 	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
14299 
14300 	return &cursor->base;
14301 
14302 fail:
14303 	kfree(state);
14304 	kfree(cursor);
14305 
14306 	return NULL;
14307 }
14308 
14309 static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
14310 	struct intel_crtc_state *crtc_state)
14311 {
14312 	int i;
14313 	struct intel_scaler *intel_scaler;
14314 	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
14315 
14316 	for (i = 0; i < intel_crtc->num_scalers; i++) {
14317 		intel_scaler = &scaler_state->scalers[i];
14318 		intel_scaler->in_use = 0;
14319 		intel_scaler->mode = PS_SCALER_MODE_DYN;
14320 	}
14321 
14322 	scaler_state->scaler_id = -1;
14323 }
14324 
14325 static void intel_crtc_init(struct drm_device *dev, int pipe)
14326 {
14327 	struct drm_i915_private *dev_priv = dev->dev_private;
14328 	struct intel_crtc *intel_crtc;
14329 	struct intel_crtc_state *crtc_state = NULL;
14330 	struct drm_plane *primary = NULL;
14331 	struct drm_plane *cursor = NULL;
14332 	int ret;
14333 
14334 	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
14335 	if (intel_crtc == NULL)
14336 		return;
14337 
14338 	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
14339 	if (!crtc_state)
14340 		goto fail;
14341 	intel_crtc->config = crtc_state;
14342 	intel_crtc->base.state = &crtc_state->base;
14343 	crtc_state->base.crtc = &intel_crtc->base;
14344 
14345 	/* initialize shared scalers */
14346 	if (INTEL_INFO(dev)->gen >= 9) {
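		/* SKL+ pipes A/B have two shared scalers, pipe C only one */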
14347 		if (pipe == PIPE_C)
14348 			intel_crtc->num_scalers = 1;
14349 		else
14350 			intel_crtc->num_scalers = SKL_NUM_SCALERS;
14351 
14352 		skl_init_scalers(dev, intel_crtc, crtc_state);
14353 	}
14354 
14355 	primary = intel_primary_plane_create(dev, pipe);
14356 	if (!primary)
14357 		goto fail;
14358 
14359 	cursor = intel_cursor_plane_create(dev, pipe);
14360 	if (!cursor)
14361 		goto fail;
14362 
14363 	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
14364 					cursor, &intel_crtc_funcs, NULL);
14365 	if (ret)
14366 		goto fail;
14367 
14368 	/*
14369 	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
14370 	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
14371 	 */
14372 	intel_crtc->pipe = pipe;
14373 	intel_crtc->plane = pipe;
14374 	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
14375 		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
14376 		intel_crtc->plane = !pipe;
14377 	}
14378 
14379 	intel_crtc->cursor_base = ~0;
14380 	intel_crtc->cursor_cntl = ~0;
14381 	intel_crtc->cursor_size = ~0;
14382 
14383 	intel_crtc->wm.cxsr_allowed = true;
14384 
14385 	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
14386 	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
14387 	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
14388 	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
14389 
14390 	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
14391 
14392 	intel_color_init(&intel_crtc->base);
14393 
14394 	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
14395 	return;
14396 
14397 fail:
14398 	if (primary)
14399 		drm_plane_cleanup(primary);
14400 	if (cursor)
14401 		drm_plane_cleanup(cursor);
14402 	kfree(crtc_state);
14403 	kfree(intel_crtc);
14404 }
14405 
14406 enum i915_pipe intel_get_pipe_from_connector(struct intel_connector *connector)
14407 {
14408 	struct drm_encoder *encoder = connector->base.encoder;
14409 	struct drm_device *dev = connector->base.dev;
14410 
14411 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
14412 
14413 	if (!encoder || WARN_ON(!encoder->crtc))
14414 		return INVALID_PIPE;
14415 
14416 	return to_intel_crtc(encoder->crtc)->pipe;
14417 }
14418 
14419 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
14420 				struct drm_file *file)
14421 {
14422 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
14423 	struct drm_crtc *drmmode_crtc;
14424 	struct intel_crtc *crtc;
14425 
14426 	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
14427 
14428 	if (!drmmode_crtc) {
14429 		DRM_ERROR("no such CRTC id\n");
14430 		return -ENOENT;
14431 	}
14432 
14433 	crtc = to_intel_crtc(drmmode_crtc);
14434 	pipe_from_crtc_id->pipe = crtc->pipe;
14435 
14436 	return 0;
14437 }
14438 
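/*
 * Build an index mask of the encoders that can be cloned with @encoder,
 * ordered by their position on the device's encoder list.
 */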
14439 static int intel_encoder_clones(struct intel_encoder *encoder)
14440 {
14441 	struct drm_device *dev = encoder->base.dev;
14442 	struct intel_encoder *source_encoder;
14443 	int index_mask = 0;
14444 	int entry = 0;
14445 
14446 	for_each_intel_encoder(dev, source_encoder) {
14447 		if (encoders_cloneable(encoder, source_encoder))
14448 			index_mask |= (1 << entry);
14449 
14450 		entry++;
14451 	}
14452 
14453 	return index_mask;
14454 }
14455 
14456 static bool has_edp_a(struct drm_device *dev)
14457 {
14458 	struct drm_i915_private *dev_priv = dev->dev_private;
14459 
14460 	if (!IS_MOBILE(dev))
14461 		return false;
14462 
14463 	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14464 		return false;
14465 
14466 	if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14467 		return false;
14468 
14469 	return true;
14470 }
14471 
14472 static bool intel_crt_present(struct drm_device *dev)
14473 {
14474 	struct drm_i915_private *dev_priv = dev->dev_private;
14475 
14476 	if (INTEL_INFO(dev)->gen >= 9)
14477 		return false;
14478 
14479 	if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
14480 		return false;
14481 
14482 	if (IS_CHERRYVIEW(dev))
14483 		return false;
14484 
14485 	if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
14486 		return false;
14487 
14488 	/* DDI E can't be used if DDI A requires 4 lanes */
14489 	if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
14490 		return false;
14491 
14492 	if (!dev_priv->vbt.int_crt_support)
14493 		return false;
14494 
14495 	return true;
14496 }
14497 
14498 static void intel_setup_outputs(struct drm_device *dev)
14499 {
14500 	struct drm_i915_private *dev_priv = dev->dev_private;
14501 	struct intel_encoder *encoder;
14502 	bool dpd_is_edp = false;
14503 
14504 	intel_lvds_init(dev);
14505 
14506 	if (intel_crt_present(dev))
14507 		intel_crt_init(dev);
14508 
14509 	if (IS_BROXTON(dev)) {
14510 		/*
14511 		 * FIXME: Broxton doesn't support port detection via the
14512 		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
14513 		 * detect the ports.
14514 		 */
14515 		intel_ddi_init(dev, PORT_A);
14516 		intel_ddi_init(dev, PORT_B);
14517 		intel_ddi_init(dev, PORT_C);
14518 
14519 		intel_dsi_init(dev);
14520 	} else if (HAS_DDI(dev)) {
14521 		int found;
14522 
14523 		/*
14524 		 * Haswell uses DDI functions to detect digital outputs.
14525 		 * On SKL pre-D0 the strap isn't connected, so we assume
14526 		 * it's there.
14527 		 */
14528 		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
14529 		/* WaIgnoreDDIAStrap: skl */
14530 		if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
14531 			intel_ddi_init(dev, PORT_A);
14532 
14533 		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
14534 		 * register */
14535 		found = I915_READ(SFUSE_STRAP);
14536 
14537 		if (found & SFUSE_STRAP_DDIB_DETECTED)
14538 			intel_ddi_init(dev, PORT_B);
14539 		if (found & SFUSE_STRAP_DDIC_DETECTED)
14540 			intel_ddi_init(dev, PORT_C);
14541 		if (found & SFUSE_STRAP_DDID_DETECTED)
14542 			intel_ddi_init(dev, PORT_D);
14543 		/*
14544 		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
14545 		 */
14546 		if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
14547 		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
14548 		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
14549 		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
14550 			intel_ddi_init(dev, PORT_E);
14551 
14552 	} else if (HAS_PCH_SPLIT(dev)) {
14553 		int found;
14554 		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
14555 
14556 		if (has_edp_a(dev))
14557 			intel_dp_init(dev, DP_A, PORT_A);
14558 
14559 		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
14560 			/* PCH SDVOB multiplex with HDMIB */
14561 			found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
14562 			if (!found)
14563 				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
14564 			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
14565 				intel_dp_init(dev, PCH_DP_B, PORT_B);
14566 		}
14567 
14568 		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
14569 			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
14570 
14571 		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
14572 			intel_hdmi_init(dev, PCH_HDMID, PORT_D);
14573 
14574 		if (I915_READ(PCH_DP_C) & DP_DETECTED)
14575 			intel_dp_init(dev, PCH_DP_C, PORT_C);
14576 
14577 		if (I915_READ(PCH_DP_D) & DP_DETECTED)
14578 			intel_dp_init(dev, PCH_DP_D, PORT_D);
14579 	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
14580 		bool has_edp, has_port;
14581 
14582 		/*
14583 		 * The DP_DETECTED bit is the latched state of the DDC
14584 		 * SDA pin at boot. However since eDP doesn't require DDC
14585 		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
14586 		 * eDP ports may have been muxed to an alternate function.
14587 		 * Thus we can't rely on the DP_DETECTED bit alone to detect
14588 		 * eDP ports. Consult the VBT as well as DP_DETECTED to
14589 		 * detect eDP ports.
14590 		 *
14591 		 * Sadly the straps seem to be missing sometimes even for HDMI
14592 		 * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both strap
14593 		 * and VBT for the presence of the port. Additionally we can't
14594 		 * trust the port type the VBT declares as we've seen at least
14595 		 * HDMI ports that the VBT claim are DP or eDP.
14596 		 */
14597 		has_edp = intel_dp_is_edp(dev, PORT_B);
14598 		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
14599 		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
14600 			has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
14601 		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
14602 			intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
14603 
14604 		has_edp = intel_dp_is_edp(dev, PORT_C);
14605 		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
14606 		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
14607 			has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
14608 		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
14609 			intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
14610 
14611 		if (IS_CHERRYVIEW(dev)) {
14612 			/*
14613 			 * eDP not supported on port D,
14614 			 * so no need to worry about it
14615 			 */
14616 			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
14617 			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
14618 				intel_dp_init(dev, CHV_DP_D, PORT_D);
14619 			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
14620 				intel_hdmi_init(dev, CHV_HDMID, PORT_D);
14621 		}
14622 
14623 		intel_dsi_init(dev);
14624 	} else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
14625 		bool found = false;
14626 
14627 		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14628 			DRM_DEBUG_KMS("probing SDVOB\n");
14629 			found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
14630 			if (!found && IS_G4X(dev)) {
14631 				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
14632 				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
14633 			}
14634 
14635 			if (!found && IS_G4X(dev))
14636 				intel_dp_init(dev, DP_B, PORT_B);
14637 		}
14638 
14639 		/* Before G4X, SDVOC doesn't have its own detect register */
14640 
14641 		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14642 			DRM_DEBUG_KMS("probing SDVOC\n");
14643 			found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
14644 		}
14645 
14646 		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
14647 
14648 			if (IS_G4X(dev)) {
14649 				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
14650 				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
14651 			}
14652 			if (IS_G4X(dev))
14653 				intel_dp_init(dev, DP_C, PORT_C);
14654 		}
14655 
14656 		if (IS_G4X(dev) &&
14657 		    (I915_READ(DP_D) & DP_DETECTED))
14658 			intel_dp_init(dev, DP_D, PORT_D);
14659 	} else if (IS_GEN2(dev))
14660 		intel_dvo_init(dev);
14661 
14662 	if (SUPPORTS_TV(dev))
14663 		intel_tv_init(dev);
14664 
14665 	intel_psr_init(dev);
14666 
14667 	for_each_intel_encoder(dev, encoder) {
14668 		encoder->base.possible_crtcs = encoder->crtc_mask;
14669 		encoder->base.possible_clones =
14670 			intel_encoder_clones(encoder);
14671 	}
14672 
14673 	intel_init_pch_refclk(dev);
14674 
14675 	drm_helper_move_panel_connectors_to_head(dev);
14676 }
14677 
14678 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
14679 {
14680 	struct drm_device *dev = fb->dev;
14681 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14682 
14683 	drm_framebuffer_cleanup(fb);
14684 	mutex_lock(&dev->struct_mutex);
14685 	WARN_ON(!intel_fb->obj->framebuffer_references--);
14686 	drm_gem_object_unreference(&intel_fb->obj->base);
14687 	mutex_unlock(&dev->struct_mutex);
14688 	kfree(intel_fb);
14689 }
14690 
14691 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14692 						struct drm_file *file,
14693 						unsigned int *handle)
14694 {
14695 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14696 	struct drm_i915_gem_object *obj = intel_fb->obj;
14697 
14698 	if (obj->userptr.mm) {
14699 		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14700 		return -EINVAL;
14701 	}
14702 
14703 	return drm_gem_handle_create(file, &obj->base, handle);
14704 }
14705 
14706 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
14707 					struct drm_file *file,
14708 					unsigned flags, unsigned color,
14709 					struct drm_clip_rect *clips,
14710 					unsigned num_clips)
14711 {
14712 	struct drm_device *dev = fb->dev;
14713 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14714 	struct drm_i915_gem_object *obj = intel_fb->obj;
14715 
14716 	mutex_lock(&dev->struct_mutex);
14717 	intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
14718 	mutex_unlock(&dev->struct_mutex);
14719 
14720 	return 0;
14721 }
14722 
14723 static const struct drm_framebuffer_funcs intel_fb_funcs = {
14724 	.destroy = intel_user_framebuffer_destroy,
14725 	.create_handle = intel_user_framebuffer_create_handle,
14726 	.dirty = intel_user_framebuffer_dirty,
14727 };
14728 
14729 static
14730 u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
14731 			 uint32_t pixel_format)
14732 {
14733 	u32 gen = INTEL_INFO(dev)->gen;
14734 
14735 	if (gen >= 9) {
14736 		int cpp = drm_format_plane_cpp(pixel_format, 0);
14737 
14738 		/* "The stride in bytes must not exceed the size of 8K
14739 		 *  pixels and 32K bytes."
14740 		 */
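		/* e.g. 32768 bytes for 32bpp formats, 16384 bytes for RGB565 */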
14741 		return min(8192 * cpp, 32768);
14742 	} else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
14743 		return 32*1024;
14744 	} else if (gen >= 4) {
14745 		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14746 			return 16*1024;
14747 		else
14748 			return 32*1024;
14749 	} else if (gen >= 3) {
14750 		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14751 			return 8*1024;
14752 		else
14753 			return 16*1024;
14754 	} else {
14755 		/* XXX DSPC is limited to 4k tiled */
14756 		return 8*1024;
14757 	}
14758 }
14759 
14760 static int intel_framebuffer_init(struct drm_device *dev,
14761 				  struct intel_framebuffer *intel_fb,
14762 				  struct drm_mode_fb_cmd2 *mode_cmd,
14763 				  struct drm_i915_gem_object *obj)
14764 {
14765 	struct drm_i915_private *dev_priv = to_i915(dev);
14766 	unsigned int aligned_height;
14767 	int ret;
14768 	u32 pitch_limit, stride_alignment;
14769 
14770 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
14771 
14772 	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
14773 		/* Enforce that fb modifier and tiling mode match, but only for
14774 		 * X-tiled. This is needed for FBC. */
14775 		if (!!(obj->tiling_mode == I915_TILING_X) !=
14776 		    !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
14777 			DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
14778 			return -EINVAL;
14779 		}
14780 	} else {
14781 		if (obj->tiling_mode == I915_TILING_X)
14782 			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
14783 		else if (obj->tiling_mode == I915_TILING_Y) {
14784 			DRM_DEBUG("No Y tiling for legacy addfb\n");
14785 			return -EINVAL;
14786 		}
14787 	}
14788 
14789 	/* Passed in modifier sanity checking. */
14790 	switch (mode_cmd->modifier[0]) {
14791 	case I915_FORMAT_MOD_Y_TILED:
14792 	case I915_FORMAT_MOD_Yf_TILED:
14793 		if (INTEL_INFO(dev)->gen < 9) {
14794 			DRM_DEBUG("Unsupported tiling 0x%llx!\n",
14795 				  mode_cmd->modifier[0]);
14796 			return -EINVAL;
14797 		}
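		/* fall through - Y/Yf tiling is allowed on gen9+ */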
14798 	case DRM_FORMAT_MOD_NONE:
14799 	case I915_FORMAT_MOD_X_TILED:
14800 		break;
14801 	default:
14802 		DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
14803 			  mode_cmd->modifier[0]);
14804 		return -EINVAL;
14805 	}
14806 
14807 	stride_alignment = intel_fb_stride_alignment(dev_priv,
14808 						     mode_cmd->modifier[0],
14809 						     mode_cmd->pixel_format);
14810 	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
14811 		DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
14812 			  mode_cmd->pitches[0], stride_alignment);
14813 		return -EINVAL;
14814 	}
14815 
14816 	pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
14817 					   mode_cmd->pixel_format);
14818 	if (mode_cmd->pitches[0] > pitch_limit) {
14819 		DRM_DEBUG("%s pitch (%u) must be less than %d\n",
14820 			  mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
14821 			  "tiled" : "linear",
14822 			  mode_cmd->pitches[0], pitch_limit);
14823 		return -EINVAL;
14824 	}
14825 
14826 	if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
14827 	    mode_cmd->pitches[0] != obj->stride) {
14828 		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
14829 			  mode_cmd->pitches[0], obj->stride);
14830 		return -EINVAL;
14831 	}
14832 
14833 	/* Reject formats not supported by any plane early. */
14834 	switch (mode_cmd->pixel_format) {
14835 	case DRM_FORMAT_C8:
14836 	case DRM_FORMAT_RGB565:
14837 	case DRM_FORMAT_XRGB8888:
14838 	case DRM_FORMAT_ARGB8888:
14839 		break;
14840 	case DRM_FORMAT_XRGB1555:
14841 		if (INTEL_INFO(dev)->gen > 3) {
14842 			DRM_DEBUG("unsupported pixel format: %s\n",
14843 				  drm_get_format_name(mode_cmd->pixel_format));
14844 			return -EINVAL;
14845 		}
14846 		break;
14847 	case DRM_FORMAT_ABGR8888:
14848 		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
14849 		    INTEL_INFO(dev)->gen < 9) {
14850 			DRM_DEBUG("unsupported pixel format: %s\n",
14851 				  drm_get_format_name(mode_cmd->pixel_format));
14852 			return -EINVAL;
14853 		}
14854 		break;
14855 	case DRM_FORMAT_XBGR8888:
14856 	case DRM_FORMAT_XRGB2101010:
14857 	case DRM_FORMAT_XBGR2101010:
14858 		if (INTEL_INFO(dev)->gen < 4) {
14859 			DRM_DEBUG("unsupported pixel format: %s\n",
14860 				  drm_get_format_name(mode_cmd->pixel_format));
14861 			return -EINVAL;
14862 		}
14863 		break;
14864 	case DRM_FORMAT_ABGR2101010:
14865 		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
14866 			DRM_DEBUG("unsupported pixel format: %s\n",
14867 				  drm_get_format_name(mode_cmd->pixel_format));
14868 			return -EINVAL;
14869 		}
14870 		break;
14871 	case DRM_FORMAT_YUYV:
14872 	case DRM_FORMAT_UYVY:
14873 	case DRM_FORMAT_YVYU:
14874 	case DRM_FORMAT_VYUY:
14875 		if (INTEL_INFO(dev)->gen < 5) {
14876 			DRM_DEBUG("unsupported pixel format: %s\n",
14877 				  drm_get_format_name(mode_cmd->pixel_format));
14878 			return -EINVAL;
14879 		}
14880 		break;
14881 	default:
14882 		DRM_DEBUG("unsupported pixel format: %s\n",
14883 			  drm_get_format_name(mode_cmd->pixel_format));
14884 		return -EINVAL;
14885 	}
14886 
14887 	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
14888 	if (mode_cmd->offsets[0] != 0)
14889 		return -EINVAL;
14890 
14891 	aligned_height = intel_fb_align_height(dev, mode_cmd->height,
14892 					       mode_cmd->pixel_format,
14893 					       mode_cmd->modifier[0]);
14894 	/* FIXME drm helper for size checks (especially planar formats)? */
14895 	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
14896 		return -EINVAL;
14897 
14898 	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
14899 	intel_fb->obj = obj;
14900 
14901 	intel_fill_fb_info(dev_priv, &intel_fb->base);
14902 
14903 	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
14904 	if (ret) {
14905 		DRM_ERROR("framebuffer init failed %d\n", ret);
14906 		return ret;
14907 	}
14908 
14909 	intel_fb->obj->framebuffer_references++;
14910 
14911 	return 0;
14912 }
14913 
14914 static struct drm_framebuffer *
14915 intel_user_framebuffer_create(struct drm_device *dev,
14916 			      struct drm_file *filp,
14917 			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
14918 {
14919 	struct drm_framebuffer *fb;
14920 	struct drm_i915_gem_object *obj;
14921 	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
14922 
14923 	obj = to_intel_bo(drm_gem_object_lookup(filp, mode_cmd.handles[0]));
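	/*
	 * to_intel_bo() is a container_of(), so check the embedded GEM
	 * object rather than obj itself to catch a failed lookup.
	 */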
14924 	if (&obj->base == NULL)
14925 		return ERR_PTR(-ENOENT);
14926 
14927 	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
14928 	if (IS_ERR(fb))
14929 		drm_gem_object_unreference_unlocked(&obj->base);
14930 
14931 	return fb;
14932 }
14933 
14934 #ifndef CONFIG_DRM_FBDEV_EMULATION
14935 static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
14936 {
14937 }
14938 #endif
14939 
14940 static const struct drm_mode_config_funcs intel_mode_funcs = {
14941 	.fb_create = intel_user_framebuffer_create,
14942 	.output_poll_changed = intel_fbdev_output_poll_changed,
14943 	.atomic_check = intel_atomic_check,
14944 	.atomic_commit = intel_atomic_commit,
14945 	.atomic_state_alloc = intel_atomic_state_alloc,
14946 	.atomic_state_clear = intel_atomic_state_clear,
14947 };
14948 
14949 /**
14950  * intel_init_display_hooks - initialize the display modesetting hooks
14951  * @dev_priv: device private
14952  */
14953 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
14954 {
14955 	if (INTEL_INFO(dev_priv)->gen >= 9) {
14956 		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14957 		dev_priv->display.get_initial_plane_config =
14958 			skylake_get_initial_plane_config;
14959 		dev_priv->display.crtc_compute_clock =
14960 			haswell_crtc_compute_clock;
14961 		dev_priv->display.crtc_enable = haswell_crtc_enable;
14962 		dev_priv->display.crtc_disable = haswell_crtc_disable;
14963 	} else if (HAS_DDI(dev_priv)) {
14964 		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14965 		dev_priv->display.get_initial_plane_config =
14966 			ironlake_get_initial_plane_config;
14967 		dev_priv->display.crtc_compute_clock =
14968 			haswell_crtc_compute_clock;
14969 		dev_priv->display.crtc_enable = haswell_crtc_enable;
14970 		dev_priv->display.crtc_disable = haswell_crtc_disable;
14971 	} else if (HAS_PCH_SPLIT(dev_priv)) {
14972 		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
14973 		dev_priv->display.get_initial_plane_config =
14974 			ironlake_get_initial_plane_config;
14975 		dev_priv->display.crtc_compute_clock =
14976 			ironlake_crtc_compute_clock;
14977 		dev_priv->display.crtc_enable = ironlake_crtc_enable;
14978 		dev_priv->display.crtc_disable = ironlake_crtc_disable;
14979 	} else if (IS_CHERRYVIEW(dev_priv)) {
14980 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14981 		dev_priv->display.get_initial_plane_config =
14982 			i9xx_get_initial_plane_config;
14983 		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
14984 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
14985 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
14986 	} else if (IS_VALLEYVIEW(dev_priv)) {
14987 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14988 		dev_priv->display.get_initial_plane_config =
14989 			i9xx_get_initial_plane_config;
14990 		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
14991 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
14992 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
14993 	} else if (IS_G4X(dev_priv)) {
14994 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14995 		dev_priv->display.get_initial_plane_config =
14996 			i9xx_get_initial_plane_config;
14997 		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
14998 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
14999 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
15000 	} else if (IS_PINEVIEW(dev_priv)) {
15001 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15002 		dev_priv->display.get_initial_plane_config =
15003 			i9xx_get_initial_plane_config;
15004 		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
15005 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
15006 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
15007 	} else if (!IS_GEN2(dev_priv)) {
15008 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15009 		dev_priv->display.get_initial_plane_config =
15010 			i9xx_get_initial_plane_config;
15011 		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
15012 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
15013 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
15014 	} else {
15015 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15016 		dev_priv->display.get_initial_plane_config =
15017 			i9xx_get_initial_plane_config;
15018 		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
15019 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
15020 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
15021 	}
15022 
15023 	/* Returns the core display clock speed */
15024 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
15025 		dev_priv->display.get_display_clock_speed =
15026 			skylake_get_display_clock_speed;
15027 	else if (IS_BROXTON(dev_priv))
15028 		dev_priv->display.get_display_clock_speed =
15029 			broxton_get_display_clock_speed;
15030 	else if (IS_BROADWELL(dev_priv))
15031 		dev_priv->display.get_display_clock_speed =
15032 			broadwell_get_display_clock_speed;
15033 	else if (IS_HASWELL(dev_priv))
15034 		dev_priv->display.get_display_clock_speed =
15035 			haswell_get_display_clock_speed;
15036 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15037 		dev_priv->display.get_display_clock_speed =
15038 			valleyview_get_display_clock_speed;
15039 	else if (IS_GEN5(dev_priv))
15040 		dev_priv->display.get_display_clock_speed =
15041 			ilk_get_display_clock_speed;
15042 	else if (IS_I945G(dev_priv) || IS_BROADWATER(dev_priv) ||
15043 		 IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
15044 		dev_priv->display.get_display_clock_speed =
15045 			i945_get_display_clock_speed;
15046 	else if (IS_GM45(dev_priv))
15047 		dev_priv->display.get_display_clock_speed =
15048 			gm45_get_display_clock_speed;
15049 	else if (IS_CRESTLINE(dev_priv))
15050 		dev_priv->display.get_display_clock_speed =
15051 			i965gm_get_display_clock_speed;
15052 	else if (IS_PINEVIEW(dev_priv))
15053 		dev_priv->display.get_display_clock_speed =
15054 			pnv_get_display_clock_speed;
15055 	else if (IS_G33(dev_priv) || IS_G4X(dev_priv))
15056 		dev_priv->display.get_display_clock_speed =
15057 			g33_get_display_clock_speed;
15058 	else if (IS_I915G(dev_priv))
15059 		dev_priv->display.get_display_clock_speed =
15060 			i915_get_display_clock_speed;
15061 	else if (IS_I945GM(dev_priv) || IS_845G(dev_priv))
15062 		dev_priv->display.get_display_clock_speed =
15063 			i9xx_misc_get_display_clock_speed;
15064 	else if (IS_I915GM(dev_priv))
15065 		dev_priv->display.get_display_clock_speed =
15066 			i915gm_get_display_clock_speed;
15067 	else if (IS_I865G(dev_priv))
15068 		dev_priv->display.get_display_clock_speed =
15069 			i865_get_display_clock_speed;
15070 	else if (IS_I85X(dev_priv))
15071 		dev_priv->display.get_display_clock_speed =
15072 			i85x_get_display_clock_speed;
15073 	else { /* 830 */
15074 		WARN(!IS_I830(dev_priv), "Unknown platform. Assuming 133 MHz CDCLK\n");
15075 		dev_priv->display.get_display_clock_speed =
15076 			i830_get_display_clock_speed;
15077 	}
15078 
15079 	if (IS_GEN5(dev_priv)) {
15080 		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
15081 	} else if (IS_GEN6(dev_priv)) {
15082 		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
15083 	} else if (IS_IVYBRIDGE(dev_priv)) {
15084 		/* FIXME: detect B0+ stepping and use auto training */
15085 		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
15086 	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
15087 		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
15088 		if (IS_BROADWELL(dev_priv)) {
15089 			dev_priv->display.modeset_commit_cdclk =
15090 				broadwell_modeset_commit_cdclk;
15091 			dev_priv->display.modeset_calc_cdclk =
15092 				broadwell_modeset_calc_cdclk;
15093 		}
15094 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
15095 		dev_priv->display.modeset_commit_cdclk =
15096 			valleyview_modeset_commit_cdclk;
15097 		dev_priv->display.modeset_calc_cdclk =
15098 			valleyview_modeset_calc_cdclk;
15099 	} else if (IS_BROXTON(dev_priv)) {
15100 		dev_priv->display.modeset_commit_cdclk =
15101 			broxton_modeset_commit_cdclk;
15102 		dev_priv->display.modeset_calc_cdclk =
15103 			broxton_modeset_calc_cdclk;
15104 	}
15105 
15106 	switch (INTEL_INFO(dev_priv)->gen) {
15107 	case 2:
15108 		dev_priv->display.queue_flip = intel_gen2_queue_flip;
15109 		break;
15110 
15111 	case 3:
15112 		dev_priv->display.queue_flip = intel_gen3_queue_flip;
15113 		break;
15114 
15115 	case 4:
15116 	case 5:
15117 		dev_priv->display.queue_flip = intel_gen4_queue_flip;
15118 		break;
15119 
15120 	case 6:
15121 		dev_priv->display.queue_flip = intel_gen6_queue_flip;
15122 		break;
15123 	case 7:
15124 	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
15125 		dev_priv->display.queue_flip = intel_gen7_queue_flip;
15126 		break;
15127 	case 9:
15128 		/* Drop through - unsupported since execlist only. */
15129 	default:
15130 		/* Default just returns -ENODEV to indicate unsupported */
15131 		dev_priv->display.queue_flip = intel_default_queue_flip;
15132 	}
15133 }
15134 
15135 /*
15136  * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
15137  * resume, or other times.  This quirk makes sure that's the case for
15138  * affected systems.
15139  */
15140 static void quirk_pipea_force(struct drm_device *dev)
15141 {
15142 	struct drm_i915_private *dev_priv = dev->dev_private;
15143 
15144 	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
15145 	DRM_INFO("applying pipe a force quirk\n");
15146 }
15147 
15148 static void quirk_pipeb_force(struct drm_device *dev)
15149 {
15150 	struct drm_i915_private *dev_priv = dev->dev_private;
15151 
15152 	dev_priv->quirks |= QUIRK_PIPEB_FORCE;
15153 	DRM_INFO("applying pipe b force quirk\n");
15154 }
15155 
15156 /*
15157  * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
15158  */
15159 static void quirk_ssc_force_disable(struct drm_device *dev)
15160 {
15161 	struct drm_i915_private *dev_priv = dev->dev_private;
15162 	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
15163 	DRM_INFO("applying lvds SSC disable quirk\n");
15164 }
15165 
15166 /*
15167  * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
15168  * brightness value
15169  */
15170 static void quirk_invert_brightness(struct drm_device *dev)
15171 {
15172 	struct drm_i915_private *dev_priv = dev->dev_private;
15173 	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
15174 	DRM_INFO("applying inverted panel brightness quirk\n");
15175 }
15176 
15177 /* Some VBTs incorrectly indicate that no backlight is present */
15178 static void quirk_backlight_present(struct drm_device *dev)
15179 {
15180 	struct drm_i915_private *dev_priv = dev->dev_private;
15181 	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
15182 	DRM_INFO("applying backlight present quirk\n");
15183 }
15184 
15185 struct intel_quirk {
15186 	int device;
15187 	int subsystem_vendor;
15188 	int subsystem_device;
15189 	void (*hook)(struct drm_device *dev);
15190 };
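
/*
 * A quirk entry matches when ->device equals the PCI device ID and the
 * subsystem vendor/device IDs either match exactly or are wildcarded
 * with PCI_ANY_ID; see the matching loop in intel_init_quirks() below.
 */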
15191 
15192 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
15193 struct intel_dmi_quirk {
15194 	void (*hook)(struct drm_device *dev);
15195 	const struct dmi_system_id (*dmi_id_list)[];
15196 };
15197 
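/*
 * DMI match callback; dmi_check_system() invokes this once all of the
 * strings in .matches match, and a non-zero return stops it from
 * scanning further table entries.
 */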
15198 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
15199 {
15200 	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
15201 	return 1;
15202 }
15203 
15204 static const struct intel_dmi_quirk intel_dmi_quirks[] = {
15205 	{
15206 		.dmi_id_list = &(const struct dmi_system_id[]) {
15207 			{
15208 				.callback = intel_dmi_reverse_brightness,
15209 				.ident = "NCR Corporation",
15210 				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
15211 					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
15212 				},
15213 			},
15214 			{ }  /* terminating entry */
15215 		},
15216 		.hook = quirk_invert_brightness,
15217 	},
15218 };
15219 
15220 static struct intel_quirk intel_quirks[] = {
15221 	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
15222 	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
15223 
15224 	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
15225 	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
15226 
15227 	/* 830 needs to leave pipe A & dpll A up */
15228 	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
15229 
15230 	/* 830 needs to leave pipe B & dpll B up */
15231 	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },
15232 
15233 	/* Lenovo U160 cannot use SSC on LVDS */
15234 	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
15235 
15236 	/* Sony Vaio Y cannot use SSC on LVDS */
15237 	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
15238 
15239 	/* Acer Aspire 5734Z must invert backlight brightness */
15240 	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
15241 
15242 	/* Acer/eMachines G725 */
15243 	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
15244 
15245 	/* Acer/eMachines e725 */
15246 	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
15247 
15248 	/* Acer/Packard Bell NCL20 */
15249 	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
15250 
15251 	/* Acer Aspire 4736Z */
15252 	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
15253 
15254 	/* Acer Aspire 5336 */
15255 	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
15256 
15257 	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
15258 	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
15259 
15260 	/* Acer C720 Chromebook (Core i3 4005U) */
15261 	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
15262 
15263 	/* Apple Macbook 2,1 (Core 2 T7400) */
15264 	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
15265 
15266 	/* Apple Macbook 4,1 */
15267 	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
15268 
15269 	/* Toshiba CB35 Chromebook (Celeron 2955U) */
15270 	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
15271 
15272 	/* HP Chromebook 14 (Celeron 2955U) */
15273 	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
15274 
15275 	/* Dell Chromebook 11 */
15276 	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
15277 
15278 	/* Dell Chromebook 11 (2015 version) */
15279 	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
15280 };
15281 
15282 static void intel_init_quirks(struct drm_device *dev)
15283 {
15284 	struct pci_dev *d = dev->pdev;
15285 	int i;
15286 
15287 	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
15288 		struct intel_quirk *q = &intel_quirks[i];
15289 
15290 		if (d->device == q->device &&
15291 		    (d->subsystem_vendor == q->subsystem_vendor ||
15292 		     q->subsystem_vendor == PCI_ANY_ID) &&
15293 		    (d->subsystem_device == q->subsystem_device ||
15294 		     q->subsystem_device == PCI_ANY_ID))
15295 			q->hook(dev);
15296 	}
15297 	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
15298 		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
15299 			intel_dmi_quirks[i].hook(dev);
15300 	}
15301 }
15302 
15303 /* Disable the VGA plane that we never use */
15304 static void i915_disable_vga(struct drm_device *dev)
15305 {
15306 	struct drm_i915_private *dev_priv = dev->dev_private;
15307 	u8 sr1;
15308 	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
15309 
15310 	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
15311 #if 0
15312 	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
15313 #endif
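	/* Set bit 5 (screen off) of VGA sequencer register SR01. */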
15314 	outb(VGA_SR_INDEX, SR01);
15315 	sr1 = inb(VGA_SR_DATA);
15316 	outb(VGA_SR_DATA, sr1 | 1 << 5);
15317 #if 0
15318 	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
15319 #endif
15320 	udelay(300);
15321 
15322 	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
15323 	POSTING_READ(vga_reg);
15324 }
15325 
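/*
 * Bring up the low-level display hardware state: refresh the cdclk
 * bookkeeping, apply the clock gating setup and enable GT power
 * management.
 */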
15326 void intel_modeset_init_hw(struct drm_device *dev)
15327 {
15328 	struct drm_i915_private *dev_priv = dev->dev_private;
15329 
15330 	intel_update_cdclk(dev);
15331 
15332 	dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
15333 
15334 	intel_init_clock_gating(dev);
15335 	intel_enable_gt_powersave(dev);
15336 }
15337 
15338 /*
15339  * Calculate what we think the watermarks should be for the state we've read
15340  * out of the hardware and then immediately program those watermarks so that
15341  * we ensure the hardware settings match our internal state.
15342  *
15343  * We can calculate what we think the watermarks should be by duplicating the
15344  * current state (which was constructed during hardware readout) and running it
15345  * through the atomic check code to calculate new watermark values in the
15346  * state object.
15347  */
15348 static void sanitize_watermarks(struct drm_device *dev)
15349 {
15350 	struct drm_i915_private *dev_priv = to_i915(dev);
15351 	struct drm_atomic_state *state;
15352 	struct drm_crtc *crtc;
15353 	struct drm_crtc_state *cstate;
15354 	struct drm_modeset_acquire_ctx ctx;
15355 	int ret;
15356 	int i;
15357 
15358 	/* Only supported on platforms that use atomic watermark design */
15359 	if (!dev_priv->display.optimize_watermarks)
15360 		return;
15361 
15362 	/*
15363 	 * We need to hold connection_mutex before calling duplicate_state so
15364 	 * that the connector loop is protected.
15365 	 */
15366 	drm_modeset_acquire_init(&ctx, 0);
15367 retry:
15368 	ret = drm_modeset_lock_all_ctx(dev, &ctx);
15369 	if (ret == -EDEADLK) {
15370 		drm_modeset_backoff(&ctx);
15371 		goto retry;
15372 	} else if (WARN_ON(ret)) {
15373 		goto fail;
15374 	}
15375 
15376 	state = drm_atomic_helper_duplicate_state(dev, &ctx);
15377 	if (WARN_ON(IS_ERR(state)))
15378 		goto fail;
15379 
15380 	/*
15381 	 * Hardware readout is the only time we don't want to calculate
15382 	 * intermediate watermarks (since we don't trust the current
15383 	 * watermarks).
15384 	 */
15385 	to_intel_atomic_state(state)->skip_intermediate_wm = true;
15386 
15387 	ret = intel_atomic_check(dev, state);
15388 	if (ret) {
15389 		/*
15390 		 * If we fail here, it means that the hardware appears to be
15391 		 * programmed in a way that shouldn't be possible, given our
15392 		 * understanding of watermark requirements.  This might mean a
15393 		 * mistake in the hardware readout code or a mistake in the
15394 		 * watermark calculations for a given platform.  Raise a WARN
15395 		 * so that this is noticeable.
15396 		 *
15397 		 * If this actually happens, we'll have to just leave the
15398 		 * BIOS-programmed watermarks untouched and hope for the best.
15399 		 */
15400 		WARN(true, "Could not determine valid watermarks for inherited state\n");
15401 		goto fail;
15402 	}
15403 
15404 	/* Write calculated watermark values back */
15405 	to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config;
15406 	for_each_crtc_in_state(state, crtc, cstate, i) {
15407 		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
15408 
15409 		cs->wm.need_postvbl_update = true;
15410 		dev_priv->display.optimize_watermarks(cs);
15411 	}
15412 
15413 	drm_atomic_state_free(state);
15414 fail:
15415 	drm_modeset_drop_locks(&ctx);
15416 	drm_modeset_acquire_fini(&ctx);
15417 }
15418 
15419 void intel_modeset_init(struct drm_device *dev)
15420 {
15421 	struct drm_i915_private *dev_priv = to_i915(dev);
15422 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
15423 	int sprite, ret;
15424 	enum i915_pipe pipe;
15425 	struct intel_crtc *crtc;
15426 
15427 	drm_mode_config_init(dev);
15428 
15429 	dev->mode_config.min_width = 0;
15430 	dev->mode_config.min_height = 0;
15431 
15432 	dev->mode_config.preferred_depth = 24;
15433 	dev->mode_config.prefer_shadow = 1;
15434 
15435 	dev->mode_config.allow_fb_modifiers = true;
15436 
15437 	dev->mode_config.funcs = &intel_mode_funcs;
15438 
15439 	intel_init_quirks(dev);
15440 
15441 	intel_init_pm(dev);
15442 
15443 	if (INTEL_INFO(dev)->num_pipes == 0)
15444 		return;
15445 
15446 	/*
15447 	 * There may be no VBT; if the BIOS enabled SSC we can just keep
15448 	 * using it to avoid unnecessary flicker.  But if the BIOS isn't
15449 	 * using it, don't assume it will work even if the VBT indicates
15450 	 * as much.
15451 	 */
15452 	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
15453 		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
15454 					    DREF_SSC1_ENABLE);
15455 
15456 		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
15457 			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
15458 				     bios_lvds_use_ssc ? "en" : "dis",
15459 				     dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
15460 			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
15461 		}
15462 	}
15463 
15464 	if (IS_GEN2(dev)) {
15465 		dev->mode_config.max_width = 2048;
15466 		dev->mode_config.max_height = 2048;
15467 	} else if (IS_GEN3(dev)) {
15468 		dev->mode_config.max_width = 4096;
15469 		dev->mode_config.max_height = 4096;
15470 	} else {
15471 		dev->mode_config.max_width = 8192;
15472 		dev->mode_config.max_height = 8192;
15473 	}
15474 
15475 	if (IS_845G(dev) || IS_I865G(dev)) {
15476 		dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
15477 		dev->mode_config.cursor_height = 1023;
15478 	} else if (IS_GEN2(dev)) {
15479 		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
15480 		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
15481 	} else {
15482 		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
15483 		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
15484 	}
15485 
15486 	dev->mode_config.fb_base = ggtt->mappable_base;
15487 
15488 	DRM_DEBUG_KMS("%d display pipe%s available.\n",
15489 		      INTEL_INFO(dev)->num_pipes,
15490 		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
15491 
15492 	for_each_pipe(dev_priv, pipe) {
15493 		intel_crtc_init(dev, pipe);
15494 		for_each_sprite(dev_priv, pipe, sprite) {
15495 			ret = intel_plane_init(dev, pipe, sprite);
15496 			if (ret)
15497 				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
15498 					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
15499 		}
15500 	}
15501 
15502 	intel_update_czclk(dev_priv);
15503 	intel_update_rawclk(dev_priv);
15504 	intel_update_cdclk(dev);
15505 
15506 	intel_shared_dpll_init(dev);
15507 
15508 	/* Just disable it once at startup */
15509 	i915_disable_vga(dev);
15510 	intel_setup_outputs(dev);
15511 
15512 	drm_modeset_lock_all(dev);
15513 	intel_modeset_setup_hw_state(dev);
15514 	drm_modeset_unlock_all(dev);
15515 
15516 	for_each_intel_crtc(dev, crtc) {
15517 		struct intel_initial_plane_config plane_config = {};
15518 
15519 		if (!crtc->active)
15520 			continue;
15521 
15522 		/*
15523 		 * Note that reserving the BIOS fb up front prevents us
15524 		 * from stuffing other stolen allocations like the ring
15525 		 * on top.  This prevents some ugliness at boot time, and
15526 		 * can even allow for smooth boot transitions if the BIOS
15527 		 * fb is large enough for the active pipe configuration.
15528 		 */
15529 		dev_priv->display.get_initial_plane_config(crtc,
15530 							   &plane_config);
15531 
15532 		/*
15533 		 * If the fb is shared between multiple heads, we'll
15534 		 * just get the first one.
15535 		 */
15536 		intel_find_initial_plane_obj(crtc, &plane_config);
15537 	}
15538 
15539 	/*
15540 	 * Make sure hardware watermarks really match the state we read out.
15541 	 * Note that we need to do this after reconstructing the BIOS fb's
15542 	 * since the watermark calculation done here will use pstate->fb.
15543 	 */
15544 	sanitize_watermarks(dev);
15545 }
15546 
15547 static void intel_enable_pipe_a(struct drm_device *dev)
15548 {
15549 	struct intel_connector *connector;
15550 	struct drm_connector *crt = NULL;
15551 	struct intel_load_detect_pipe load_detect_temp;
15552 	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
15553 
15554 	/* We can't just switch on pipe A; we need to set things up with a
15555 	 * proper mode and output configuration. As a gross hack, enable pipe A
15556 	 * by enabling the load-detect pipe once. */
15557 	for_each_intel_connector(dev, connector) {
15558 		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
15559 			crt = &connector->base;
15560 			break;
15561 		}
15562 	}
15563 
15564 	if (!crt)
15565 		return;
15566 
15567 	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
15568 		intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
15569 }
15570 
15571 static bool
15572 intel_check_plane_mapping(struct intel_crtc *crtc)
15573 {
15574 	struct drm_device *dev = crtc->base.dev;
15575 	struct drm_i915_private *dev_priv = dev->dev_private;
15576 	u32 val;
15577 
15578 	if (INTEL_INFO(dev)->num_pipes == 1)
15579 		return true;
15580 
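	/* Read the control register of the other primary plane; if that
	 * plane is enabled and selects this crtc's pipe, the BIOS left the
	 * plane->pipe mapping crossed over (the !! reduction works because
	 * pre-gen4 parts only have pipes A and B). */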
15581 	val = I915_READ(DSPCNTR(!crtc->plane));
15582 
15583 	if ((val & DISPLAY_PLANE_ENABLE) &&
15584 	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
15585 		return false;
15586 
15587 	return true;
15588 }
15589 
15590 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15591 {
15592 	struct drm_device *dev = crtc->base.dev;
15593 	struct intel_encoder *encoder;
15594 
15595 	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15596 		return true;
15597 
15598 	return false;
15599 }
15600 
15601 static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
15602 {
15603 	struct drm_device *dev = encoder->base.dev;
15604 	struct intel_connector *connector;
15605 
15606 	for_each_connector_on_encoder(dev, &encoder->base, connector)
15607 		return true;
15608 
15609 	return false;
15610 }
15611 
15612 static void intel_sanitize_crtc(struct intel_crtc *crtc)
15613 {
15614 	struct drm_device *dev = crtc->base.dev;
15615 	struct drm_i915_private *dev_priv = dev->dev_private;
15616 	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
15617 
15618 	/* Clear any frame start delays (used for debugging) that the BIOS left behind */
15619 	if (!transcoder_is_dsi(cpu_transcoder)) {
15620 		i915_reg_t reg = PIPECONF(cpu_transcoder);
15621 
15622 		I915_WRITE(reg,
15623 			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
15624 	}
15625 
15626 	/* restore vblank interrupts to correct state */
15627 	drm_crtc_vblank_reset(&crtc->base);
15628 	if (crtc->active) {
15629 		struct intel_plane *plane;
15630 
15631 		drm_crtc_vblank_on(&crtc->base);
15632 
15633 		/* Disable everything but the primary plane */
15634 		for_each_intel_plane_on_crtc(dev, crtc, plane) {
15635 			if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
15636 				continue;
15637 
15638 			plane->disable_plane(&plane->base, &crtc->base);
15639 		}
15640 	}
15641 
15642 	/* We need to sanitize the plane -> pipe mapping first because this will
15643 	 * disable the crtc (and hence change the state) if it is wrong. Note
15644 	 * that gen4+ has a fixed plane -> pipe mapping.  */
15645 	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
15646 		bool plane;
15647 
15648 		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
15649 			      crtc->base.base.id);
15650 
15651 		/* Pipe has the wrong plane attached and the plane is active.
15652 		 * Temporarily change the plane mapping and disable everything
15653 		 * ...  */
15654 		plane = crtc->plane;
15655 		to_intel_plane_state(crtc->base.primary->state)->visible = true;
15656 		crtc->plane = !plane;
15657 		intel_crtc_disable_noatomic(&crtc->base);
15658 		crtc->plane = plane;
15659 	}
15660 
15661 	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
15662 	    crtc->pipe == PIPE_A && !crtc->active) {
15663 		/* BIOS forgot to enable pipe A; this mostly happens after
15664 		 * resume. Force-enable the pipe to fix this; in the update_dpms
15665 		 * call below we restore the pipe to the right state, but leave
15666 		 * the required bits on. */
15667 		intel_enable_pipe_a(dev);
15668 	}
15669 
15670 	/* Adjust the state of the output pipe according to whether we
15671 	 * have active connectors/encoders. */
15672 	if (crtc->active && !intel_crtc_has_encoders(crtc))
15673 		intel_crtc_disable_noatomic(&crtc->base);
15674 
15675 	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
15676 		/*
15677 		 * We start out with underrun reporting disabled to avoid races.
15678 		 * For correct bookkeeping mark this on active crtcs.
15679 		 *
15680 		 * Also, on gmch platforms we don't have any hardware bits to
15681 		 * disable underrun reporting, which means we need to start out
15682 		 * with underrun reporting disabled on inactive pipes as well,
15683 		 * since otherwise we'll complain about the garbage we read when
15684 		 * e.g. coming up after runtime pm.
15685 		 *
15686 		 * No protection against concurrent access is required - at
15687 		 * worst a fifo underrun happens which also sets this to false.
15688 		 */
15689 		crtc->cpu_fifo_underrun_disabled = true;
15690 		crtc->pch_fifo_underrun_disabled = true;
15691 	}
15692 }
15693 
15694 static void intel_sanitize_encoder(struct intel_encoder *encoder)
15695 {
15696 	struct intel_connector *connector;
15697 	struct drm_device *dev = encoder->base.dev;
15698 
15699 	/* We need to check both for a crtc link (meaning that the
15700 	 * encoder is active and trying to read from a pipe) and the
15701 	 * pipe itself being active. */
15702 	bool has_active_crtc = encoder->base.crtc &&
15703 		to_intel_crtc(encoder->base.crtc)->active;
15704 
15705 	if (intel_encoder_has_connectors(encoder) && !has_active_crtc) {
15706 		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
15707 			      encoder->base.base.id,
15708 			      encoder->base.name);
15709 
15710 		/* Connector is active, but has no active pipe. This is
15711 		 * fallout from our resume register restoring. Disable
15712 		 * the encoder manually again. */
15713 		if (encoder->base.crtc) {
15714 			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
15715 				      encoder->base.base.id,
15716 				      encoder->base.name);
15717 			encoder->disable(encoder);
15718 			if (encoder->post_disable)
15719 				encoder->post_disable(encoder);
15720 		}
15721 		encoder->base.crtc = NULL;
15722 
15723 		/* Inconsistent output/port/pipe state happens presumably due to
15724 		 * a bug in one of the get_hw_state functions, or someplace else
15725 		 * in our code, like the register-restore mess on resume. Clamp
15726 		 * things to off as a safer default. */
15727 		for_each_intel_connector(dev, connector) {
15728 			if (connector->encoder != encoder)
15729 				continue;
15730 			connector->base.dpms = DRM_MODE_DPMS_OFF;
15731 			connector->base.encoder = NULL;
15732 		}
15733 	}
15734 	/* Enabled encoders without active connectors will be fixed in
15735 	 * the crtc fixup. */
15736 }
15737 
15738 void i915_redisable_vga_power_on(struct drm_device *dev)
15739 {
15740 	struct drm_i915_private *dev_priv = dev->dev_private;
15741 	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
15742 
15743 	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15744 		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15745 		i915_disable_vga(dev);
15746 	}
15747 }
15748 
15749 void i915_redisable_vga(struct drm_device *dev)
15750 {
15751 	struct drm_i915_private *dev_priv = dev->dev_private;
15752 
15753 	/* This function can be called either from intel_modeset_setup_hw_state or
15754 	 * at a very early point in our resume sequence, where the power well
15755 	 * structures are not yet restored. Since this function is at a very
15756 	 * paranoid "someone might have enabled VGA while we were not looking"
15757 	 * level, just check if the power well is enabled instead of trying to
15758 	 * follow the "don't touch the power well if we don't need it" policy
15759 	 * the rest of the driver uses. */
15760 	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
15761 		return;
15762 
15763 	i915_redisable_vga_power_on(dev);
15764 
15765 	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
15766 }
15767 
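/* Query whether the given primary plane is currently enabled in hardware. */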
15768 static bool primary_get_hw_state(struct intel_plane *plane)
15769 {
15770 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15771 
15772 	return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
15773 }
15774 
15775 /* FIXME read out full plane state for all planes */
15776 static void readout_plane_state(struct intel_crtc *crtc)
15777 {
15778 	struct drm_plane *primary = crtc->base.primary;
15779 	struct intel_plane_state *plane_state =
15780 		to_intel_plane_state(primary->state);
15781 
15782 	plane_state->visible = crtc->active &&
15783 		primary_get_hw_state(to_intel_plane(primary));
15784 
15785 	if (plane_state->visible)
15786 		crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
15787 }
15788 
15789 static void intel_modeset_readout_hw_state(struct drm_device *dev)
15790 {
15791 	struct drm_i915_private *dev_priv = dev->dev_private;
15792 	enum i915_pipe pipe;
15793 	struct intel_crtc *crtc;
15794 	struct intel_encoder *encoder;
15795 	struct intel_connector *connector;
15796 	int i;
15797 
15798 	dev_priv->active_crtcs = 0;
15799 
15800 	for_each_intel_crtc(dev, crtc) {
15801 		struct intel_crtc_state *crtc_state = crtc->config;
15802 		int pixclk = 0;
15803 
15804 		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
15805 		memset(crtc_state, 0, sizeof(*crtc_state));
15806 		crtc_state->base.crtc = &crtc->base;
15807 
15808 		crtc_state->base.active = crtc_state->base.enable =
15809 			dev_priv->display.get_pipe_config(crtc, crtc_state);
15810 
15811 		crtc->base.enabled = crtc_state->base.enable;
15812 		crtc->active = crtc_state->base.active;
15813 
15814 		if (crtc_state->base.active) {
15815 			dev_priv->active_crtcs |= 1 << crtc->pipe;
15816 
15817 			if (IS_BROADWELL(dev_priv)) {
15818 				pixclk = ilk_pipe_pixel_rate(crtc_state);
15819 
15820 				/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
15821 				if (crtc_state->ips_enabled)
15822 					pixclk = DIV_ROUND_UP(pixclk * 100, 95);
15823 			} else if (IS_VALLEYVIEW(dev_priv) ||
15824 				   IS_CHERRYVIEW(dev_priv) ||
15825 				   IS_BROXTON(dev_priv))
15826 				pixclk = crtc_state->base.adjusted_mode.crtc_clock;
15827 			else
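				/* Any other platform is not expected to
				 * install a modeset_calc_cdclk hook. */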
15828 				WARN_ON(dev_priv->display.modeset_calc_cdclk);
15829 		}
15830 
15831 		dev_priv->min_pixclk[crtc->pipe] = pixclk;
15832 
15833 		readout_plane_state(crtc);
15834 
15835 		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
15836 			      crtc->base.base.id,
15837 			      crtc->active ? "enabled" : "disabled");
15838 	}
15839 
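	/* Read out shared DPLL state and reconstruct which crtcs are
	 * currently using each PLL. */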
15840 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
15841 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
15842 
15843 		pll->on = pll->funcs.get_hw_state(dev_priv, pll,
15844 						  &pll->config.hw_state);
15845 		pll->config.crtc_mask = 0;
15846 		for_each_intel_crtc(dev, crtc) {
15847 			if (crtc->active && crtc->config->shared_dpll == pll)
15848 				pll->config.crtc_mask |= 1 << crtc->pipe;
15849 		}
15850 		pll->active_mask = pll->config.crtc_mask;
15851 
15852 		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
15853 			      pll->name, pll->config.crtc_mask, pll->on);
15854 	}
15855 
15856 	for_each_intel_encoder(dev, encoder) {
15857 		pipe = 0;
15858 
15859 		if (encoder->get_hw_state(encoder, &pipe)) {
15860 			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
15861 			encoder->base.crtc = &crtc->base;
15862 			encoder->get_config(encoder, crtc->config);
15863 		} else {
15864 			encoder->base.crtc = NULL;
15865 		}
15866 
15867 		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
15868 			      encoder->base.base.id,
15869 			      encoder->base.name,
15870 			      encoder->base.crtc ? "enabled" : "disabled",
15871 			      pipe_name(pipe));
15872 	}
15873 
15874 	for_each_intel_connector(dev, connector) {
15875 		if (connector->get_hw_state(connector)) {
15876 			connector->base.dpms = DRM_MODE_DPMS_ON;
15877 
15878 			encoder = connector->encoder;
15879 			connector->base.encoder = &encoder->base;
15880 
15881 			if (encoder->base.crtc &&
15882 			    encoder->base.crtc->state->active) {
15883 				/*
15884 				 * This has to be done during hardware readout
15885 				 * because anything calling .crtc_disable may
15886 				 * rely on the connector_mask being accurate.
15887 				 */
15888 				encoder->base.crtc->state->connector_mask |=
15889 					1 << drm_connector_index(&connector->base);
15890 				encoder->base.crtc->state->encoder_mask |=
15891 					1 << drm_encoder_index(&encoder->base);
15892 			}
15893 
15894 		} else {
15895 			connector->base.dpms = DRM_MODE_DPMS_OFF;
15896 			connector->base.encoder = NULL;
15897 		}
15898 		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
15899 			      connector->base.base.id,
15900 			      connector->base.name,
15901 			      connector->base.encoder ? "enabled" : "disabled");
15902 	}
15903 
15904 	for_each_intel_crtc(dev, crtc) {
15905 		crtc->base.hwmode = crtc->config->base.adjusted_mode;
15906 
15907 		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
15908 		if (crtc->base.state->active) {
15909 			intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
15910 			intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
15911 			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
15912 
15913 			/*
15914 			 * The initial mode needs to be set in order to keep
15915 			 * the atomic core happy. It wants a valid mode if the
15916 			 * crtc's enabled, so we do the above call.
15917 			 *
15918 			 * At this point some state updated by the connectors
15919 			 * in their ->detect() callback has not run yet, so
15920 			 * no recalculation can be done yet.
15921 			 *
15922 			 * Even if we could do a recalculation and modeset
15923 			 * right now it would cause a double modeset if
15924 			 * fbdev or userspace chooses a different initial mode.
15925 			 *
15926 			 * If that happens, someone indicated they wanted a
15927 			 * mode change, which means it's safe to do a full
15928 			 * recalculation.
15929 			 */
15930 			crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
15931 
15932 			drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
15933 			update_scanline_offset(crtc);
15934 		}
15935 
15936 		intel_pipe_config_sanity_check(dev_priv, crtc->config);
15937 	}
15938 }
15939 
15940 /* Scan out the current hw modeset state and sanitize it, bringing the
15941  * hardware state and our software state back into sync.
15942  */
15943 static void
15944 intel_modeset_setup_hw_state(struct drm_device *dev)
15945 {
15946 	struct drm_i915_private *dev_priv = dev->dev_private;
15947 	enum i915_pipe pipe;
15948 	struct intel_crtc *crtc;
15949 	struct intel_encoder *encoder;
15950 	int i;
15951 
15952 	intel_modeset_readout_hw_state(dev);
15953 
15954 	/* HW state is read out, now we need to sanitize this mess. */
15955 	for_each_intel_encoder(dev, encoder) {
15956 		intel_sanitize_encoder(encoder);
15957 	}
15958 
15959 	for_each_pipe(dev_priv, pipe) {
15960 		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
15961 		intel_sanitize_crtc(crtc);
15962 		intel_dump_pipe_config(crtc, crtc->config,
15963 				       "[setup_hw_state]");
15964 	}
15965 
15966 	intel_modeset_update_connector_atomic_state(dev);
15967 
15968 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
15969 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
15970 
15971 		if (!pll->on || pll->active_mask)
15972 			continue;
15973 
15974 		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
15975 
15976 		pll->funcs.disable(dev_priv, pll);
15977 		pll->on = false;
15978 	}
15979 
15980 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
15981 		vlv_wm_get_hw_state(dev);
15982 	else if (IS_GEN9(dev))
15983 		skl_wm_get_hw_state(dev);
15984 	else if (HAS_PCH_SPLIT(dev))
15985 		ilk_wm_get_hw_state(dev);
15986 
15987 	for_each_intel_crtc(dev, crtc) {
15988 		unsigned long put_domains;
15989 
15990 		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
15991 		if (WARN_ON(put_domains))
15992 			modeset_put_power_domains(dev_priv, put_domains);
15993 	}
15994 	intel_display_set_init_power(dev_priv, false);
15995 
15996 	intel_fbc_init_pipe_state(dev_priv);
15997 }
15998 
15999 void intel_display_resume(struct drm_device *dev)
16000 {
16001 	struct drm_i915_private *dev_priv = to_i915(dev);
16002 	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
16003 	struct drm_modeset_acquire_ctx ctx;
16004 	int ret;
16005 	bool setup = false;
16006 
16007 	dev_priv->modeset_restore_state = NULL;
16008 
16009 	/*
16010 	 * This is a kludge because with real atomic modeset mode_config.mutex
16011 	 * won't be taken. Unfortunately some probed state like
16012 	 * audio_codec_enable is still protected by mode_config.mutex, so lock
16013 	 * it here for now.
16014 	 */
16015 	mutex_lock(&dev->mode_config.mutex);
16016 	drm_modeset_acquire_init(&ctx, 0);
16017 
16018 retry:
16019 	ret = drm_modeset_lock_all_ctx(dev, &ctx);
16020 
16021 	if (ret == 0 && !setup) {
16022 		setup = true;
16023 
16024 		intel_modeset_setup_hw_state(dev);
16025 		i915_redisable_vga(dev);
16026 	}
16027 
16028 	if (ret == 0 && state) {
16029 		struct drm_crtc_state *crtc_state;
16030 		struct drm_crtc *crtc;
16031 		int i;
16032 
16033 		state->acquire_ctx = &ctx;
16034 
16035 		/* ignore any reset values/BIOS leftovers in the WM registers */
16036 		to_intel_atomic_state(state)->skip_intermediate_wm = true;
16037 
16038 		for_each_crtc_in_state(state, crtc, crtc_state, i) {
16039 			/*
16040 			 * Force recalculation even if we restore
16041 			 * current state. With fast modeset this may not result
16042 			 * in a modeset when the state is compatible.
16043 			 */
16044 			crtc_state->mode_changed = true;
16045 		}
16046 
16047 		ret = drm_atomic_commit(state);
16048 	}
16049 
16050 	if (ret == -EDEADLK) {
16051 		drm_modeset_backoff(&ctx);
16052 		goto retry;
16053 	}
16054 
16055 	drm_modeset_drop_locks(&ctx);
16056 	drm_modeset_acquire_fini(&ctx);
16057 	mutex_unlock(&dev->mode_config.mutex);
16058 
16059 	if (ret) {
16060 		DRM_ERROR("Restoring old state failed with %i\n", ret);
16061 		drm_atomic_state_free(state);
16062 	}
16063 }
16064 
16065 void intel_modeset_gem_init(struct drm_device *dev)
16066 {
16067 	struct drm_crtc *c;
16068 	struct drm_i915_gem_object *obj;
16069 	int ret;
16070 
16071 	intel_init_gt_powersave(dev);
16072 
16073 	intel_modeset_init_hw(dev);
16074 
16075 	intel_setup_overlay(dev);
16076 
16077 	/*
16078 	 * Make sure any fbs we allocated at startup are properly
16079 	 * pinned & fenced.  When we do the allocation it's too early
16080 	 * for this.
16081 	 */
16082 	for_each_crtc(dev, c) {
16083 		obj = intel_fb_obj(c->primary->fb);
16084 		if (obj == NULL)
16085 			continue;
16086 
16087 		mutex_lock(&dev->struct_mutex);
16088 		ret = intel_pin_and_fence_fb_obj(c->primary->fb,
16089 						 c->primary->state->rotation);
16090 		mutex_unlock(&dev->struct_mutex);
16091 		if (ret) {
16092 			DRM_ERROR("failed to pin boot fb on pipe %d\n",
16093 				  to_intel_crtc(c)->pipe);
16094 			drm_framebuffer_unreference(c->primary->fb);
16095 			c->primary->fb = NULL;
16096 			c->primary->crtc = c->primary->state->crtc = NULL;
16097 			update_state_fb(c->primary);
16098 			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
16099 		}
16100 	}
16101 
16102 	intel_backlight_register(dev);
16103 }
16104 
16105 void intel_connector_unregister(struct intel_connector *intel_connector)
16106 {
16107 	struct drm_connector *connector = &intel_connector->base;
16108 
16109 	intel_panel_destroy_backlight(connector);
16110 	drm_connector_unregister(connector);
16111 }
16112 
16113 void intel_modeset_cleanup(struct drm_device *dev)
16114 {
16115 	struct drm_i915_private *dev_priv = dev->dev_private;
16116 	struct intel_connector *connector;
16117 
16118 	intel_disable_gt_powersave(dev);
16119 
16120 	intel_backlight_unregister(dev);
16121 
16122 	/*
16123 	 * Uninstall interrupts and stop polling as the very first thing to
16124 	 * avoid creating havoc. Too much of the teardown below (turning off
16125 	 * connectors, ...) would experience fancy races otherwise.
16126 	 */
16127 	intel_irq_uninstall(dev_priv);
16128 
16129 	/*
16130 	 * Due to the hpd irq storm handling the hotplug work can re-arm the
16131 	 * poll handlers. Hence disable polling after hpd handling is shut down.
16132 	 */
16133 	drm_kms_helper_poll_fini(dev);
16134 
16135 	intel_unregister_dsm_handler();
16136 
16137 	intel_fbc_global_disable(dev_priv);
16138 
16139 	/* flush any delayed tasks or pending work */
16140 	flush_scheduled_work();
16141 
16142 	/* destroy the backlight and sysfs files before encoders/connectors */
16143 	for_each_intel_connector(dev, connector)
16144 		connector->unregister(connector);
16145 
16146 	drm_mode_config_cleanup(dev);
16147 
16148 	intel_cleanup_overlay(dev);
16149 
16150 	intel_cleanup_gt_powersave(dev);
16151 
16152 	intel_teardown_gmbus(dev);
16153 }
16154 
16155 /*
16156  * Return the encoder currently attached to the given connector.
16157  */
16158 struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
16159 {
16160 	return &intel_attached_encoder(connector)->base;
16161 }
16162 
16163 void intel_connector_attach_encoder(struct intel_connector *connector,
16164 				    struct intel_encoder *encoder)
16165 {
16166 	connector->encoder = encoder;
16167 	drm_mode_connector_attach_encoder(&connector->base,
16168 					  &encoder->base);
16169 }
16170 
16171 /*
16172  * Set VGA decode state - true == enable VGA decode.
16173  */
16174 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
16175 {
16176 	struct drm_i915_private *dev_priv = dev->dev_private;
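	/* The VGA decode enable bit lives in the GMCH control word in the
	 * host bridge's PCI config space; gen6 moved it to a new offset. */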
16177 	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
16178 	u16 gmch_ctrl;
16179 
16180 	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
16181 		DRM_ERROR("failed to read control word\n");
16182 		return -EIO;
16183 	}
16184 
16185 	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
16186 		return 0;
16187 
16188 	if (state)
16189 		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16190 	else
16191 		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
16192 
16193 	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16194 		DRM_ERROR("failed to write control word\n");
16195 		return -EIO;
16196 	}
16197 
16198 	return 0;
16199 }
16200 
16201 #if 0
16202 struct intel_display_error_state {
16203 
16204 	u32 power_well_driver;
16205 
16206 	int num_transcoders;
16207 
16208 	struct intel_cursor_error_state {
16209 		u32 control;
16210 		u32 position;
16211 		u32 base;
16212 		u32 size;
16213 	} cursor[I915_MAX_PIPES];
16214 
16215 	struct intel_pipe_error_state {
16216 		bool power_domain_on;
16217 		u32 source;
16218 		u32 stat;
16219 	} pipe[I915_MAX_PIPES];
16220 
16221 	struct intel_plane_error_state {
16222 		u32 control;
16223 		u32 stride;
16224 		u32 size;
16225 		u32 pos;
16226 		u32 addr;
16227 		u32 surface;
16228 		u32 tile_offset;
16229 	} plane[I915_MAX_PIPES];
16230 
16231 	struct intel_transcoder_error_state {
16232 		bool power_domain_on;
16233 		enum transcoder cpu_transcoder;
16234 
16235 		u32 conf;
16236 
16237 		u32 htotal;
16238 		u32 hblank;
16239 		u32 hsync;
16240 		u32 vtotal;
16241 		u32 vblank;
16242 		u32 vsync;
16243 	} transcoder[4];
16244 };
16245 
16246 struct intel_display_error_state *
16247 intel_display_capture_error_state(struct drm_device *dev)
16248 {
16249 	struct drm_i915_private *dev_priv = dev->dev_private;
16250 	struct intel_display_error_state *error;
16251 	int transcoders[] = {
16252 		TRANSCODER_A,
16253 		TRANSCODER_B,
16254 		TRANSCODER_C,
16255 		TRANSCODER_EDP,
16256 	};
16257 	int i;
16258 
16259 	if (INTEL_INFO(dev)->num_pipes == 0)
16260 		return NULL;
16261 
16262 	error = kzalloc(sizeof(*error), GFP_ATOMIC);
16263 	if (error == NULL)
16264 		return NULL;
16265 
16266 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
16267 		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
16268 
16269 	for_each_pipe(dev_priv, i) {
16270 		error->pipe[i].power_domain_on =
16271 			__intel_display_power_is_enabled(dev_priv,
16272 							 POWER_DOMAIN_PIPE(i));
16273 		if (!error->pipe[i].power_domain_on)
16274 			continue;
16275 
16276 		error->cursor[i].control = I915_READ(CURCNTR(i));
16277 		error->cursor[i].position = I915_READ(CURPOS(i));
16278 		error->cursor[i].base = I915_READ(CURBASE(i));
16279 
16280 		error->plane[i].control = I915_READ(DSPCNTR(i));
16281 		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
16282 		if (INTEL_INFO(dev)->gen <= 3) {
16283 			error->plane[i].size = I915_READ(DSPSIZE(i));
16284 			error->plane[i].pos = I915_READ(DSPPOS(i));
16285 		}
16286 		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
16287 			error->plane[i].addr = I915_READ(DSPADDR(i));
16288 		if (INTEL_INFO(dev)->gen >= 4) {
16289 			error->plane[i].surface = I915_READ(DSPSURF(i));
16290 			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
16291 		}
16292 
16293 		error->pipe[i].source = I915_READ(PIPESRC(i));
16294 
16295 		if (HAS_GMCH_DISPLAY(dev))
16296 			error->pipe[i].stat = I915_READ(PIPESTAT(i));
16297 	}
16298 
16299 	/* Note: this does not include DSI transcoders. */
16300 	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
16301 	if (HAS_DDI(dev_priv))
16302 		error->num_transcoders++; /* Account for eDP. */
16303 
16304 	for (i = 0; i < error->num_transcoders; i++) {
16305 		enum transcoder cpu_transcoder = transcoders[i];
16306 
16307 		error->transcoder[i].power_domain_on =
16308 			__intel_display_power_is_enabled(dev_priv,
16309 				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
16310 		if (!error->transcoder[i].power_domain_on)
16311 			continue;
16312 
16313 		error->transcoder[i].cpu_transcoder = cpu_transcoder;
16314 
16315 		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
16316 		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
16317 		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
16318 		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
16319 		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
16320 		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
16321 		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
16322 	}
16323 
16324 	return error;
16325 }
16326 
16327 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
16328 
16329 void
16330 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
16331 				struct drm_device *dev,
16332 				struct intel_display_error_state *error)
16333 {
16334 	struct drm_i915_private *dev_priv = dev->dev_private;
16335 	int i;
16336 
16337 	if (!error)
16338 		return;
16339 
16340 	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
16341 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
16342 		err_printf(m, "PWR_WELL_CTL2: %08x\n",
16343 			   error->power_well_driver);
16344 	for_each_pipe(dev_priv, i) {
16345 		err_printf(m, "Pipe [%d]:\n", i);
16346 		err_printf(m, "  Power: %s\n",
16347 			   onoff(error->pipe[i].power_domain_on));
16348 		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
16349 		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
16350 
16351 		err_printf(m, "Plane [%d]:\n", i);
16352 		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
16353 		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
16354 		if (INTEL_INFO(dev)->gen <= 3) {
16355 			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
16356 			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
16357 		}
16358 		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
16359 			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
16360 		if (INTEL_INFO(dev)->gen >= 4) {
16361 			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
16362 			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
16363 		}
16364 
16365 		err_printf(m, "Cursor [%d]:\n", i);
16366 		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
16367 		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
16368 		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
16369 	}
16370 
16371 	for (i = 0; i < error->num_transcoders; i++) {
16372 		err_printf(m, "CPU transcoder: %s\n",
16373 			   transcoder_name(error->transcoder[i].cpu_transcoder));
16374 		err_printf(m, "  Power: %s\n",
16375 			   onoff(error->transcoder[i].power_domain_on));
16376 		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
16377 		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
16378 		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
16379 		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
16380 		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
16381 		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
16382 		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
16383 	}
16384 }
16385 #endif
16386