1 /*
2  * Copyright © 2006-2007 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21  * DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *	Eric Anholt <eric@anholt.net>
25  */
26 
27 #include <linux/dmi.h>
28 #include <linux/module.h>
29 #include <linux/input.h>
30 #include <linux/i2c.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/vgaarb.h>
34 #include <drm/drm_edid.h>
35 #include <drm/drmP.h>
36 #include "intel_drv.h"
37 #include <drm/i915_drm.h>
38 #include "i915_drv.h"
39 #include "i915_gem_dmabuf.h"
40 #include "intel_dsi.h"
41 #include "i915_trace.h"
42 #include <drm/drm_atomic.h>
43 #include <drm/drm_atomic_helper.h>
44 #include <drm/drm_dp_helper.h>
45 #include <drm/drm_crtc_helper.h>
46 #include <drm/drm_plane_helper.h>
47 #include <drm/drm_rect.h>
48 #include <linux/dma_remapping.h>
49 #include <linux/reservation.h>
50 
51 static bool is_mmio_work(struct intel_flip_work *work)
52 {
53 	return work->mmio_work.func;
54 }
55 
56 /* Primary plane formats for gen <= 3 */
57 static const uint32_t i8xx_primary_formats[] = {
58 	DRM_FORMAT_C8,
59 	DRM_FORMAT_RGB565,
60 	DRM_FORMAT_XRGB1555,
61 	DRM_FORMAT_XRGB8888,
62 };
63 
64 /* Primary plane formats for gen >= 4 */
65 static const uint32_t i965_primary_formats[] = {
66 	DRM_FORMAT_C8,
67 	DRM_FORMAT_RGB565,
68 	DRM_FORMAT_XRGB8888,
69 	DRM_FORMAT_XBGR8888,
70 	DRM_FORMAT_XRGB2101010,
71 	DRM_FORMAT_XBGR2101010,
72 };
73 
74 static const uint32_t skl_primary_formats[] = {
75 	DRM_FORMAT_C8,
76 	DRM_FORMAT_RGB565,
77 	DRM_FORMAT_XRGB8888,
78 	DRM_FORMAT_XBGR8888,
79 	DRM_FORMAT_ARGB8888,
80 	DRM_FORMAT_ABGR8888,
81 	DRM_FORMAT_XRGB2101010,
82 	DRM_FORMAT_XBGR2101010,
83 	DRM_FORMAT_YUYV,
84 	DRM_FORMAT_YVYU,
85 	DRM_FORMAT_UYVY,
86 	DRM_FORMAT_VYUY,
87 };
88 
89 /* Cursor formats */
90 static const uint32_t intel_cursor_formats[] = {
91 	DRM_FORMAT_ARGB8888,
92 };
93 
94 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
95 				struct intel_crtc_state *pipe_config);
96 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
97 				   struct intel_crtc_state *pipe_config);
98 
99 static int intel_framebuffer_init(struct drm_device *dev,
100 				  struct intel_framebuffer *ifb,
101 				  struct drm_mode_fb_cmd2 *mode_cmd,
102 				  struct drm_i915_gem_object *obj);
103 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
104 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
105 static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
106 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
107 					 struct intel_link_m_n *m_n,
108 					 struct intel_link_m_n *m2_n2);
109 static void ironlake_set_pipeconf(struct drm_crtc *crtc);
110 static void haswell_set_pipeconf(struct drm_crtc *crtc);
111 static void haswell_set_pipemisc(struct drm_crtc *crtc);
112 static void vlv_prepare_pll(struct intel_crtc *crtc,
113 			    const struct intel_crtc_state *pipe_config);
114 static void chv_prepare_pll(struct intel_crtc *crtc,
115 			    const struct intel_crtc_state *pipe_config);
116 static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
117 static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
118 static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
119 	struct intel_crtc_state *crtc_state);
120 static void skylake_pfit_enable(struct intel_crtc *crtc);
121 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
122 static void ironlake_pfit_enable(struct intel_crtc *crtc);
123 static void intel_modeset_setup_hw_state(struct drm_device *dev);
124 static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
125 static int ilk_max_pixel_rate(struct drm_atomic_state *state);
126 static int bxt_calc_cdclk(int max_pixclk);
127 
128 struct intel_limit {
129 	struct {
130 		int min, max;
131 	} dot, vco, n, m, m1, m2, p, p1;
132 
133 	struct {
134 		int dot_limit;
135 		int p2_slow, p2_fast;
136 	} p2;
137 };
138 
139 /* returns HPLL frequency in kHz */
140 static int valleyview_get_vco(struct drm_i915_private *dev_priv)
141 {
142 	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
143 
144 	/* Obtain SKU information */
145 	mutex_lock(&dev_priv->sb_lock);
146 	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
147 		CCK_FUSE_HPLL_FREQ_MASK;
148 	mutex_unlock(&dev_priv->sb_lock);
149 
150 	return vco_freq[hpll_freq] * 1000;
151 }
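
/*
 * Worked example (illustrative fuse value, not from any SKU table): an
 * HPLL fuse field of 2 selects vco_freq[2] = 2000, so the function
 * reports 2000000 kHz, i.e. a 2 GHz HPLL.
 */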
152 
153 int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
154 		      const char *name, u32 reg, int ref_freq)
155 {
156 	u32 val;
157 	int divider;
158 
159 	mutex_lock(&dev_priv->sb_lock);
160 	val = vlv_cck_read(dev_priv, reg);
161 	mutex_unlock(&dev_priv->sb_lock);
162 
163 	divider = val & CCK_FREQUENCY_VALUES;
164 
165 	WARN((val & CCK_FREQUENCY_STATUS) !=
166 	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
167 	     "%s change in progress\n", name);
168 
169 	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
170 }
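
/*
 * The (ref_freq << 1) / (divider + 1) form suggests the CCK divider
 * works in half steps. Worked example (illustrative numbers): with
 * ref_freq = 1600000 kHz and a divider field of 7, the result is
 * DIV_ROUND_CLOSEST(3200000, 8) = 400000 kHz.
 */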
171 
172 static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
173 				  const char *name, u32 reg)
174 {
175 	if (dev_priv->hpll_freq == 0)
176 		dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
177 
178 	return vlv_get_cck_clock(dev_priv, name, reg,
179 				 dev_priv->hpll_freq);
180 }
181 
182 static int
183 intel_pch_rawclk(struct drm_i915_private *dev_priv)
184 {
185 	return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
186 }
187 
188 static int
189 intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
190 {
191 	/* RAWCLK_FREQ_VLV register updated from power well code */
192 	return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
193 				      CCK_DISPLAY_REF_CLOCK_CONTROL);
194 }
195 
196 static int
197 intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
198 {
199 	uint32_t clkcfg;
200 
201 	/* hrawclock is 1/4 the FSB frequency */
202 	clkcfg = I915_READ(CLKCFG);
203 	switch (clkcfg & CLKCFG_FSB_MASK) {
204 	case CLKCFG_FSB_400:
205 		return 100000;
206 	case CLKCFG_FSB_533:
207 		return 133333;
208 	case CLKCFG_FSB_667:
209 		return 166667;
210 	case CLKCFG_FSB_800:
211 		return 200000;
212 	case CLKCFG_FSB_1067:
213 		return 266667;
214 	case CLKCFG_FSB_1333:
215 		return 333333;
216 	/* these two are just a guess; one of them might be right */
217 	case CLKCFG_FSB_1600:
218 	case CLKCFG_FSB_1600_ALT:
219 		return 400000;
220 	default:
221 		return 133333;
222 	}
223 }
224 
225 void intel_update_rawclk(struct drm_i915_private *dev_priv)
226 {
227 	if (HAS_PCH_SPLIT(dev_priv))
228 		dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
229 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
230 		dev_priv->rawclk_freq = intel_vlv_hrawclk(dev_priv);
231 	else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
232 		dev_priv->rawclk_freq = intel_g4x_hrawclk(dev_priv);
233 	else
234 		return; /* no rawclk on other platforms, or no need to know it */
235 
236 	DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
237 }
238 
239 static void intel_update_czclk(struct drm_i915_private *dev_priv)
240 {
241 	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
242 		return;
243 
244 	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
245 						      CCK_CZ_CLOCK_CONTROL);
246 
247 	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
248 }
249 
250 static inline u32 /* units of 100MHz */
251 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
252 		    const struct intel_crtc_state *pipe_config)
253 {
254 	if (HAS_DDI(dev_priv))
255 		return pipe_config->port_clock; /* SPLL */
256 	else if (IS_GEN5(dev_priv))
257 		return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000;
258 	else
259 		return 270000;
260 }
261 
262 static const struct intel_limit intel_limits_i8xx_dac = {
263 	.dot = { .min = 25000, .max = 350000 },
264 	.vco = { .min = 908000, .max = 1512000 },
265 	.n = { .min = 2, .max = 16 },
266 	.m = { .min = 96, .max = 140 },
267 	.m1 = { .min = 18, .max = 26 },
268 	.m2 = { .min = 6, .max = 16 },
269 	.p = { .min = 4, .max = 128 },
270 	.p1 = { .min = 2, .max = 33 },
271 	.p2 = { .dot_limit = 165000,
272 		.p2_slow = 4, .p2_fast = 2 },
273 };
274 
275 static const struct intel_limit intel_limits_i8xx_dvo = {
276 	.dot = { .min = 25000, .max = 350000 },
277 	.vco = { .min = 908000, .max = 1512000 },
278 	.n = { .min = 2, .max = 16 },
279 	.m = { .min = 96, .max = 140 },
280 	.m1 = { .min = 18, .max = 26 },
281 	.m2 = { .min = 6, .max = 16 },
282 	.p = { .min = 4, .max = 128 },
283 	.p1 = { .min = 2, .max = 33 },
284 	.p2 = { .dot_limit = 165000,
285 		.p2_slow = 4, .p2_fast = 4 },
286 };
287 
288 static const struct intel_limit intel_limits_i8xx_lvds = {
289 	.dot = { .min = 25000, .max = 350000 },
290 	.vco = { .min = 908000, .max = 1512000 },
291 	.n = { .min = 2, .max = 16 },
292 	.m = { .min = 96, .max = 140 },
293 	.m1 = { .min = 18, .max = 26 },
294 	.m2 = { .min = 6, .max = 16 },
295 	.p = { .min = 4, .max = 128 },
296 	.p1 = { .min = 1, .max = 6 },
297 	.p2 = { .dot_limit = 165000,
298 		.p2_slow = 14, .p2_fast = 7 },
299 };
300 
301 static const struct intel_limit intel_limits_i9xx_sdvo = {
302 	.dot = { .min = 20000, .max = 400000 },
303 	.vco = { .min = 1400000, .max = 2800000 },
304 	.n = { .min = 1, .max = 6 },
305 	.m = { .min = 70, .max = 120 },
306 	.m1 = { .min = 8, .max = 18 },
307 	.m2 = { .min = 3, .max = 7 },
308 	.p = { .min = 5, .max = 80 },
309 	.p1 = { .min = 1, .max = 8 },
310 	.p2 = { .dot_limit = 200000,
311 		.p2_slow = 10, .p2_fast = 5 },
312 };
313 
314 static const struct intel_limit intel_limits_i9xx_lvds = {
315 	.dot = { .min = 20000, .max = 400000 },
316 	.vco = { .min = 1400000, .max = 2800000 },
317 	.n = { .min = 1, .max = 6 },
318 	.m = { .min = 70, .max = 120 },
319 	.m1 = { .min = 8, .max = 18 },
320 	.m2 = { .min = 3, .max = 7 },
321 	.p = { .min = 7, .max = 98 },
322 	.p1 = { .min = 1, .max = 8 },
323 	.p2 = { .dot_limit = 112000,
324 		.p2_slow = 14, .p2_fast = 7 },
325 };
326 
327 
328 static const struct intel_limit intel_limits_g4x_sdvo = {
329 	.dot = { .min = 25000, .max = 270000 },
330 	.vco = { .min = 1750000, .max = 3500000},
331 	.n = { .min = 1, .max = 4 },
332 	.m = { .min = 104, .max = 138 },
333 	.m1 = { .min = 17, .max = 23 },
334 	.m2 = { .min = 5, .max = 11 },
335 	.p = { .min = 10, .max = 30 },
336 	.p1 = { .min = 1, .max = 3},
337 	.p2 = { .dot_limit = 270000,
338 		.p2_slow = 10,
339 		.p2_fast = 10
340 	},
341 };
342 
343 static const struct intel_limit intel_limits_g4x_hdmi = {
344 	.dot = { .min = 22000, .max = 400000 },
345 	.vco = { .min = 1750000, .max = 3500000},
346 	.n = { .min = 1, .max = 4 },
347 	.m = { .min = 104, .max = 138 },
348 	.m1 = { .min = 16, .max = 23 },
349 	.m2 = { .min = 5, .max = 11 },
350 	.p = { .min = 5, .max = 80 },
351 	.p1 = { .min = 1, .max = 8},
352 	.p2 = { .dot_limit = 165000,
353 		.p2_slow = 10, .p2_fast = 5 },
354 };
355 
356 static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
357 	.dot = { .min = 20000, .max = 115000 },
358 	.vco = { .min = 1750000, .max = 3500000 },
359 	.n = { .min = 1, .max = 3 },
360 	.m = { .min = 104, .max = 138 },
361 	.m1 = { .min = 17, .max = 23 },
362 	.m2 = { .min = 5, .max = 11 },
363 	.p = { .min = 28, .max = 112 },
364 	.p1 = { .min = 2, .max = 8 },
365 	.p2 = { .dot_limit = 0,
366 		.p2_slow = 14, .p2_fast = 14
367 	},
368 };
369 
370 static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
371 	.dot = { .min = 80000, .max = 224000 },
372 	.vco = { .min = 1750000, .max = 3500000 },
373 	.n = { .min = 1, .max = 3 },
374 	.m = { .min = 104, .max = 138 },
375 	.m1 = { .min = 17, .max = 23 },
376 	.m2 = { .min = 5, .max = 11 },
377 	.p = { .min = 14, .max = 42 },
378 	.p1 = { .min = 2, .max = 6 },
379 	.p2 = { .dot_limit = 0,
380 		.p2_slow = 7, .p2_fast = 7
381 	},
382 };
383 
384 static const struct intel_limit intel_limits_pineview_sdvo = {
385 	.dot = { .min = 20000, .max = 400000},
386 	.vco = { .min = 1700000, .max = 3500000 },
387 	/* Pineview's Ncounter is a ring counter */
388 	.n = { .min = 3, .max = 6 },
389 	.m = { .min = 2, .max = 256 },
390 	/* Pineview only has one combined m divider, which we treat as m2. */
391 	.m1 = { .min = 0, .max = 0 },
392 	.m2 = { .min = 0, .max = 254 },
393 	.p = { .min = 5, .max = 80 },
394 	.p1 = { .min = 1, .max = 8 },
395 	.p2 = { .dot_limit = 200000,
396 		.p2_slow = 10, .p2_fast = 5 },
397 };
398 
399 static const struct intel_limit intel_limits_pineview_lvds = {
400 	.dot = { .min = 20000, .max = 400000 },
401 	.vco = { .min = 1700000, .max = 3500000 },
402 	.n = { .min = 3, .max = 6 },
403 	.m = { .min = 2, .max = 256 },
404 	.m1 = { .min = 0, .max = 0 },
405 	.m2 = { .min = 0, .max = 254 },
406 	.p = { .min = 7, .max = 112 },
407 	.p1 = { .min = 1, .max = 8 },
408 	.p2 = { .dot_limit = 112000,
409 		.p2_slow = 14, .p2_fast = 14 },
410 };
411 
412 /* Ironlake / Sandybridge
413  *
414  * We calculate the clock using (register_value + 2) for N/M1/M2, so the
415  * range values given here are (actual_value - 2).
416  */
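
/*
 * Illustrative consequence: an m1 range value of 12 in the tables below
 * corresponds to an actual m1 divider of 14 in the clock equation.
 */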
417 static const struct intel_limit intel_limits_ironlake_dac = {
418 	.dot = { .min = 25000, .max = 350000 },
419 	.vco = { .min = 1760000, .max = 3510000 },
420 	.n = { .min = 1, .max = 5 },
421 	.m = { .min = 79, .max = 127 },
422 	.m1 = { .min = 12, .max = 22 },
423 	.m2 = { .min = 5, .max = 9 },
424 	.p = { .min = 5, .max = 80 },
425 	.p1 = { .min = 1, .max = 8 },
426 	.p2 = { .dot_limit = 225000,
427 		.p2_slow = 10, .p2_fast = 5 },
428 };
429 
430 static const struct intel_limit intel_limits_ironlake_single_lvds = {
431 	.dot = { .min = 25000, .max = 350000 },
432 	.vco = { .min = 1760000, .max = 3510000 },
433 	.n = { .min = 1, .max = 3 },
434 	.m = { .min = 79, .max = 118 },
435 	.m1 = { .min = 12, .max = 22 },
436 	.m2 = { .min = 5, .max = 9 },
437 	.p = { .min = 28, .max = 112 },
438 	.p1 = { .min = 2, .max = 8 },
439 	.p2 = { .dot_limit = 225000,
440 		.p2_slow = 14, .p2_fast = 14 },
441 };
442 
443 static const struct intel_limit intel_limits_ironlake_dual_lvds = {
444 	.dot = { .min = 25000, .max = 350000 },
445 	.vco = { .min = 1760000, .max = 3510000 },
446 	.n = { .min = 1, .max = 3 },
447 	.m = { .min = 79, .max = 127 },
448 	.m1 = { .min = 12, .max = 22 },
449 	.m2 = { .min = 5, .max = 9 },
450 	.p = { .min = 14, .max = 56 },
451 	.p1 = { .min = 2, .max = 8 },
452 	.p2 = { .dot_limit = 225000,
453 		.p2_slow = 7, .p2_fast = 7 },
454 };
455 
456 /* LVDS 100 MHz refclk limits. */
457 static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
458 	.dot = { .min = 25000, .max = 350000 },
459 	.vco = { .min = 1760000, .max = 3510000 },
460 	.n = { .min = 1, .max = 2 },
461 	.m = { .min = 79, .max = 126 },
462 	.m1 = { .min = 12, .max = 22 },
463 	.m2 = { .min = 5, .max = 9 },
464 	.p = { .min = 28, .max = 112 },
465 	.p1 = { .min = 2, .max = 8 },
466 	.p2 = { .dot_limit = 225000,
467 		.p2_slow = 14, .p2_fast = 14 },
468 };
469 
470 static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
471 	.dot = { .min = 25000, .max = 350000 },
472 	.vco = { .min = 1760000, .max = 3510000 },
473 	.n = { .min = 1, .max = 3 },
474 	.m = { .min = 79, .max = 126 },
475 	.m1 = { .min = 12, .max = 22 },
476 	.m2 = { .min = 5, .max = 9 },
477 	.p = { .min = 14, .max = 42 },
478 	.p1 = { .min = 2, .max = 6 },
479 	.p2 = { .dot_limit = 225000,
480 		.p2_slow = 7, .p2_fast = 7 },
481 };
482 
483 static const struct intel_limit intel_limits_vlv = {
484 	 /*
485 	  * These are the data rate limits (measured in fast clocks)
486 	  * since those are the strictest limits we have. The fast
487 	  * clock and actual rate limits are more relaxed, so checking
488 	  * them would make no difference.
489 	  */
490 	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
491 	.vco = { .min = 4000000, .max = 6000000 },
492 	.n = { .min = 1, .max = 7 },
493 	.m1 = { .min = 2, .max = 3 },
494 	.m2 = { .min = 11, .max = 156 },
495 	.p1 = { .min = 2, .max = 3 },
496 	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
497 };
498 
499 static const struct intel_limit intel_limits_chv = {
500 	/*
501 	 * These are the data rate limits (measured in fast clocks)
502 	 * since those are the strictest limits we have.  The fast
503 	 * clock and actual rate limits are more relaxed, so checking
504 	 * them would make no difference.
505 	 */
506 	.dot = { .min = 25000 * 5, .max = 540000 * 5},
507 	.vco = { .min = 4800000, .max = 6480000 },
508 	.n = { .min = 1, .max = 1 },
509 	.m1 = { .min = 2, .max = 2 },
510 	.m2 = { .min = 24 << 22, .max = 175 << 22 },
511 	.p1 = { .min = 2, .max = 4 },
512 	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
513 };
514 
515 static const struct intel_limit intel_limits_bxt = {
516 	/* FIXME: find real dot limits */
517 	.dot = { .min = 0, .max = INT_MAX },
518 	.vco = { .min = 4800000, .max = 6700000 },
519 	.n = { .min = 1, .max = 1 },
520 	.m1 = { .min = 2, .max = 2 },
521 	/* FIXME: find real m2 limits */
522 	.m2 = { .min = 2 << 22, .max = 255 << 22 },
523 	.p1 = { .min = 2, .max = 4 },
524 	.p2 = { .p2_slow = 1, .p2_fast = 20 },
525 };
526 
527 static bool
528 needs_modeset(struct drm_crtc_state *state)
529 {
530 	return drm_atomic_crtc_needs_modeset(state);
531 }
532 
533 /**
534  * Returns whether any output on the specified pipe is of the specified type
535  */
536 bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
537 {
538 	struct drm_device *dev = crtc->base.dev;
539 	struct intel_encoder *encoder;
540 
541 	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
542 		if (encoder->type == type)
543 			return true;
544 
545 	return false;
546 }
547 
548 /**
549  * Returns whether any output on the specified pipe will have the specified
550  * type after a staged modeset is complete, i.e., the same as
551  * intel_pipe_has_type() but looking at the staged atomic state instead
552  * of the current encoder->crtc.
553  */
554 static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
555 				      int type)
556 {
557 	struct drm_atomic_state *state = crtc_state->base.state;
558 	struct drm_connector *connector;
559 	struct drm_connector_state *connector_state;
560 	struct intel_encoder *encoder;
561 	int i, num_connectors = 0;
562 
563 	for_each_connector_in_state(state, connector, connector_state, i) {
564 		if (connector_state->crtc != crtc_state->base.crtc)
565 			continue;
566 
567 		num_connectors++;
568 
569 		encoder = to_intel_encoder(connector_state->best_encoder);
570 		if (encoder->type == type)
571 			return true;
572 	}
573 
574 	WARN_ON(num_connectors == 0);
575 
576 	return false;
577 }
578 
579 /*
580  * Platform specific helpers to calculate the port PLL loopback- (clock.m),
581  * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
582  * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
583  * The helpers' return value is the rate of the clock that is fed to the
584  * display engine's pipe which can be the above fast dot clock rate or a
585  * divided-down version of it.
586  */
587 /* m1 is reserved as 0 in Pineview, n is a ring counter */
588 static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
589 {
590 	clock->m = clock->m2 + 2;
591 	clock->p = clock->p1 * clock->p2;
592 	if (WARN_ON(clock->n == 0 || clock->p == 0))
593 		return 0;
594 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
595 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
596 
597 	return clock->dot;
598 }
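
/*
 * Worked example with made-up (but in-range) Pineview divisors:
 * refclk = 96000 kHz, m2 = 100 -> m = 102; n = 4, p1 = 2, p2 = 10 ->
 * p = 20; vco = 96000 * 102 / 4 = 2448000 kHz and
 * dot = 2448000 / 20 = 122400 kHz.
 */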
599 
600 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
601 {
602 	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
603 }
604 
605 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
606 {
607 	clock->m = i9xx_dpll_compute_m(clock);
608 	clock->p = clock->p1 * clock->p2;
609 	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
610 		return 0;
611 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
612 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
613 
614 	return clock->dot;
615 }
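
/*
 * Worked example with made-up (but in-range) i9xx divisors:
 * refclk = 96000 kHz, m1 = 12, m2 = 6 -> m = 5 * 14 + 8 = 78;
 * n = 2, p1 = 2, p2 = 5 -> p = 10; vco = 96000 * 78 / (2 + 2) =
 * 1872000 kHz and dot = 1872000 / 10 = 187200 kHz.
 */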
616 
617 static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
618 {
619 	clock->m = clock->m1 * clock->m2;
620 	clock->p = clock->p1 * clock->p2;
621 	if (WARN_ON(clock->n == 0 || clock->p == 0))
622 		return 0;
623 	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
624 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
625 
626 	return clock->dot / 5;
627 }
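
/*
 * Worked example with made-up (but in-range) VLV divisors, showing the
 * 5x fast clock relation: refclk = 100000 kHz, m1 = 3, m2 = 80 ->
 * m = 240; n = 4 -> vco = 100000 * 240 / 4 = 6000000 kHz; p1 = 3,
 * p2 = 4 -> p = 12 -> fast dot = 500000 kHz, so the pipe is fed
 * 500000 / 5 = 100000 kHz.
 */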
628 
629 int chv_calc_dpll_params(int refclk, struct dpll *clock)
630 {
631 	clock->m = clock->m1 * clock->m2;
632 	clock->p = clock->p1 * clock->p2;
633 	if (WARN_ON(clock->n == 0 || clock->p == 0))
634 		return 0;
635 	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
636 			clock->n << 22);
637 	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
638 
639 	return clock->dot / 5;
640 }
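
/*
 * Note that on CHV m2 carries 22 fractional bits, hence the n << 22
 * divisor above. Worked example (made-up values): refclk = 19200 kHz,
 * m1 = 2, m2 = 130 << 22, n = 1 -> vco = 19200 * 2 * 130 = 4992000 kHz.
 */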
641 
642 #define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
643 /**
644  * Returns whether the given set of divisors is valid for a given refclk with
645  * the given connectors.
646  */
647 
648 static bool intel_PLL_is_valid(struct drm_device *dev,
649 			       const struct intel_limit *limit,
650 			       const struct dpll *clock)
651 {
652 	if (clock->n   < limit->n.min   || limit->n.max   < clock->n)
653 		INTELPllInvalid("n out of range\n");
654 	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
655 		INTELPllInvalid("p1 out of range\n");
656 	if (clock->m2  < limit->m2.min  || limit->m2.max  < clock->m2)
657 		INTELPllInvalid("m2 out of range\n");
658 	if (clock->m1  < limit->m1.min  || limit->m1.max  < clock->m1)
659 		INTELPllInvalid("m1 out of range\n");
660 
661 	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) &&
662 	    !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev))
663 		if (clock->m1 <= clock->m2)
664 			INTELPllInvalid("m1 <= m2\n");
665 
666 	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) {
667 		if (clock->p < limit->p.min || limit->p.max < clock->p)
668 			INTELPllInvalid("p out of range\n");
669 		if (clock->m < limit->m.min || limit->m.max < clock->m)
670 			INTELPllInvalid("m out of range\n");
671 	}
672 
673 	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
674 		INTELPllInvalid("vco out of range\n");
675 	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
676 	 * connector, etc., rather than just a single range.
677 	 */
678 	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
679 		INTELPllInvalid("dot out of range\n");
680 
681 	return true;
682 }
683 
684 static int
685 i9xx_select_p2_div(const struct intel_limit *limit,
686 		   const struct intel_crtc_state *crtc_state,
687 		   int target)
688 {
689 	struct drm_device *dev = crtc_state->base.crtc->dev;
690 
691 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
692 		/*
693 		 * For LVDS just rely on its current settings for dual-channel.
694 		 * We haven't figured out how to reliably set up different
695 		 * single/dual channel state, if we even can.
696 		 */
697 		if (intel_is_dual_link_lvds(dev))
698 			return limit->p2.p2_fast;
699 		else
700 			return limit->p2.p2_slow;
701 	} else {
702 		if (target < limit->p2.dot_limit)
703 			return limit->p2.p2_slow;
704 		else
705 			return limit->p2.p2_fast;
706 	}
707 }
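
/*
 * Worked example using the intel_limits_i9xx_sdvo table above for a
 * non-LVDS output: dot_limit is 200000, so a 150000 kHz target selects
 * p2_slow = 10 and a 250000 kHz target selects p2_fast = 5.
 */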
708 
709 /*
710  * Returns a set of divisors for the desired target clock with the given
711  * refclk, or FALSE.  The returned values represent the clock equation:
712  * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
713  *
714  * Target and reference clocks are specified in kHz.
715  *
716  * If match_clock is provided, then best_clock P divider must match the P
717  * divider from @match_clock used for LVDS downclocking.
718  */
719 static bool
720 i9xx_find_best_dpll(const struct intel_limit *limit,
721 		    struct intel_crtc_state *crtc_state,
722 		    int target, int refclk, struct dpll *match_clock,
723 		    struct dpll *best_clock)
724 {
725 	struct drm_device *dev = crtc_state->base.crtc->dev;
726 	struct dpll clock;
727 	int err = target;
728 
729 	memset(best_clock, 0, sizeof(*best_clock));
730 
731 	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
732 
733 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
734 	     clock.m1++) {
735 		for (clock.m2 = limit->m2.min;
736 		     clock.m2 <= limit->m2.max; clock.m2++) {
737 			if (clock.m2 >= clock.m1)
738 				break;
739 			for (clock.n = limit->n.min;
740 			     clock.n <= limit->n.max; clock.n++) {
741 				for (clock.p1 = limit->p1.min;
742 					clock.p1 <= limit->p1.max; clock.p1++) {
743 					int this_err;
744 
745 					i9xx_calc_dpll_params(refclk, &clock);
746 					if (!intel_PLL_is_valid(dev, limit,
747 								&clock))
748 						continue;
749 					if (match_clock &&
750 					    clock.p != match_clock->p)
751 						continue;
752 
753 					this_err = abs(clock.dot - target);
754 					if (this_err < err) {
755 						*best_clock = clock;
756 						err = this_err;
757 					}
758 				}
759 			}
760 		}
761 	}
762 
763 	return (err != target);
764 }
765 
766 /*
767  * Returns a set of divisors for the desired target clock with the given
768  * refclk, or FALSE.  The returned values represent the clock equation:
769  * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
770  *
771  * Target and reference clocks are specified in kHz.
772  *
773  * If match_clock is provided, then best_clock P divider must match the P
774  * divider from @match_clock used for LVDS downclocking.
775  */
776 static bool
777 pnv_find_best_dpll(const struct intel_limit *limit,
778 		   struct intel_crtc_state *crtc_state,
779 		   int target, int refclk, struct dpll *match_clock,
780 		   struct dpll *best_clock)
781 {
782 	struct drm_device *dev = crtc_state->base.crtc->dev;
783 	struct dpll clock;
784 	int err = target;
785 
786 	memset(best_clock, 0, sizeof(*best_clock));
787 
788 	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
789 
790 	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
791 	     clock.m1++) {
792 		for (clock.m2 = limit->m2.min;
793 		     clock.m2 <= limit->m2.max; clock.m2++) {
794 			for (clock.n = limit->n.min;
795 			     clock.n <= limit->n.max; clock.n++) {
796 				for (clock.p1 = limit->p1.min;
797 					clock.p1 <= limit->p1.max; clock.p1++) {
798 					int this_err;
799 
800 					pnv_calc_dpll_params(refclk, &clock);
801 					if (!intel_PLL_is_valid(dev, limit,
802 								&clock))
803 						continue;
804 					if (match_clock &&
805 					    clock.p != match_clock->p)
806 						continue;
807 
808 					this_err = abs(clock.dot - target);
809 					if (this_err < err) {
810 						*best_clock = clock;
811 						err = this_err;
812 					}
813 				}
814 			}
815 		}
816 	}
817 
818 	return (err != target);
819 }
820 
821 /*
822  * Returns a set of divisors for the desired target clock with the given
823  * refclk, or FALSE.  The returned values represent the clock equation:
824  * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
825  *
826  * Target and reference clocks are specified in kHz.
827  *
828  * If match_clock is provided, then best_clock P divider must match the P
829  * divider from @match_clock used for LVDS downclocking.
830  */
831 static bool
832 g4x_find_best_dpll(const struct intel_limit *limit,
833 		   struct intel_crtc_state *crtc_state,
834 		   int target, int refclk, struct dpll *match_clock,
835 		   struct dpll *best_clock)
836 {
837 	struct drm_device *dev = crtc_state->base.crtc->dev;
838 	struct dpll clock;
839 	int max_n;
840 	bool found = false;
841 	/* approximately equals target * 0.00585 */
842 	int err_most = (target >> 8) + (target >> 9);
843 
844 	memset(best_clock, 0, sizeof(*best_clock));
845 
846 	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
847 
848 	max_n = limit->n.max;
849 	/* based on hardware requirement, prefer smaller n for better precision */
850 	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
851 		/* based on hardware requirement, prefer larger m1,m2 */
852 		for (clock.m1 = limit->m1.max;
853 		     clock.m1 >= limit->m1.min; clock.m1--) {
854 			for (clock.m2 = limit->m2.max;
855 			     clock.m2 >= limit->m2.min; clock.m2--) {
856 				for (clock.p1 = limit->p1.max;
857 				     clock.p1 >= limit->p1.min; clock.p1--) {
858 					int this_err;
859 
860 					i9xx_calc_dpll_params(refclk, &clock);
861 					if (!intel_PLL_is_valid(dev, limit,
862 								&clock))
863 						continue;
864 
865 					this_err = abs(clock.dot - target);
866 					if (this_err < err_most) {
867 						*best_clock = clock;
868 						err_most = this_err;
869 						max_n = clock.n;
870 						found = true;
871 					}
872 				}
873 			}
874 		}
875 	}
876 	return found;
877 }
878 
879 /*
880  * Check whether the calculated PLL configuration is better than the best one
881  * found so far. Returns true if so; the calculated error is stored in *error_ppm.
882  */
883 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
884 			       const struct dpll *calculated_clock,
885 			       const struct dpll *best_clock,
886 			       unsigned int best_error_ppm,
887 			       unsigned int *error_ppm)
888 {
889 	/*
890 	 * For CHV ignore the error and consider only the P value.
891 	 * Prefer a bigger P value based on HW requirements.
892 	 */
893 	if (IS_CHERRYVIEW(dev)) {
894 		*error_ppm = 0;
895 
896 		return calculated_clock->p > best_clock->p;
897 	}
898 
899 	if (WARN_ON_ONCE(!target_freq))
900 		return false;
901 
902 	*error_ppm = div_u64(1000000ULL *
903 				abs(target_freq - calculated_clock->dot),
904 			     target_freq);
905 	/*
906 	 * Prefer a better P value over a better (smaller) error if the error
907 	 * is small. Ensure this preference for future configurations too by
908 	 * setting the error to 0.
909 	 */
910 	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
911 		*error_ppm = 0;
912 
913 		return true;
914 	}
915 
916 	return *error_ppm + 10 < best_error_ppm;
917 }
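
/*
 * Worked example (made-up numbers): for target_freq = 100000 kHz and a
 * calculated dot of 100050 kHz, *error_ppm = 1000000 * 50 / 100000 =
 * 500 ppm. Had the error been below 100 ppm with a larger P, the new
 * configuration would win outright and the stored error would be zeroed.
 */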
918 
919 /*
920  * Returns a set of divisors for the desired target clock with the given
921  * refclk, or FALSE.  The returned values represent the clock equation:
922  * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
923  */
924 static bool
925 vlv_find_best_dpll(const struct intel_limit *limit,
926 		   struct intel_crtc_state *crtc_state,
927 		   int target, int refclk, struct dpll *match_clock,
928 		   struct dpll *best_clock)
929 {
930 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
931 	struct drm_device *dev = crtc->base.dev;
932 	struct dpll clock;
933 	unsigned int bestppm = 1000000;
934 	/* min update 19.2 MHz */
935 	int max_n = min(limit->n.max, refclk / 19200);
936 	bool found = false;
937 
938 	target *= 5; /* fast clock */
939 
940 	memset(best_clock, 0, sizeof(*best_clock));
941 
942 	/* based on hardware requirement, prefer smaller n for better precision */
943 	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
944 		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
945 			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
946 			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
947 				clock.p = clock.p1 * clock.p2;
948 				/* based on hardware requirement, prefer bigger m1,m2 values */
949 				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
950 					unsigned int ppm;
951 
952 					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
953 								     refclk * clock.m1);
954 
955 					vlv_calc_dpll_params(refclk, &clock);
956 
957 					if (!intel_PLL_is_valid(dev, limit,
958 								&clock))
959 						continue;
960 
961 					if (!vlv_PLL_is_optimal(dev, target,
962 								&clock,
963 								best_clock,
964 								bestppm, &ppm))
965 						continue;
966 
967 					*best_clock = clock;
968 					bestppm = ppm;
969 					found = true;
970 				}
971 			}
972 		}
973 	}
974 
975 	return found;
976 }
977 
978 /*
979  * Returns a set of divisors for the desired target clock with the given
980  * refclk, or FALSE.  The returned values represent the clock equation:
981  * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
982  */
983 static bool
984 chv_find_best_dpll(const struct intel_limit *limit,
985 		   struct intel_crtc_state *crtc_state,
986 		   int target, int refclk, struct dpll *match_clock,
987 		   struct dpll *best_clock)
988 {
989 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
990 	struct drm_device *dev = crtc->base.dev;
991 	unsigned int best_error_ppm;
992 	struct dpll clock;
993 	uint64_t m2;
994 	bool found = false;
995 
996 	memset(best_clock, 0, sizeof(*best_clock));
997 	best_error_ppm = 1000000;
998 
999 	/*
1000 	 * Based on the hardware doc, n is always set to 1, and m1 is always
1001 	 * set to 2.  If we ever need to support a 200 MHz refclk, we must
1002 	 * revisit this because n may no longer be 1.
1003 	 */
1004 	clock.n = 1; clock.m1 = 2;
1005 	target *= 5;	/* fast clock */
1006 
1007 	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
1008 		for (clock.p2 = limit->p2.p2_fast;
1009 				clock.p2 >= limit->p2.p2_slow;
1010 				clock.p2 -= clock.p2 > 10 ? 2 : 1) {
1011 			unsigned int error_ppm;
1012 
1013 			clock.p = clock.p1 * clock.p2;
1014 
1015 			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
1016 					clock.n) << 22, refclk * clock.m1);
1017 
1018 			if (m2 > INT_MAX/clock.m1)
1019 				continue;
1020 
1021 			clock.m2 = m2;
1022 
1023 			chv_calc_dpll_params(refclk, &clock);
1024 
1025 			if (!intel_PLL_is_valid(dev, limit, &clock))
1026 				continue;
1027 
1028 			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
1029 						best_error_ppm, &error_ppm))
1030 				continue;
1031 
1032 			*best_clock = clock;
1033 			best_error_ppm = error_ppm;
1034 			found = true;
1035 		}
1036 	}
1037 
1038 	return found;
1039 }
1040 
1041 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
1042 			struct dpll *best_clock)
1043 {
1044 	int refclk = 100000;
1045 	const struct intel_limit *limit = &intel_limits_bxt;
1046 
1047 	return chv_find_best_dpll(limit, crtc_state,
1048 				  target_clock, refclk, NULL, best_clock);
1049 }
1050 
1051 bool intel_crtc_active(struct drm_crtc *crtc)
1052 {
1053 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1054 
1055 	/* Be paranoid as we can arrive here with only partial
1056 	 * state retrieved from the hardware during setup.
1057 	 *
1058 	 * We can ditch the adjusted_mode.crtc_clock check as soon
1059 	 * as Haswell has gained clock readout/fastboot support.
1060 	 *
1061 	 * We can ditch the crtc->primary->fb check as soon as we can
1062 	 * properly reconstruct framebuffers.
1063 	 *
1064 	 * FIXME: The intel_crtc->active here should be switched to
1065 	 * crtc->state->active once we have proper CRTC states wired up
1066 	 * for atomic.
1067 	 */
1068 	return intel_crtc->active && crtc->primary->state->fb &&
1069 		intel_crtc->config->base.adjusted_mode.crtc_clock;
1070 }
1071 
1072 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1073 					     enum i915_pipe pipe)
1074 {
1075 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1076 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1077 
1078 	return intel_crtc->config->cpu_transcoder;
1079 }
1080 
1081 static bool pipe_dsl_stopped(struct drm_device *dev, enum i915_pipe pipe)
1082 {
1083 	struct drm_i915_private *dev_priv = dev->dev_private;
1084 	i915_reg_t reg = PIPEDSL(pipe);
1085 	u32 line1, line2;
1086 	u32 line_mask;
1087 
1088 	if (IS_GEN2(dev))
1089 		line_mask = DSL_LINEMASK_GEN2;
1090 	else
1091 		line_mask = DSL_LINEMASK_GEN3;
1092 
1093 	line1 = I915_READ(reg) & line_mask;
1094 	msleep(5);
1095 	line2 = I915_READ(reg) & line_mask;
1096 
1097 	return line1 == line2;
1098 }
1099 
1100 /*
1101  * intel_wait_for_pipe_off - wait for pipe to turn off
1102  * @crtc: crtc whose pipe to wait for
1103  *
1104  * After disabling a pipe, we can't wait for vblank in the usual way,
1105  * spinning on the vblank interrupt status bit, since we won't actually
1106  * see an interrupt when the pipe is disabled.
1107  *
1108  * On Gen4 and above:
1109  *   wait for the pipe register state bit to turn off
1110  *
1111  * Otherwise:
1112  *   wait for the display line value to settle (it usually
1113  *   ends up stopping at the start of the next frame).
1114  *
1115  */
1116 static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
1117 {
1118 	struct drm_device *dev = crtc->base.dev;
1119 	struct drm_i915_private *dev_priv = dev->dev_private;
1120 	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
1121 	enum i915_pipe pipe = crtc->pipe;
1122 
1123 	if (INTEL_INFO(dev)->gen >= 4) {
1124 		i915_reg_t reg = PIPECONF(cpu_transcoder);
1125 
1126 		/* Wait for the Pipe State to go off */
1127 		if (intel_wait_for_register(dev_priv,
1128 					    reg, I965_PIPECONF_ACTIVE, 0,
1129 					    100))
1130 			WARN(1, "pipe_off wait timed out\n");
1131 	} else {
1132 		/* Wait for the display line to settle */
1133 		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
1134 			WARN(1, "pipe_off wait timed out\n");
1135 	}
1136 }
1137 
1138 /* Only for pre-ILK configs */
1139 void assert_pll(struct drm_i915_private *dev_priv,
1140 		enum i915_pipe pipe, bool state)
1141 {
1142 	u32 val;
1143 	bool cur_state;
1144 
1145 	val = I915_READ(DPLL(pipe));
1146 	cur_state = !!(val & DPLL_VCO_ENABLE);
1147 	I915_STATE_WARN(cur_state != state,
1148 	     "PLL state assertion failure (expected %s, current %s)\n",
1149 			onoff(state), onoff(cur_state));
1150 }
1151 
1152 /* XXX: the dsi pll is shared between MIPI DSI ports */
1153 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1154 {
1155 	u32 val;
1156 	bool cur_state;
1157 
1158 	mutex_lock(&dev_priv->sb_lock);
1159 	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
1160 	mutex_unlock(&dev_priv->sb_lock);
1161 
1162 	cur_state = val & DSI_PLL_VCO_EN;
1163 	I915_STATE_WARN(cur_state != state,
1164 	     "DSI PLL state assertion failure (expected %s, current %s)\n",
1165 			onoff(state), onoff(cur_state));
1166 }
1167 
1168 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1169 			  enum i915_pipe pipe, bool state)
1170 {
1171 	bool cur_state;
1172 	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1173 								      pipe);
1174 
1175 	if (HAS_DDI(dev_priv)) {
1176 		/* DDI does not have a specific FDI_TX register */
1177 		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1178 		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1179 	} else {
1180 		u32 val = I915_READ(FDI_TX_CTL(pipe));
1181 		cur_state = !!(val & FDI_TX_ENABLE);
1182 	}
1183 	I915_STATE_WARN(cur_state != state,
1184 	     "FDI TX state assertion failure (expected %s, current %s)\n",
1185 			onoff(state), onoff(cur_state));
1186 }
1187 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1188 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1189 
1190 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1191 			  enum i915_pipe pipe, bool state)
1192 {
1193 	u32 val;
1194 	bool cur_state;
1195 
1196 	val = I915_READ(FDI_RX_CTL(pipe));
1197 	cur_state = !!(val & FDI_RX_ENABLE);
1198 	I915_STATE_WARN(cur_state != state,
1199 	     "FDI RX state assertion failure (expected %s, current %s)\n",
1200 			onoff(state), onoff(cur_state));
1201 }
1202 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1203 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1204 
1205 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1206 				      enum i915_pipe pipe)
1207 {
1208 	u32 val;
1209 
1210 	/* ILK FDI PLL is always enabled */
1211 	if (IS_GEN5(dev_priv))
1212 		return;
1213 
1214 	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
1215 	if (HAS_DDI(dev_priv))
1216 		return;
1217 
1218 	val = I915_READ(FDI_TX_CTL(pipe));
1219 	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1220 }
1221 
1222 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1223 		       enum i915_pipe pipe, bool state)
1224 {
1225 	u32 val;
1226 	bool cur_state;
1227 
1228 	val = I915_READ(FDI_RX_CTL(pipe));
1229 	cur_state = !!(val & FDI_RX_PLL_ENABLE);
1230 	I915_STATE_WARN(cur_state != state,
1231 	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
1232 			onoff(state), onoff(cur_state));
1233 }
1234 
1235 void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1236 			   enum i915_pipe pipe)
1237 {
1238 	struct drm_device *dev = dev_priv->dev;
1239 	i915_reg_t pp_reg;
1240 	u32 val;
1241 	enum i915_pipe panel_pipe = PIPE_A;
1242 	bool locked = true;
1243 
1244 	if (WARN_ON(HAS_DDI(dev)))
1245 		return;
1246 
1247 	if (HAS_PCH_SPLIT(dev)) {
1248 		u32 port_sel;
1249 
1250 		pp_reg = PCH_PP_CONTROL;
1251 		port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;
1252 
1253 		if (port_sel == PANEL_PORT_SELECT_LVDS &&
1254 		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
1255 			panel_pipe = PIPE_B;
1256 		/* XXX: else fix for eDP */
1257 	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
1258 		/* presumably write lock depends on pipe, not port select */
1259 		pp_reg = VLV_PIPE_PP_CONTROL(pipe);
1260 		panel_pipe = pipe;
1261 	} else {
1262 		pp_reg = PP_CONTROL;
1263 		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
1264 			panel_pipe = PIPE_B;
1265 	}
1266 
1267 	val = I915_READ(pp_reg);
1268 	if (!(val & PANEL_POWER_ON) ||
1269 	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
1270 		locked = false;
1271 
1272 	I915_STATE_WARN(panel_pipe == pipe && locked,
1273 	     "panel assertion failure, pipe %c regs locked\n",
1274 	     pipe_name(pipe));
1275 }
1276 
1277 static void assert_cursor(struct drm_i915_private *dev_priv,
1278 			  enum i915_pipe pipe, bool state)
1279 {
1280 	struct drm_device *dev = dev_priv->dev;
1281 	bool cur_state;
1282 
1283 	if (IS_845G(dev) || IS_I865G(dev))
1284 		cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
1285 	else
1286 		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
1287 
1288 	I915_STATE_WARN(cur_state != state,
1289 	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1290 			pipe_name(pipe), onoff(state), onoff(cur_state));
1291 }
1292 #define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1293 #define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1294 
1295 void assert_pipe(struct drm_i915_private *dev_priv,
1296 		 enum i915_pipe pipe, bool state)
1297 {
1298 	bool cur_state;
1299 	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1300 								      pipe);
1301 	enum intel_display_power_domain power_domain;
1302 
1303 	/* if we need the pipe quirk it must always be on */
1304 	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1305 	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1306 		state = true;
1307 
1308 	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
1309 	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
1310 		u32 val = I915_READ(PIPECONF(cpu_transcoder));
1311 		cur_state = !!(val & PIPECONF_ENABLE);
1312 
1313 		intel_display_power_put(dev_priv, power_domain);
1314 	} else {
1315 		cur_state = false;
1316 	}
1317 
1318 	I915_STATE_WARN(cur_state != state,
1319 	     "pipe %c assertion failure (expected %s, current %s)\n",
1320 			pipe_name(pipe), onoff(state), onoff(cur_state));
1321 }
1322 
1323 static void assert_plane(struct drm_i915_private *dev_priv,
1324 			 enum plane plane, bool state)
1325 {
1326 	u32 val;
1327 	bool cur_state;
1328 
1329 	val = I915_READ(DSPCNTR(plane));
1330 	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1331 	I915_STATE_WARN(cur_state != state,
1332 	     "plane %c assertion failure (expected %s, current %s)\n",
1333 			plane_name(plane), onoff(state), onoff(cur_state));
1334 }
1335 
1336 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
1337 #define assert_plane_disabled(d, p) assert_plane(d, p, false)
1338 
1339 static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1340 				   enum i915_pipe pipe)
1341 {
1342 	struct drm_device *dev = dev_priv->dev;
1343 	int i;
1344 
1345 	/* Primary planes are fixed to pipes on gen4+ */
1346 	if (INTEL_INFO(dev)->gen >= 4) {
1347 		u32 val = I915_READ(DSPCNTR(pipe));
1348 		I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
1349 		     "plane %c assertion failure, should be disabled but not\n",
1350 		     plane_name(pipe));
1351 		return;
1352 	}
1353 
1354 	/* Need to check both planes against the pipe */
1355 	for_each_pipe(dev_priv, i) {
1356 		u32 val = I915_READ(DSPCNTR(i));
1357 		enum i915_pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1358 			DISPPLANE_SEL_PIPE_SHIFT;
1359 		I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
1360 		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
1361 		     plane_name(i), pipe_name(pipe));
1362 	}
1363 }
1364 
1365 static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1366 				    enum i915_pipe pipe)
1367 {
1368 	struct drm_device *dev = dev_priv->dev;
1369 	int sprite;
1370 
1371 	if (INTEL_INFO(dev)->gen >= 9) {
1372 		for_each_sprite(dev_priv, pipe, sprite) {
1373 			u32 val = I915_READ(PLANE_CTL(pipe, sprite));
1374 			I915_STATE_WARN(val & PLANE_CTL_ENABLE,
1375 			     "plane %d assertion failure, should be off on pipe %c but is still active\n",
1376 			     sprite, pipe_name(pipe));
1377 		}
1378 	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
1379 		for_each_sprite(dev_priv, pipe, sprite) {
1380 			u32 val = I915_READ(SPCNTR(pipe, sprite));
1381 			I915_STATE_WARN(val & SP_ENABLE,
1382 			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1383 			     sprite_name(pipe, sprite), pipe_name(pipe));
1384 		}
1385 	} else if (INTEL_INFO(dev)->gen >= 7) {
1386 		u32 val = I915_READ(SPRCTL(pipe));
1387 		I915_STATE_WARN(val & SPRITE_ENABLE,
1388 		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1389 		     plane_name(pipe), pipe_name(pipe));
1390 	} else if (INTEL_INFO(dev)->gen >= 5) {
1391 		u32 val = I915_READ(DVSCNTR(pipe));
1392 		I915_STATE_WARN(val & DVS_ENABLE,
1393 		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1394 		     plane_name(pipe), pipe_name(pipe));
1395 	}
1396 }
1397 
1398 static void assert_vblank_disabled(struct drm_crtc *crtc)
1399 {
1400 	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
1401 		drm_crtc_vblank_put(crtc);
1402 }
1403 
1404 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1405 				    enum i915_pipe pipe)
1406 {
1407 	u32 val;
1408 	bool enabled;
1409 
1410 	val = I915_READ(PCH_TRANSCONF(pipe));
1411 	enabled = !!(val & TRANS_ENABLE);
1412 	I915_STATE_WARN(enabled,
1413 	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
1414 	     pipe_name(pipe));
1415 }
1416 
1417 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1418 			    enum i915_pipe pipe, u32 port_sel, u32 val)
1419 {
1420 	if ((val & DP_PORT_EN) == 0)
1421 		return false;
1422 
1423 	if (HAS_PCH_CPT(dev_priv)) {
1424 		u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
1425 		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1426 			return false;
1427 	} else if (IS_CHERRYVIEW(dev_priv)) {
1428 		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1429 			return false;
1430 	} else {
1431 		if ((val & DP_PIPE_MASK) != (pipe << 30))
1432 			return false;
1433 	}
1434 	return true;
1435 }
1436 
1437 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1438 			      enum i915_pipe pipe, u32 val)
1439 {
1440 	if ((val & SDVO_ENABLE) == 0)
1441 		return false;
1442 
1443 	if (HAS_PCH_CPT(dev_priv)) {
1444 		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1445 			return false;
1446 	} else if (IS_CHERRYVIEW(dev_priv)) {
1447 		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1448 			return false;
1449 	} else {
1450 		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1451 			return false;
1452 	}
1453 	return true;
1454 }
1455 
1456 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1457 			      enum i915_pipe pipe, u32 val)
1458 {
1459 	if ((val & LVDS_PORT_EN) == 0)
1460 		return false;
1461 
1462 	if (HAS_PCH_CPT(dev_priv)) {
1463 		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1464 			return false;
1465 	} else {
1466 		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1467 			return false;
1468 	}
1469 	return true;
1470 }
1471 
1472 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1473 			      enum i915_pipe pipe, u32 val)
1474 {
1475 	if ((val & ADPA_DAC_ENABLE) == 0)
1476 		return false;
1477 	if (HAS_PCH_CPT(dev_priv)) {
1478 		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1479 			return false;
1480 	} else {
1481 		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1482 			return false;
1483 	}
1484 	return true;
1485 }
1486 
1487 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1488 				   enum i915_pipe pipe, i915_reg_t reg,
1489 				   u32 port_sel)
1490 {
1491 	u32 val = I915_READ(reg);
1492 	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
1493 	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
1494 	     i915_mmio_reg_offset(reg), pipe_name(pipe));
1495 
1496 	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
1497 	     && (val & DP_PIPEB_SELECT),
1498 	     "IBX PCH dp port still using transcoder B\n");
1499 }
1500 
1501 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1502 				     enum i915_pipe pipe, i915_reg_t reg)
1503 {
1504 	u32 val = I915_READ(reg);
1505 	I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
1506 	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
1507 	     i915_mmio_reg_offset(reg), pipe_name(pipe));
1508 
1509 	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
1510 	     && (val & SDVO_PIPE_B_SELECT),
1511 	     "IBX PCH hdmi port still using transcoder B\n");
1512 }
1513 
1514 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1515 				      enum i915_pipe pipe)
1516 {
1517 	u32 val;
1518 
1519 	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1520 	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1521 	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
1522 
1523 	val = I915_READ(PCH_ADPA);
1524 	I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
1525 	     "PCH VGA enabled on transcoder %c, should be disabled\n",
1526 	     pipe_name(pipe));
1527 
1528 	val = I915_READ(PCH_LVDS);
1529 	I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
1530 	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
1531 	     pipe_name(pipe));
1532 
1533 	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1534 	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1535 	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
1536 }
1537 
1538 static void _vlv_enable_pll(struct intel_crtc *crtc,
1539 			    const struct intel_crtc_state *pipe_config)
1540 {
1541 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1542 	enum i915_pipe pipe = crtc->pipe;
1543 
1544 	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1545 	POSTING_READ(DPLL(pipe));
1546 	udelay(150);
1547 
1548 	if (intel_wait_for_register(dev_priv,
1549 				    DPLL(pipe),
1550 				    DPLL_LOCK_VLV,
1551 				    DPLL_LOCK_VLV,
1552 				    1))
1553 		DRM_ERROR("DPLL %d failed to lock\n", pipe);
1554 }
1555 
1556 static void vlv_enable_pll(struct intel_crtc *crtc,
1557 			   const struct intel_crtc_state *pipe_config)
1558 {
1559 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1560 	enum i915_pipe pipe = crtc->pipe;
1561 
1562 	assert_pipe_disabled(dev_priv, pipe);
1563 
1564 	/* PLL is protected by panel, make sure we can write it */
1565 	assert_panel_unlocked(dev_priv, pipe);
1566 
1567 	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
1568 		_vlv_enable_pll(crtc, pipe_config);
1569 
1570 	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1571 	POSTING_READ(DPLL_MD(pipe));
1572 }
1573 
1574 
1575 static void _chv_enable_pll(struct intel_crtc *crtc,
1576 			    const struct intel_crtc_state *pipe_config)
1577 {
1578 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1579 	enum i915_pipe pipe = crtc->pipe;
1580 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1581 	u32 tmp;
1582 
1583 	mutex_lock(&dev_priv->sb_lock);
1584 
1585 	/* Re-enable the 10-bit clock to the display controller */
1586 	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1587 	tmp |= DPIO_DCLKP_EN;
1588 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1589 
1590 	mutex_unlock(&dev_priv->sb_lock);
1591 
1592 	/*
1593 	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1594 	 */
1595 	udelay(1);
1596 
1597 	/* Enable PLL */
1598 	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1599 
1600 	/* Check PLL is locked */
1601 	if (intel_wait_for_register(dev_priv,
1602 				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
1603 				    1))
1604 		DRM_ERROR("PLL %d failed to lock\n", pipe);
1605 }
1606 
1607 static void chv_enable_pll(struct intel_crtc *crtc,
1608 			   const struct intel_crtc_state *pipe_config)
1609 {
1610 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1611 	enum i915_pipe pipe = crtc->pipe;
1612 
1613 	assert_pipe_disabled(dev_priv, pipe);
1614 
1615 	/* PLL is protected by panel, make sure we can write it */
1616 	assert_panel_unlocked(dev_priv, pipe);
1617 
1618 	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
1619 		_chv_enable_pll(crtc, pipe_config);
1620 
1621 	if (pipe != PIPE_A) {
1622 		/*
1623 		 * WaPixelRepeatModeFixForC0:chv
1624 		 *
1625 		 * DPLLCMD is AWOL. Use chicken bits to propagate
1626 		 * the value from DPLLBMD to either pipe B or C.
1627 		 */
1628 		I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C);
1629 		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
1630 		I915_WRITE(CBR4_VLV, 0);
1631 		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
1632 
1633 		/*
1634 		 * DPLLB VGA mode also seems to cause problems.
1635 		 * We should always have it disabled.
1636 		 */
1637 		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
1638 	} else {
1639 		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1640 		POSTING_READ(DPLL_MD(pipe));
1641 	}
1642 }
1643 
1644 static int intel_num_dvo_pipes(struct drm_device *dev)
1645 {
1646 	struct intel_crtc *crtc;
1647 	int count = 0;
1648 
1649 	for_each_intel_crtc(dev, crtc)
1650 		count += crtc->base.state->active &&
1651 			intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
1652 
1653 	return count;
1654 }
1655 
1656 static void i9xx_enable_pll(struct intel_crtc *crtc)
1657 {
1658 	struct drm_device *dev = crtc->base.dev;
1659 	struct drm_i915_private *dev_priv = dev->dev_private;
1660 	i915_reg_t reg = DPLL(crtc->pipe);
1661 	u32 dpll = crtc->config->dpll_hw_state.dpll;
1662 
1663 	assert_pipe_disabled(dev_priv, crtc->pipe);
1664 
1665 	/* PLL is protected by panel, make sure we can write it */
1666 	if (IS_MOBILE(dev) && !IS_I830(dev))
1667 		assert_panel_unlocked(dev_priv, crtc->pipe);
1668 
1669 	/* Enable DVO 2x clock on both PLLs if necessary */
1670 	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
1671 		/*
1672 		 * It appears to be important that we don't enable this
1673 		 * for the current pipe before otherwise configuring the
1674 		 * PLL. No idea how this should be handled if multiple
1675 		 * DVO outputs are enabled simultaneously.
1676 		 */
1677 		dpll |= DPLL_DVO_2X_MODE;
1678 		I915_WRITE(DPLL(!crtc->pipe),
1679 			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
1680 	}
1681 
1682 	/*
1683 	 * Apparently we need to have VGA mode enabled prior to changing
1684 	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1685 	 * dividers, even though the register value does change.
1686 	 */
1687 	I915_WRITE(reg, 0);
1688 
1689 	I915_WRITE(reg, dpll);
1690 
1691 	/* Wait for the clocks to stabilize. */
1692 	POSTING_READ(reg);
1693 	udelay(150);
1694 
1695 	if (INTEL_INFO(dev)->gen >= 4) {
1696 		I915_WRITE(DPLL_MD(crtc->pipe),
1697 			   crtc->config->dpll_hw_state.dpll_md);
1698 	} else {
1699 		/* The pixel multiplier can only be updated once the
1700 		 * DPLL is enabled and the clocks are stable.
1701 		 *
1702 		 * So write it again.
1703 		 */
1704 		I915_WRITE(reg, dpll);
1705 	}
1706 
1707 	/* We do this three times for luck */
1708 	I915_WRITE(reg, dpll);
1709 	POSTING_READ(reg);
1710 	udelay(150); /* wait for warmup */
1711 	I915_WRITE(reg, dpll);
1712 	POSTING_READ(reg);
1713 	udelay(150); /* wait for warmup */
1714 	I915_WRITE(reg, dpll);
1715 	POSTING_READ(reg);
1716 	udelay(150); /* wait for warmup */
1717 }
1718 
1719 /**
1720  * i9xx_disable_pll - disable a PLL
1721  * @crtc: crtc whose PLL is to be disabled
1722  *
1723  * Disable the PLL for @crtc's pipe, making sure
1724  * the pipe is off first.
1725  *
1726  * Note!  This is for pre-ILK only.
1727  */
1728 static void i9xx_disable_pll(struct intel_crtc *crtc)
1729 {
1730 	struct drm_device *dev = crtc->base.dev;
1731 	struct drm_i915_private *dev_priv = dev->dev_private;
1732 	enum i915_pipe pipe = crtc->pipe;
1733 
1734 	/* Disable DVO 2x clock on both PLLs if necessary */
1735 	if (IS_I830(dev) &&
1736 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
1737 	    !intel_num_dvo_pipes(dev)) {
1738 		I915_WRITE(DPLL(PIPE_B),
1739 			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
1740 		I915_WRITE(DPLL(PIPE_A),
1741 			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
1742 	}
1743 
1744 	/* Keep the pipe/PLL on if a force-on quirk requires it */
1745 	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1746 	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1747 		return;
1748 
1749 	/* Make sure the pipe isn't still relying on us */
1750 	assert_pipe_disabled(dev_priv, pipe);
1751 
1752 	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
1753 	POSTING_READ(DPLL(pipe));
1754 }
1755 
1756 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
1757 {
1758 	u32 val;
1759 
1760 	/* Make sure the pipe isn't still relying on us */
1761 	assert_pipe_disabled(dev_priv, pipe);
1762 
1763 	val = DPLL_INTEGRATED_REF_CLK_VLV |
1764 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1765 	if (pipe != PIPE_A)
1766 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1767 
1768 	I915_WRITE(DPLL(pipe), val);
1769 	POSTING_READ(DPLL(pipe));
1770 }
1771 
1772 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
1773 {
1774 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
1775 	u32 val;
1776 
1777 	/* Make sure the pipe isn't still relying on us */
1778 	assert_pipe_disabled(dev_priv, pipe);
1779 
1780 	val = DPLL_SSC_REF_CLK_CHV |
1781 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1782 	if (pipe != PIPE_A)
1783 		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1784 
1785 	I915_WRITE(DPLL(pipe), val);
1786 	POSTING_READ(DPLL(pipe));
1787 
1788 	mutex_lock(&dev_priv->sb_lock);
1789 
1790 	/* Disable the 10bit clock to the display controller */
1791 	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1792 	val &= ~DPIO_DCLKP_EN;
1793 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1794 
1795 	mutex_unlock(&dev_priv->sb_lock);
1796 }
1797 
1798 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1799 			 struct intel_digital_port *dport,
1800 			 unsigned int expected_mask)
1801 {
1802 	u32 port_mask;
1803 	i915_reg_t dpll_reg;
1804 
1805 	switch (dport->port) {
1806 	case PORT_B:
1807 		port_mask = DPLL_PORTB_READY_MASK;
1808 		dpll_reg = DPLL(0);
1809 		break;
1810 	case PORT_C:
1811 		port_mask = DPLL_PORTC_READY_MASK;
1812 		dpll_reg = DPLL(0);
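		/* port C's ready bits sit 4 bits above port B's in DPLL(0) */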
1813 		expected_mask <<= 4;
1814 		break;
1815 	case PORT_D:
1816 		port_mask = DPLL_PORTD_READY_MASK;
1817 		dpll_reg = DPIO_PHY_STATUS;
1818 		break;
1819 	default:
1820 		BUG();
1821 	}
1822 
1823 	if (intel_wait_for_register(dev_priv,
1824 				    dpll_reg, port_mask, expected_mask,
1825 				    1000))
1826 		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
1827 		     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
1828 }
1829 
1830 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1831 					   enum i915_pipe pipe)
1832 {
1833 	struct drm_device *dev = dev_priv->dev;
1834 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1835 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1836 	i915_reg_t reg;
1837 	uint32_t val, pipeconf_val;
1838 
1839 	/* Make sure PCH DPLL is enabled */
1840 	assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
1841 
1842 	/* FDI must be feeding us bits for PCH ports */
1843 	assert_fdi_tx_enabled(dev_priv, pipe);
1844 	assert_fdi_rx_enabled(dev_priv, pipe);
1845 
1846 	if (HAS_PCH_CPT(dev)) {
1847 		/* Workaround: Set the timing override bit before enabling the
1848 		 * pch transcoder. */
1849 		reg = TRANS_CHICKEN2(pipe);
1850 		val = I915_READ(reg);
1851 		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1852 		I915_WRITE(reg, val);
1853 	}
1854 
1855 	reg = PCH_TRANSCONF(pipe);
1856 	val = I915_READ(reg);
1857 	pipeconf_val = I915_READ(PIPECONF(pipe));
1858 
1859 	if (HAS_PCH_IBX(dev_priv)) {
1860 		/*
1861 		 * Make the BPC in the transcoder consistent with
1862 		 * that in the pipeconf reg. For HDMI we must use 8bpc
1863 		 * here for both 8bpc and 12bpc.
1864 		 */
1865 		val &= ~PIPECONF_BPC_MASK;
1866 		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
1867 			val |= PIPECONF_8BPC;
1868 		else
1869 			val |= pipeconf_val & PIPECONF_BPC_MASK;
1870 	}
1871 
1872 	val &= ~TRANS_INTERLACE_MASK;
1873 	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
1874 		if (HAS_PCH_IBX(dev_priv) &&
1875 		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
1876 			val |= TRANS_LEGACY_INTERLACED_ILK;
1877 		else
1878 			val |= TRANS_INTERLACED;
1879 	} else
1880 		val |= TRANS_PROGRESSIVE;
1881 
1882 	I915_WRITE(reg, val | TRANS_ENABLE);
1883 	if (intel_wait_for_register(dev_priv,
1884 				    reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
1885 				    100))
1886 		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1887 }
1888 
1889 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1890 				      enum transcoder cpu_transcoder)
1891 {
1892 	u32 val, pipeconf_val;
1893 
1894 	/* FDI must be feeding us bits for PCH ports */
1895 	assert_fdi_tx_enabled(dev_priv, (enum i915_pipe) cpu_transcoder);
1896 	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
1897 
1898 	/* Workaround: set timing override bit. */
1899 	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1900 	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1901 	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1902 
1903 	val = TRANS_ENABLE;
1904 	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1905 
1906 	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1907 	    PIPECONF_INTERLACED_ILK)
1908 		val |= TRANS_INTERLACED;
1909 	else
1910 		val |= TRANS_PROGRESSIVE;
1911 
1912 	I915_WRITE(LPT_TRANSCONF, val);
1913 	if (intel_wait_for_register(dev_priv,
1914 				    LPT_TRANSCONF,
1915 				    TRANS_STATE_ENABLE,
1916 				    TRANS_STATE_ENABLE,
1917 				    100))
1918 		DRM_ERROR("Failed to enable PCH transcoder\n");
1919 }
1920 
1921 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1922 					    enum i915_pipe pipe)
1923 {
1924 	struct drm_device *dev = dev_priv->dev;
1925 	i915_reg_t reg;
1926 	uint32_t val;
1927 
1928 	/* FDI relies on the transcoder */
1929 	assert_fdi_tx_disabled(dev_priv, pipe);
1930 	assert_fdi_rx_disabled(dev_priv, pipe);
1931 
1932 	/* Ports must be off as well */
1933 	assert_pch_ports_disabled(dev_priv, pipe);
1934 
1935 	reg = PCH_TRANSCONF(pipe);
1936 	val = I915_READ(reg);
1937 	val &= ~TRANS_ENABLE;
1938 	I915_WRITE(reg, val);
1939 	/* wait for PCH transcoder off, transcoder state */
1940 	if (intel_wait_for_register(dev_priv,
1941 				    reg, TRANS_STATE_ENABLE, 0,
1942 				    50))
1943 		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1944 
1945 	if (HAS_PCH_CPT(dev)) {
1946 		/* Workaround: Clear the timing override chicken bit again. */
1947 		reg = TRANS_CHICKEN2(pipe);
1948 		val = I915_READ(reg);
1949 		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1950 		I915_WRITE(reg, val);
1951 	}
1952 }
1953 
1954 static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1955 {
1956 	u32 val;
1957 
1958 	val = I915_READ(LPT_TRANSCONF);
1959 	val &= ~TRANS_ENABLE;
1960 	I915_WRITE(LPT_TRANSCONF, val);
1961 	/* wait for PCH transcoder off, transcoder state */
1962 	if (intel_wait_for_register(dev_priv,
1963 				    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
1964 				    50))
1965 		DRM_ERROR("Failed to disable PCH transcoder\n");
1966 
1967 	/* Workaround: clear timing override bit. */
1968 	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1969 	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1970 	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1971 }
1972 
1973 /**
1974  * intel_enable_pipe - enable a pipe, asserting requirements
1975  * @crtc: crtc responsible for the pipe
1976  *
1977  * Enable @crtc's pipe, making sure that various hardware specific requirements
1978  * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1979  */
1980 static void intel_enable_pipe(struct intel_crtc *crtc)
1981 {
1982 	struct drm_device *dev = crtc->base.dev;
1983 	struct drm_i915_private *dev_priv = dev->dev_private;
1984 	enum i915_pipe pipe = crtc->pipe;
1985 	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
1986 	enum i915_pipe pch_transcoder;
1987 	i915_reg_t reg;
1988 	u32 val;
1989 
1990 	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
1991 
1992 	assert_planes_disabled(dev_priv, pipe);
1993 	assert_cursor_disabled(dev_priv, pipe);
1994 	assert_sprites_disabled(dev_priv, pipe);
1995 
1996 	if (HAS_PCH_LPT(dev_priv))
1997 		pch_transcoder = TRANSCODER_A;
1998 	else
1999 		pch_transcoder = pipe;
2000 
2001 	/*
2002 	 * A pipe without a PLL won't actually be able to drive bits from
2003 	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
2004 	 * need the check.
2005 	 */
2006 	if (HAS_GMCH_DISPLAY(dev_priv)) {
2007 		if (crtc->config->has_dsi_encoder)
2008 			assert_dsi_pll_enabled(dev_priv);
2009 		else
2010 			assert_pll_enabled(dev_priv, pipe);
2011 	} else {
2012 		if (crtc->config->has_pch_encoder) {
2013 			/* if driving the PCH, we need FDI enabled */
2014 			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
2015 			assert_fdi_tx_pll_enabled(dev_priv,
2016 						  (enum i915_pipe) cpu_transcoder);
2017 		}
2018 		/* FIXME: assert CPU port conditions for SNB+ */
2019 	}
2020 
2021 	reg = PIPECONF(cpu_transcoder);
2022 	val = I915_READ(reg);
2023 	if (val & PIPECONF_ENABLE) {
2024 		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
2025 			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
2026 		return;
2027 	}
2028 
2029 	I915_WRITE(reg, val | PIPECONF_ENABLE);
2030 	POSTING_READ(reg);
2031 
2032 	/*
2033 	 * Until the pipe starts, DSL reads as 0, which would cause
2034 	 * an apparent vblank timestamp jump and also mess up the
2035 	 * frame count when it's derived from the timestamps. So let's
2036 	 * wait for the pipe to start properly before we call
2037 	 * drm_crtc_vblank_on().
2038 	 */
2039 	if (dev->max_vblank_count == 0 &&
2040 	    wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
2041 		DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
2042 }
2043 
2044 /**
2045  * intel_disable_pipe - disable a pipe, asserting requirements
2046  * @crtc: crtc whose pipe is to be disabled
2047  *
2048  * Disable the pipe of @crtc, making sure that various hardware
2049  * specific requirements are met, if applicable, e.g. plane
2050  * disabled, panel fitter off, etc.
2051  *
2052  * Will wait until the pipe has shut down before returning.
2053  */
2054 static void intel_disable_pipe(struct intel_crtc *crtc)
2055 {
2056 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
2057 	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
2058 	enum i915_pipe pipe = crtc->pipe;
2059 	i915_reg_t reg;
2060 	u32 val;
2061 
2062 	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
2063 
2064 	/*
2065 	 * Make sure planes won't keep trying to pump pixels to us,
2066 	 * or we might hang the display.
2067 	 */
2068 	assert_planes_disabled(dev_priv, pipe);
2069 	assert_cursor_disabled(dev_priv, pipe);
2070 	assert_sprites_disabled(dev_priv, pipe);
2071 
2072 	reg = PIPECONF(cpu_transcoder);
2073 	val = I915_READ(reg);
2074 	if ((val & PIPECONF_ENABLE) == 0)
2075 		return;
2076 
2077 	/*
2078 	 * Double wide has implications for planes
2079 	 * so best keep it disabled when not needed.
2080 	 */
2081 	if (crtc->config->double_wide)
2082 		val &= ~PIPECONF_DOUBLE_WIDE;
2083 
2084 	/* Keep the pipe/PLL on if a force-on quirk requires it */
2085 	if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
2086 	    !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
2087 		val &= ~PIPECONF_ENABLE;
2088 
2089 	I915_WRITE(reg, val);
2090 	if ((val & PIPECONF_ENABLE) == 0)
2091 		intel_wait_for_pipe_off(crtc);
2092 }
2093 
2094 static bool need_vtd_wa(struct drm_device *dev)
2095 {
2096 #ifdef CONFIG_INTEL_IOMMU
2097 	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2098 		return true;
2099 #endif
2100 	return false;
2101 }
2102 
2103 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
2104 {
2105 	return IS_GEN2(dev_priv) ? 2048 : 4096;
2106 }
2107 
2108 static unsigned int intel_tile_width_bytes(const struct drm_i915_private *dev_priv,
2109 					   uint64_t fb_modifier, unsigned int cpp)
2110 {
2111 	switch (fb_modifier) {
2112 	case DRM_FORMAT_MOD_NONE:
2113 		return cpp;
2114 	case I915_FORMAT_MOD_X_TILED:
2115 		if (IS_GEN2(dev_priv))
2116 			return 128;
2117 		else
2118 			return 512;
2119 	case I915_FORMAT_MOD_Y_TILED:
2120 		if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
2121 			return 128;
2122 		else
2123 			return 512;
2124 	case I915_FORMAT_MOD_Yf_TILED:
2125 		switch (cpp) {
2126 		case 1:
2127 			return 64;
2128 		case 2:
2129 		case 4:
2130 			return 128;
2131 		case 8:
2132 		case 16:
2133 			return 256;
2134 		default:
2135 			MISSING_CASE(cpp);
2136 			return cpp;
2137 		}
2138 		break;
2139 	default:
2140 		MISSING_CASE(fb_modifier);
2141 		return cpp;
2142 	}
2143 }
2144 
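/*
 * Worked example for the tile math (illustration only): a gen4 X-tile
 * is 512 bytes wide and 4096 bytes in total, so at 4 bytes per pixel a
 * tile spans 512 / 4 = 128 pixels and 4096 / 512 = 8 rows, which is
 * what intel_tile_height() below computes.
 */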
2145 unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
2146 			       uint64_t fb_modifier, unsigned int cpp)
2147 {
2148 	if (fb_modifier == DRM_FORMAT_MOD_NONE)
2149 		return 1;
2150 	else
2151 		return intel_tile_size(dev_priv) /
2152 			intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
2153 }
2154 
2155 /* Return the tile dimensions in pixel units */
2156 static void intel_tile_dims(const struct drm_i915_private *dev_priv,
2157 			    unsigned int *tile_width,
2158 			    unsigned int *tile_height,
2159 			    uint64_t fb_modifier,
2160 			    unsigned int cpp)
2161 {
2162 	unsigned int tile_width_bytes =
2163 		intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
2164 
2165 	*tile_width = tile_width_bytes / cpp;
2166 	*tile_height = intel_tile_size(dev_priv) / tile_width_bytes;
2167 }
2168 
2169 unsigned int
2170 intel_fb_align_height(struct drm_device *dev, unsigned int height,
2171 		      uint32_t pixel_format, uint64_t fb_modifier)
2172 {
2173 	unsigned int cpp = drm_format_plane_cpp(pixel_format, 0);
2174 	unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp);
2175 
2176 	return ALIGN(height, tile_height);
2177 }
2178 
2179 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2180 {
2181 	unsigned int size = 0;
2182 	int i;
2183 
2184 	for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
2185 		size += rot_info->plane[i].width * rot_info->plane[i].height;
2186 
2187 	return size;
2188 }
2189 
2190 static void
2191 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2192 			struct drm_framebuffer *fb,
2193 			unsigned int rotation)
2194 {
2195 	if (intel_rotation_90_or_270(rotation)) {
2196 		*view = i915_ggtt_view_rotated;
2197 		view->params.rotated = to_intel_framebuffer(fb)->rot_info;
2198 	} else {
2199 		*view = i915_ggtt_view_normal;
2200 	}
2201 }
2202 
2203 static void
2204 intel_fill_fb_info(struct drm_i915_private *dev_priv,
2205 		   struct drm_framebuffer *fb)
2206 {
2207 	struct intel_rotation_info *info = &to_intel_framebuffer(fb)->rot_info;
2208 	unsigned int tile_size, tile_width, tile_height, cpp;
2209 
2210 	tile_size = intel_tile_size(dev_priv);
2211 
2212 	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
2213 	intel_tile_dims(dev_priv, &tile_width, &tile_height,
2214 			fb->modifier[0], cpp);
2215 
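	/* plane[] dimensions are stored in whole tiles, rounded up */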
2216 	info->plane[0].width = DIV_ROUND_UP(fb->pitches[0], tile_width * cpp);
2217 	info->plane[0].height = DIV_ROUND_UP(fb->height, tile_height);
2218 
2219 	if (info->pixel_format == DRM_FORMAT_NV12) {
2220 		cpp = drm_format_plane_cpp(fb->pixel_format, 1);
2221 		intel_tile_dims(dev_priv, &tile_width, &tile_height,
2222 				fb->modifier[1], cpp);
2223 
2224 		info->uv_offset = fb->offsets[1];
2225 		info->plane[1].width = DIV_ROUND_UP(fb->pitches[1], tile_width * cpp);
2226 		info->plane[1].height = DIV_ROUND_UP(fb->height / 2, tile_height);
2227 	}
2228 }
2229 
2230 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
2231 {
2232 	if (INTEL_INFO(dev_priv)->gen >= 9)
2233 		return 256 * 1024;
2234 	else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
2235 		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2236 		return 128 * 1024;
2237 	else if (INTEL_INFO(dev_priv)->gen >= 4)
2238 		return 4 * 1024;
2239 	else
2240 		return 0;
2241 }
2242 
2243 static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
2244 					 uint64_t fb_modifier)
2245 {
2246 	switch (fb_modifier) {
2247 	case DRM_FORMAT_MOD_NONE:
2248 		return intel_linear_alignment(dev_priv);
2249 	case I915_FORMAT_MOD_X_TILED:
2250 		if (INTEL_INFO(dev_priv)->gen >= 9)
2251 			return 256 * 1024;
2252 		return 0;
2253 	case I915_FORMAT_MOD_Y_TILED:
2254 	case I915_FORMAT_MOD_Yf_TILED:
2255 		return 1 * 1024 * 1024;
2256 	default:
2257 		MISSING_CASE(fb_modifier);
2258 		return 0;
2259 	}
2260 }
2261 
2262 int
2263 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2264 			   unsigned int rotation)
2265 {
2266 	struct drm_device *dev = fb->dev;
2267 	struct drm_i915_private *dev_priv = dev->dev_private;
2268 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2269 	struct i915_ggtt_view view;
2270 	u32 alignment;
2271 	int ret;
2272 
2273 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2274 
2275 	alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
2276 
2277 	intel_fill_fb_ggtt_view(&view, fb, rotation);
2278 
2279 	/* Note that the w/a also requires 64 PTE of padding following the
2280 	 * bo. We currently fill all unused PTE with the shadow page and so
2281 	 * we should always have valid PTE following the scanout preventing
2282 	 * the VT-d warning.
2283 	 */
2284 	if (need_vtd_wa(dev) && alignment < 256 * 1024)
2285 		alignment = 256 * 1024;
2286 
2287 	/*
2288 	 * Global gtt pte registers are special registers which actually forward
2289 	 * writes to a chunk of system memory, which means there is no risk
2290 	 * that the register values disappear as soon as we call
2291 	 * intel_runtime_pm_put(), so it is correct to wrap only the
2292 	 * pin/unpin/fence and not more.
2293 	 */
2294 	intel_runtime_pm_get(dev_priv);
2295 
2296 	ret = i915_gem_object_pin_to_display_plane(obj, alignment,
2297 						   &view);
2298 	if (ret)
2299 		goto err_pm;
2300 
2301 	/* Install a fence for tiled scan-out. Pre-i965 always needs a
2302 	 * fence, whereas 965+ only requires a fence if using
2303 	 * framebuffer compression.  For simplicity, we always install
2304 	 * a fence as the cost is not that onerous.
2305 	 */
2306 	if (view.type == I915_GGTT_VIEW_NORMAL) {
2307 		ret = i915_gem_object_get_fence(obj);
2308 		if (ret == -EDEADLK) {
2309 			/*
2310 			 * -EDEADLK means there are no free fences
2311 			 * and no pending flips.
2312 			 *
2313 			 * This is propagated to atomic, but it uses
2314 			 * -EDEADLK to force a locking recovery, so
2315 			 * change the returned error to -EBUSY.
2316 			 */
2317 			ret = -EBUSY;
2318 			goto err_unpin;
2319 		} else if (ret)
2320 			goto err_unpin;
2321 
2322 		i915_gem_object_pin_fence(obj);
2323 	}
2324 
2325 	intel_runtime_pm_put(dev_priv);
2326 	return 0;
2327 
2328 err_unpin:
2329 	i915_gem_object_unpin_from_display_plane(obj, &view);
2330 err_pm:
2331 	intel_runtime_pm_put(dev_priv);
2332 	return ret;
2333 }
2334 
2335 void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
2336 {
2337 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2338 	struct i915_ggtt_view view;
2339 
2340 	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
2341 
2342 	intel_fill_fb_ggtt_view(&view, fb, rotation);
2343 
2344 	if (view.type == I915_GGTT_VIEW_NORMAL)
2345 		i915_gem_object_unpin_fence(obj);
2346 
2347 	i915_gem_object_unpin_from_display_plane(obj, &view);
2348 }
2349 
2350 /*
2351  * Adjust the tile offset by moving the difference into
2352  * the x/y offsets.
2353  *
2354  * Input tile dimensions and pitch must already be
2355  * rotated to match x and y, and in pixel units.
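 *
 * Worked example (a sketch): with pitch_tiles = 10 and old_offset
 * 5 tiles beyond new_offset, tiles = 5, so y gains 5 / 10 = 0 tile
 * rows and x gains (5 % 10) * tile_width = 5 tile widths in pixels.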
2356  */
2357 static u32 intel_adjust_tile_offset(int *x, int *y,
2358 				    unsigned int tile_width,
2359 				    unsigned int tile_height,
2360 				    unsigned int tile_size,
2361 				    unsigned int pitch_tiles,
2362 				    u32 old_offset,
2363 				    u32 new_offset)
2364 {
2365 	unsigned int tiles;
2366 
2367 	WARN_ON(old_offset & (tile_size - 1));
2368 	WARN_ON(new_offset & (tile_size - 1));
2369 	WARN_ON(new_offset > old_offset);
2370 
2371 	tiles = (old_offset - new_offset) / tile_size;
2372 
2373 	*y += tiles / pitch_tiles * tile_height;
2374 	*x += tiles % pitch_tiles * tile_width;
2375 
2376 	return new_offset;
2377 }
2378 
2379 /*
2380  * Computes the linear offset to the base tile and adjusts
2381  * x, y. Bytes per pixel is assumed to be a power of two.
2382  *
2383  * In the 90/270 rotated case, x and y are assumed
2384  * to be already rotated to match the rotated GTT view, and
2385  * pitch is the tile_height aligned framebuffer height.
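 *
 * The tiled path below (a sketch of the math): split x/y into whole
 * tiles plus an intra-tile remainder, compute the byte offset of the
 * containing tile, round it down to the surface alignment, and move
 * the rounding difference back into x/y via intel_adjust_tile_offset().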
2386  */
2387 u32 intel_compute_tile_offset(int *x, int *y,
2388 			      const struct drm_framebuffer *fb, int plane,
2389 			      unsigned int pitch,
2390 			      unsigned int rotation)
2391 {
2392 	const struct drm_i915_private *dev_priv = to_i915(fb->dev);
2393 	uint64_t fb_modifier = fb->modifier[plane];
2394 	unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
2395 	u32 offset, offset_aligned, alignment;
2396 
2397 	alignment = intel_surf_alignment(dev_priv, fb_modifier);
2398 	if (alignment)
2399 		alignment--;
2400 
2401 	if (fb_modifier != DRM_FORMAT_MOD_NONE) {
2402 		unsigned int tile_size, tile_width, tile_height;
2403 		unsigned int tile_rows, tiles, pitch_tiles;
2404 
2405 		tile_size = intel_tile_size(dev_priv);
2406 		intel_tile_dims(dev_priv, &tile_width, &tile_height,
2407 				fb_modifier, cpp);
2408 
2409 		if (intel_rotation_90_or_270(rotation)) {
2410 			pitch_tiles = pitch / tile_height;
2411 			swap(tile_width, tile_height);
2412 		} else {
2413 			pitch_tiles = pitch / (tile_width * cpp);
2414 		}
2415 
2416 		tile_rows = *y / tile_height;
2417 		*y %= tile_height;
2418 
2419 		tiles = *x / tile_width;
2420 		*x %= tile_width;
2421 
2422 		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2423 		offset_aligned = offset & ~alignment;
2424 
2425 		intel_adjust_tile_offset(x, y, tile_width, tile_height,
2426 					 tile_size, pitch_tiles,
2427 					 offset, offset_aligned);
2428 	} else {
2429 		offset = *y * pitch + *x * cpp;
2430 		offset_aligned = offset & ~alignment;
2431 
2432 		*y = (offset & alignment) / pitch;
2433 		*x = ((offset & alignment) - *y * pitch) / cpp;
2434 	}
2435 
2436 	return offset_aligned;
2437 }
2438 
2439 static int i9xx_format_to_fourcc(int format)
2440 {
2441 	switch (format) {
2442 	case DISPPLANE_8BPP:
2443 		return DRM_FORMAT_C8;
2444 	case DISPPLANE_BGRX555:
2445 		return DRM_FORMAT_XRGB1555;
2446 	case DISPPLANE_BGRX565:
2447 		return DRM_FORMAT_RGB565;
2448 	default:
2449 	case DISPPLANE_BGRX888:
2450 		return DRM_FORMAT_XRGB8888;
2451 	case DISPPLANE_RGBX888:
2452 		return DRM_FORMAT_XBGR8888;
2453 	case DISPPLANE_BGRX101010:
2454 		return DRM_FORMAT_XRGB2101010;
2455 	case DISPPLANE_RGBX101010:
2456 		return DRM_FORMAT_XBGR2101010;
2457 	}
2458 }
2459 
2460 static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2461 {
2462 	switch (format) {
2463 	case PLANE_CTL_FORMAT_RGB_565:
2464 		return DRM_FORMAT_RGB565;
2465 	default:
2466 	case PLANE_CTL_FORMAT_XRGB_8888:
2467 		if (rgb_order) {
2468 			if (alpha)
2469 				return DRM_FORMAT_ABGR8888;
2470 			else
2471 				return DRM_FORMAT_XBGR8888;
2472 		} else {
2473 			if (alpha)
2474 				return DRM_FORMAT_ARGB8888;
2475 			else
2476 				return DRM_FORMAT_XRGB8888;
2477 		}
2478 	case PLANE_CTL_FORMAT_XRGB_2101010:
2479 		if (rgb_order)
2480 			return DRM_FORMAT_XBGR2101010;
2481 		else
2482 			return DRM_FORMAT_XRGB2101010;
2483 	}
2484 }
2485 
2486 static bool
2487 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2488 			      struct intel_initial_plane_config *plane_config)
2489 {
2490 	struct drm_device *dev = crtc->base.dev;
2491 	struct drm_i915_private *dev_priv = to_i915(dev);
2492 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
2493 	struct drm_i915_gem_object *obj = NULL;
2494 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2495 	struct drm_framebuffer *fb = &plane_config->fb->base;
2496 	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
2497 	u32 size_aligned = round_up(plane_config->base + plane_config->size,
2498 				    PAGE_SIZE);
2499 
2500 	size_aligned -= base_aligned;
2501 
2502 	if (plane_config->size == 0)
2503 		return false;
2504 
2505 	/* If the FB is too big, just don't use it since fbdev is not very
2506 	 * important and we should probably use that space with FBC or other
2507 	 * features. */
2508 	if (size_aligned * 2 > ggtt->stolen_usable_size)
2509 		return false;
2510 
2511 	mutex_lock(&dev->struct_mutex);
2512 
2513 	obj = i915_gem_object_create_stolen_for_preallocated(dev,
2514 							     base_aligned,
2515 							     base_aligned,
2516 							     size_aligned);
2517 	if (!obj) {
2518 		mutex_unlock(&dev->struct_mutex);
2519 		return false;
2520 	}
2521 
2522 	obj->tiling_mode = plane_config->tiling;
2523 	if (obj->tiling_mode == I915_TILING_X)
2524 		obj->stride = fb->pitches[0];
2525 
2526 	mode_cmd.pixel_format = fb->pixel_format;
2527 	mode_cmd.width = fb->width;
2528 	mode_cmd.height = fb->height;
2529 	mode_cmd.pitches[0] = fb->pitches[0];
2530 	mode_cmd.modifier[0] = fb->modifier[0];
2531 	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
2532 
2533 	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
2534 				   &mode_cmd, obj)) {
2535 		DRM_DEBUG_KMS("intel fb init failed\n");
2536 		goto out_unref_obj;
2537 	}
2538 
2539 	mutex_unlock(&dev->struct_mutex);
2540 
2541 	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
2542 	return true;
2543 
2544 out_unref_obj:
2545 	drm_gem_object_unreference(&obj->base);
2546 	mutex_unlock(&dev->struct_mutex);
2547 	return false;
2548 }
2549 
2550 /* Update plane->state->fb to match plane->fb after driver-internal updates */
2551 static void
2552 update_state_fb(struct drm_plane *plane)
2553 {
2554 	if (plane->fb == plane->state->fb)
2555 		return;
2556 
2557 	if (plane->state->fb)
2558 		drm_framebuffer_unreference(plane->state->fb);
2559 	plane->state->fb = plane->fb;
2560 	if (plane->state->fb)
2561 		drm_framebuffer_reference(plane->state->fb);
2562 }
2563 
2564 static void
2565 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2566 			     struct intel_initial_plane_config *plane_config)
2567 {
2568 	struct drm_device *dev = intel_crtc->base.dev;
2569 	struct drm_i915_private *dev_priv = dev->dev_private;
2570 	struct drm_crtc *c;
2571 	struct intel_crtc *i;
2572 	struct drm_i915_gem_object *obj;
2573 	struct drm_plane *primary = intel_crtc->base.primary;
2574 	struct drm_plane_state *plane_state = primary->state;
2575 	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
2576 	struct intel_plane *intel_plane = to_intel_plane(primary);
2577 	struct intel_plane_state *intel_state =
2578 		to_intel_plane_state(plane_state);
2579 	struct drm_framebuffer *fb;
2580 
2581 	if (!plane_config->fb)
2582 		return;
2583 
2584 	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
2585 		fb = &plane_config->fb->base;
2586 		goto valid_fb;
2587 	}
2588 
2589 	kfree(plane_config->fb);
2590 
2591 	/*
2592 	 * Failed to alloc the obj, check to see if we should share
2593 	 * an fb with another CRTC instead
2594 	 */
2595 	for_each_crtc(dev, c) {
2596 		i = to_intel_crtc(c);
2597 
2598 		if (c == &intel_crtc->base)
2599 			continue;
2600 
2601 		if (!i->active)
2602 			continue;
2603 
2604 		fb = c->primary->fb;
2605 		if (!fb)
2606 			continue;
2607 
2608 		obj = intel_fb_obj(fb);
2609 		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
2610 			drm_framebuffer_reference(fb);
2611 			goto valid_fb;
2612 		}
2613 	}
2614 
2615 	/*
2616 	 * We've failed to reconstruct the BIOS FB.  Current display state
2617 	 * indicates that the primary plane is visible, but has a NULL FB,
2618 	 * which will lead to problems later if we don't fix it up.  The
2619 	 * simplest solution is to just disable the primary plane now and
2620 	 * pretend the BIOS never had it enabled.
2621 	 */
2622 	to_intel_plane_state(plane_state)->visible = false;
2623 	crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
2624 	intel_pre_disable_primary_noatomic(&intel_crtc->base);
2625 	intel_plane->disable_plane(primary, &intel_crtc->base);
2626 
2627 	return;
2628 
2629 valid_fb:
2630 	plane_state->src_x = 0;
2631 	plane_state->src_y = 0;
2632 	plane_state->src_w = fb->width << 16;
2633 	plane_state->src_h = fb->height << 16;
2634 
2635 	plane_state->crtc_x = 0;
2636 	plane_state->crtc_y = 0;
2637 	plane_state->crtc_w = fb->width;
2638 	plane_state->crtc_h = fb->height;
2639 
2640 	intel_state->src.x1 = plane_state->src_x;
2641 	intel_state->src.y1 = plane_state->src_y;
2642 	intel_state->src.x2 = plane_state->src_x + plane_state->src_w;
2643 	intel_state->src.y2 = plane_state->src_y + plane_state->src_h;
2644 	intel_state->dst.x1 = plane_state->crtc_x;
2645 	intel_state->dst.y1 = plane_state->crtc_y;
2646 	intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
2647 	intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h;
2648 
2649 	obj = intel_fb_obj(fb);
2650 	if (obj->tiling_mode != I915_TILING_NONE)
2651 		dev_priv->preserve_bios_swizzle = true;
2652 
2653 	drm_framebuffer_reference(fb);
2654 	primary->fb = primary->state->fb = fb;
2655 	primary->crtc = primary->state->crtc = &intel_crtc->base;
2656 	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
2657 	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
2658 }
2659 
2660 static void i9xx_update_primary_plane(struct drm_plane *primary,
2661 				      const struct intel_crtc_state *crtc_state,
2662 				      const struct intel_plane_state *plane_state)
2663 {
2664 	struct drm_device *dev = primary->dev;
2665 	struct drm_i915_private *dev_priv = dev->dev_private;
2666 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
2667 	struct drm_framebuffer *fb = plane_state->base.fb;
2668 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2669 	int plane = intel_crtc->plane;
2670 	u32 linear_offset;
2671 	u32 dspcntr;
2672 	i915_reg_t reg = DSPCNTR(plane);
2673 	unsigned int rotation = plane_state->base.rotation;
2674 	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
2675 	int x = plane_state->src.x1 >> 16;
2676 	int y = plane_state->src.y1 >> 16;
2677 
2678 	dspcntr = DISPPLANE_GAMMA_ENABLE;
2679 
2680 	dspcntr |= DISPLAY_PLANE_ENABLE;
2681 
2682 	if (INTEL_INFO(dev)->gen < 4) {
2683 		if (intel_crtc->pipe == PIPE_B)
2684 			dspcntr |= DISPPLANE_SEL_PIPE_B;
2685 
2686 		/* pipesrc and dspsize control the size that is scaled from,
2687 		 * which should always be the user's requested size.
2688 		 */
2689 		I915_WRITE(DSPSIZE(plane),
2690 			   ((crtc_state->pipe_src_h - 1) << 16) |
2691 			   (crtc_state->pipe_src_w - 1));
2692 		I915_WRITE(DSPPOS(plane), 0);
2693 	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
2694 		I915_WRITE(PRIMSIZE(plane),
2695 			   ((crtc_state->pipe_src_h - 1) << 16) |
2696 			   (crtc_state->pipe_src_w - 1));
2697 		I915_WRITE(PRIMPOS(plane), 0);
2698 		I915_WRITE(PRIMCNSTALPHA(plane), 0);
2699 	}
2700 
2701 	switch (fb->pixel_format) {
2702 	case DRM_FORMAT_C8:
2703 		dspcntr |= DISPPLANE_8BPP;
2704 		break;
2705 	case DRM_FORMAT_XRGB1555:
2706 		dspcntr |= DISPPLANE_BGRX555;
2707 		break;
2708 	case DRM_FORMAT_RGB565:
2709 		dspcntr |= DISPPLANE_BGRX565;
2710 		break;
2711 	case DRM_FORMAT_XRGB8888:
2712 		dspcntr |= DISPPLANE_BGRX888;
2713 		break;
2714 	case DRM_FORMAT_XBGR8888:
2715 		dspcntr |= DISPPLANE_RGBX888;
2716 		break;
2717 	case DRM_FORMAT_XRGB2101010:
2718 		dspcntr |= DISPPLANE_BGRX101010;
2719 		break;
2720 	case DRM_FORMAT_XBGR2101010:
2721 		dspcntr |= DISPPLANE_RGBX101010;
2722 		break;
2723 	default:
2724 		BUG();
2725 	}
2726 
2727 	if (INTEL_INFO(dev)->gen >= 4 &&
2728 	    obj->tiling_mode != I915_TILING_NONE)
2729 		dspcntr |= DISPPLANE_TILED;
2730 
2731 	if (IS_G4X(dev))
2732 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2733 
2734 	linear_offset = y * fb->pitches[0] + x * cpp;
2735 
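	/*
	 * Gen4+ scans out from a tile-aligned DSPSURF base plus the x/y
	 * remainder written to DSPTILEOFF; pre-gen4 only has a linear
	 * DSPADDR, so the whole byte offset stays in linear_offset.
	 */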
2736 	if (INTEL_INFO(dev)->gen >= 4) {
2737 		intel_crtc->dspaddr_offset =
2738 			intel_compute_tile_offset(&x, &y, fb, 0,
2739 						  fb->pitches[0], rotation);
2740 		linear_offset -= intel_crtc->dspaddr_offset;
2741 	} else {
2742 		intel_crtc->dspaddr_offset = linear_offset;
2743 	}
2744 
2745 	if (rotation == DRM_ROTATE_180) {
2746 		dspcntr |= DISPPLANE_ROTATE_180;
2747 
2748 		x += (crtc_state->pipe_src_w - 1);
2749 		y += (crtc_state->pipe_src_h - 1);
2750 
2751 		/* Find the last pixel of the last line of the display
2752 		 * data and add it to linear_offset. */
2753 		linear_offset +=
2754 			(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
2755 			(crtc_state->pipe_src_w - 1) * cpp;
2756 	}
2757 
2758 	intel_crtc->adjusted_x = x;
2759 	intel_crtc->adjusted_y = y;
2760 
2761 	I915_WRITE(reg, dspcntr);
2762 
2763 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2764 	if (INTEL_INFO(dev)->gen >= 4) {
2765 		I915_WRITE(DSPSURF(plane),
2766 			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2767 		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2768 		I915_WRITE(DSPLINOFF(plane), linear_offset);
2769 	} else
2770 		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
2771 	POSTING_READ(reg);
2772 }
2773 
2774 static void i9xx_disable_primary_plane(struct drm_plane *primary,
2775 				       struct drm_crtc *crtc)
2776 {
2777 	struct drm_device *dev = crtc->dev;
2778 	struct drm_i915_private *dev_priv = dev->dev_private;
2779 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2780 	int plane = intel_crtc->plane;
2781 
2782 	I915_WRITE(DSPCNTR(plane), 0);
2783 	if (INTEL_INFO(dev_priv)->gen >= 4)
2784 		I915_WRITE(DSPSURF(plane), 0);
2785 	else
2786 		I915_WRITE(DSPADDR(plane), 0);
2787 	POSTING_READ(DSPCNTR(plane));
2788 }
2789 
2790 static void ironlake_update_primary_plane(struct drm_plane *primary,
2791 					  const struct intel_crtc_state *crtc_state,
2792 					  const struct intel_plane_state *plane_state)
2793 {
2794 	struct drm_device *dev = primary->dev;
2795 	struct drm_i915_private *dev_priv = dev->dev_private;
2796 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
2797 	struct drm_framebuffer *fb = plane_state->base.fb;
2798 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2799 	int plane = intel_crtc->plane;
2800 	u32 linear_offset;
2801 	u32 dspcntr;
2802 	i915_reg_t reg = DSPCNTR(plane);
2803 	unsigned int rotation = plane_state->base.rotation;
2804 	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
2805 	int x = plane_state->src.x1 >> 16;
2806 	int y = plane_state->src.y1 >> 16;
2807 
2808 	dspcntr = DISPPLANE_GAMMA_ENABLE;
2809 	dspcntr |= DISPLAY_PLANE_ENABLE;
2810 
2811 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2812 		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
2813 
2814 	switch (fb->pixel_format) {
2815 	case DRM_FORMAT_C8:
2816 		dspcntr |= DISPPLANE_8BPP;
2817 		break;
2818 	case DRM_FORMAT_RGB565:
2819 		dspcntr |= DISPPLANE_BGRX565;
2820 		break;
2821 	case DRM_FORMAT_XRGB8888:
2822 		dspcntr |= DISPPLANE_BGRX888;
2823 		break;
2824 	case DRM_FORMAT_XBGR8888:
2825 		dspcntr |= DISPPLANE_RGBX888;
2826 		break;
2827 	case DRM_FORMAT_XRGB2101010:
2828 		dspcntr |= DISPPLANE_BGRX101010;
2829 		break;
2830 	case DRM_FORMAT_XBGR2101010:
2831 		dspcntr |= DISPPLANE_RGBX101010;
2832 		break;
2833 	default:
2834 		BUG();
2835 	}
2836 
2837 	if (obj->tiling_mode != I915_TILING_NONE)
2838 		dspcntr |= DISPPLANE_TILED;
2839 
2840 	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
2841 		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2842 
2843 	linear_offset = y * fb->pitches[0] + x * cpp;
2844 	intel_crtc->dspaddr_offset =
2845 		intel_compute_tile_offset(&x, &y, fb, 0,
2846 					  fb->pitches[0], rotation);
2847 	linear_offset -= intel_crtc->dspaddr_offset;
2848 	if (rotation == DRM_ROTATE_180) {
2849 		dspcntr |= DISPPLANE_ROTATE_180;
2850 
2851 		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
2852 			x += (crtc_state->pipe_src_w - 1);
2853 			y += (crtc_state->pipe_src_h - 1);
2854 
2855 			/* Find the last pixel of the last line of the display
2856 			 * data and add it to linear_offset. */
2857 			linear_offset +=
2858 				(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
2859 				(crtc_state->pipe_src_w - 1) * cpp;
2860 		}
2861 	}
2862 
2863 	intel_crtc->adjusted_x = x;
2864 	intel_crtc->adjusted_y = y;
2865 
2866 	I915_WRITE(reg, dspcntr);
2867 
2868 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2869 	I915_WRITE(DSPSURF(plane),
2870 		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2871 	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2872 		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2873 	} else {
2874 		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2875 		I915_WRITE(DSPLINOFF(plane), linear_offset);
2876 	}
2877 	POSTING_READ(reg);
2878 }
2879 
2880 u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
2881 			      uint64_t fb_modifier, uint32_t pixel_format)
2882 {
2883 	if (fb_modifier == DRM_FORMAT_MOD_NONE) {
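		/* linear scanout strides must be a multiple of 64 bytes */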
2884 		return 64;
2885 	} else {
2886 		int cpp = drm_format_plane_cpp(pixel_format, 0);
2887 
2888 		return intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
2889 	}
2890 }
2891 
2892 u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
2893 			   struct drm_i915_gem_object *obj,
2894 			   unsigned int plane)
2895 {
2896 	struct i915_ggtt_view view;
2897 	struct i915_vma *vma;
2898 	u64 offset;
2899 
2900 	intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
2901 				intel_plane->base.state->rotation);
2902 
2903 	vma = i915_gem_obj_to_ggtt_view(obj, &view);
2904 	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
2905 		view.type))
2906 		return -1;
2907 
2908 	offset = vma->node.start;
2909 
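	/* plane 1 is the chroma (UV) plane of NV12; skip to its start page */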
2910 	if (plane == 1) {
2911 		offset += vma->ggtt_view.params.rotated.uv_start_page *
2912 			  PAGE_SIZE;
2913 	}
2914 
2915 	WARN_ON(upper_32_bits(offset));
2916 
2917 	return lower_32_bits(offset);
2918 }
2919 
2920 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
2921 {
2922 	struct drm_device *dev = intel_crtc->base.dev;
2923 	struct drm_i915_private *dev_priv = dev->dev_private;
2924 
2925 	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
2926 	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
2927 	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
2928 }
2929 
2930 /*
2931  * This function detaches (aka. unbinds) unused scalers in hardware
2932  */
2933 static void skl_detach_scalers(struct intel_crtc *intel_crtc)
2934 {
2935 	struct intel_crtc_scaler_state *scaler_state;
2936 	int i;
2937 
2938 	scaler_state = &intel_crtc->config->scaler_state;
2939 
2940 	/* loop through and disable scalers that aren't in use */
2941 	for (i = 0; i < intel_crtc->num_scalers; i++) {
2942 		if (!scaler_state->scalers[i].in_use)
2943 			skl_detach_scaler(intel_crtc, i);
2944 	}
2945 }
2946 
2947 u32 skl_plane_ctl_format(uint32_t pixel_format)
2948 {
2949 	switch (pixel_format) {
2950 	case DRM_FORMAT_C8:
2951 		return PLANE_CTL_FORMAT_INDEXED;
2952 	case DRM_FORMAT_RGB565:
2953 		return PLANE_CTL_FORMAT_RGB_565;
2954 	case DRM_FORMAT_XBGR8888:
2955 		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
2956 	case DRM_FORMAT_XRGB8888:
2957 		return PLANE_CTL_FORMAT_XRGB_8888;
2958 	/*
2959 	 * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
2960 	 * to be already pre-multiplied. We need to add a knob (or a different
2961 	 * DRM_FORMAT) for user-space to configure that.
2962 	 */
2963 	case DRM_FORMAT_ABGR8888:
2964 		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
2965 			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
2966 	case DRM_FORMAT_ARGB8888:
2967 		return PLANE_CTL_FORMAT_XRGB_8888 |
2968 			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
2969 	case DRM_FORMAT_XRGB2101010:
2970 		return PLANE_CTL_FORMAT_XRGB_2101010;
2971 	case DRM_FORMAT_XBGR2101010:
2972 		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
2973 	case DRM_FORMAT_YUYV:
2974 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
2975 	case DRM_FORMAT_YVYU:
2976 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
2977 	case DRM_FORMAT_UYVY:
2978 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
2979 	case DRM_FORMAT_VYUY:
2980 		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
2981 	default:
2982 		MISSING_CASE(pixel_format);
2983 	}
2984 
2985 	return 0;
2986 }
2987 
2988 u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
2989 {
2990 	switch (fb_modifier) {
2991 	case DRM_FORMAT_MOD_NONE:
2992 		break;
2993 	case I915_FORMAT_MOD_X_TILED:
2994 		return PLANE_CTL_TILED_X;
2995 	case I915_FORMAT_MOD_Y_TILED:
2996 		return PLANE_CTL_TILED_Y;
2997 	case I915_FORMAT_MOD_Yf_TILED:
2998 		return PLANE_CTL_TILED_YF;
2999 	default:
3000 		MISSING_CASE(fb_modifier);
3001 	}
3002 
3003 	return 0;
3004 }
3005 
3006 u32 skl_plane_ctl_rotation(unsigned int rotation)
3007 {
3008 	switch (rotation) {
3009 	case DRM_ROTATE_0:
3010 		break;
3011 	/*
3012 	 * DRM_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
3013 	 * while i915 HW rotation is clockwise; that's why we swap the values here.
3014 	 */
3015 	case DRM_ROTATE_90:
3016 		return PLANE_CTL_ROTATE_270;
3017 	case DRM_ROTATE_180:
3018 		return PLANE_CTL_ROTATE_180;
3019 	case DRM_ROTATE_270:
3020 		return PLANE_CTL_ROTATE_90;
3021 	default:
3022 		MISSING_CASE(rotation);
3023 	}
3024 
3025 	return 0;
3026 }
3027 
3028 static void skylake_update_primary_plane(struct drm_plane *plane,
3029 					 const struct intel_crtc_state *crtc_state,
3030 					 const struct intel_plane_state *plane_state)
3031 {
3032 	struct drm_device *dev = plane->dev;
3033 	struct drm_i915_private *dev_priv = dev->dev_private;
3034 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
3035 	struct drm_framebuffer *fb = plane_state->base.fb;
3036 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
3037 	int pipe = intel_crtc->pipe;
3038 	u32 plane_ctl, stride_div, stride;
3039 	u32 tile_height, plane_offset, plane_size;
3040 	unsigned int rotation = plane_state->base.rotation;
3041 	int x_offset, y_offset;
3042 	u32 surf_addr;
3043 	int scaler_id = plane_state->scaler_id;
3044 	int src_x = plane_state->src.x1 >> 16;
3045 	int src_y = plane_state->src.y1 >> 16;
3046 	int src_w = drm_rect_width(&plane_state->src) >> 16;
3047 	int src_h = drm_rect_height(&plane_state->src) >> 16;
3048 	int dst_x = plane_state->dst.x1;
3049 	int dst_y = plane_state->dst.y1;
3050 	int dst_w = drm_rect_width(&plane_state->dst);
3051 	int dst_h = drm_rect_height(&plane_state->dst);
3052 
3053 	plane_ctl = PLANE_CTL_ENABLE |
3054 		    PLANE_CTL_PIPE_GAMMA_ENABLE |
3055 		    PLANE_CTL_PIPE_CSC_ENABLE;
3056 
3057 	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
3058 	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
3059 	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
3060 	plane_ctl |= skl_plane_ctl_rotation(rotation);
3061 
3062 	stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
3063 					       fb->pixel_format);
3064 	surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);
3065 
3066 	WARN_ON(drm_rect_width(&plane_state->src) == 0);
3067 
3068 	if (intel_rotation_90_or_270(rotation)) {
3069 		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
3070 
3071 		/* stride = Surface height in tiles */
3072 		tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
3073 		stride = DIV_ROUND_UP(fb->height, tile_height);
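		/*
		 * In the rotated view x runs along the original height,
		 * mirrored, so count back from the tile-aligned height
		 * (stride * tile_height) past the source rectangle.
		 */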
3074 		x_offset = stride * tile_height - src_y - src_h;
3075 		y_offset = src_x;
3076 		plane_size = (src_w - 1) << 16 | (src_h - 1);
3077 	} else {
3078 		stride = fb->pitches[0] / stride_div;
3079 		x_offset = src_x;
3080 		y_offset = src_y;
3081 		plane_size = (src_h - 1) << 16 | (src_w - 1);
3082 	}
3083 	plane_offset = y_offset << 16 | x_offset;
3084 
3085 	intel_crtc->adjusted_x = x_offset;
3086 	intel_crtc->adjusted_y = y_offset;
3087 
3088 	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
3089 	I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
3090 	I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
3091 	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
3092 
3093 	if (scaler_id >= 0) {
3094 		uint32_t ps_ctrl = 0;
3095 
3096 		WARN_ON(!dst_w || !dst_h);
3097 		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
3098 			crtc_state->scaler_state.scalers[scaler_id].mode;
3099 		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
3100 		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
3101 		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
3102 		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
3103 		I915_WRITE(PLANE_POS(pipe, 0), 0);
3104 	} else {
3105 		I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
3106 	}
3107 
3108 	I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);
3109 
3110 	POSTING_READ(PLANE_SURF(pipe, 0));
3111 }
3112 
3113 static void skylake_disable_primary_plane(struct drm_plane *primary,
3114 					  struct drm_crtc *crtc)
3115 {
3116 	struct drm_device *dev = crtc->dev;
3117 	struct drm_i915_private *dev_priv = dev->dev_private;
3118 	int pipe = to_intel_crtc(crtc)->pipe;
3119 
3120 	I915_WRITE(PLANE_CTL(pipe, 0), 0);
3121 	I915_WRITE(PLANE_SURF(pipe, 0), 0);
3122 	POSTING_READ(PLANE_SURF(pipe, 0));
3123 }
3124 
3125 /* Assume fb object is pinned & idle & fenced and just update base pointers */
3126 static int
3127 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
3128 			   int x, int y, enum mode_set_atomic state)
3129 {
3130 	/* Support for kgdboc is disabled, this needs a major rework. */
3131 	DRM_ERROR("legacy panic handler not supported any more.\n");
3132 
3133 	return -ENODEV;
3134 }
3135 
3136 static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
3137 {
3138 	struct intel_crtc *crtc;
3139 
3140 	for_each_intel_crtc(dev_priv->dev, crtc)
3141 		intel_finish_page_flip_cs(dev_priv, crtc->pipe);
3142 }
3143 
3144 static void intel_update_primary_planes(struct drm_device *dev)
3145 {
3146 	struct drm_crtc *crtc;
3147 
3148 	for_each_crtc(dev, crtc) {
3149 		struct intel_plane *plane = to_intel_plane(crtc->primary);
3150 		struct intel_plane_state *plane_state;
3151 
3152 		drm_modeset_lock_crtc(crtc, &plane->base);
3153 		plane_state = to_intel_plane_state(plane->base.state);
3154 
3155 		if (plane_state->visible)
3156 			plane->update_plane(&plane->base,
3157 					    to_intel_crtc_state(crtc->state),
3158 					    plane_state);
3159 
3160 		drm_modeset_unlock_crtc(crtc);
3161 	}
3162 }
3163 
3164 void intel_prepare_reset(struct drm_i915_private *dev_priv)
3165 {
3166 	/* no reset support for gen2 */
3167 	if (IS_GEN2(dev_priv))
3168 		return;
3169 
3170 	/* reset doesn't touch the display */
3171 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
3172 		return;
3173 
3174 	drm_modeset_lock_all(dev_priv->dev);
3175 	/*
3176 	 * Disabling the crtcs gracefully seems nicer. Also the
3177 	 * g33 docs say we should at least disable all the planes.
3178 	 */
3179 	intel_display_suspend(dev_priv->dev);
3180 }
3181 
3182 void intel_finish_reset(struct drm_i915_private *dev_priv)
3183 {
3184 	/*
3185 	 * Flips in the rings will be nuked by the reset,
3186 	 * so complete all pending flips so that user space
3187 	 * will get its events and not get stuck.
3188 	 */
3189 	intel_complete_page_flips(dev_priv);
3190 
3191 	/* no reset support for gen2 */
3192 	if (IS_GEN2(dev_priv))
3193 		return;
3194 
3195 	/* reset doesn't touch the display */
3196 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
3197 		/*
3198 		 * Flips in the rings have been nuked by the reset,
3199 		 * so update the base address of all primary
3200 		 * planes to the last fb to make sure we're
3201 		 * showing the correct fb after a reset.
3202 		 *
3203 		 * FIXME: Atomic will make this obsolete since we won't schedule
3204 		 * CS-based flips (which might get lost in gpu resets) any more.
3205 		 */
3206 		intel_update_primary_planes(dev_priv->dev);
3207 		return;
3208 	}
3209 
3210 	/*
3211 	 * The display has been reset as well,
3212 	 * so need a full re-initialization.
3213 	 */
3214 	intel_runtime_pm_disable_interrupts(dev_priv);
3215 	intel_runtime_pm_enable_interrupts(dev_priv);
3216 
3217 	intel_modeset_init_hw(dev_priv->dev);
3218 
3219 	spin_lock_irq(&dev_priv->irq_lock);
3220 	if (dev_priv->display.hpd_irq_setup)
3221 		dev_priv->display.hpd_irq_setup(dev_priv);
3222 	spin_unlock_irq(&dev_priv->irq_lock);
3223 
3224 	intel_display_resume(dev_priv->dev);
3225 
3226 	intel_hpd_init(dev_priv);
3227 
3228 	drm_modeset_unlock_all(dev_priv->dev);
3229 }
3230 
3231 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
3232 {
3233 	struct drm_device *dev = crtc->dev;
3234 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3235 	unsigned reset_counter;
3236 	bool pending;
3237 
3238 	reset_counter = i915_reset_counter(&to_i915(dev)->gpu_error);
3239 	if (intel_crtc->reset_counter != reset_counter)
3240 		return false;
3241 
3242 	spin_lock_irq(&dev->event_lock);
3243 	pending = to_intel_crtc(crtc)->flip_work != NULL;
3244 	spin_unlock_irq(&dev->event_lock);
3245 
3246 	return pending;
3247 }
3248 
3249 static void intel_update_pipe_config(struct intel_crtc *crtc,
3250 				     struct intel_crtc_state *old_crtc_state)
3251 {
3252 	struct drm_device *dev = crtc->base.dev;
3253 	struct drm_i915_private *dev_priv = dev->dev_private;
3254 	struct intel_crtc_state *pipe_config =
3255 		to_intel_crtc_state(crtc->base.state);
3256 
3257 	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
3258 	crtc->base.mode = crtc->base.state->mode;
3259 
3260 	DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
3261 		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
3262 		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
3263 
3264 	/*
3265 	 * Update pipe size and adjust fitter if needed: the reason for this is
3266 	 * that in compute_mode_changes we check the native mode (not the pfit
3267 	 * mode) to see if we can flip rather than do a full mode set. In the
3268 	 * fastboot case, we'll flip, but if we don't update the pipesrc and
3269 	 * pfit state, we'll end up with a big fb scanned out into the wrong
3270 	 * sized surface.
3271 	 */
3272 
3273 	I915_WRITE(PIPESRC(crtc->pipe),
3274 		   ((pipe_config->pipe_src_w - 1) << 16) |
3275 		   (pipe_config->pipe_src_h - 1));
3276 
3277 	/* on skylake this is done by detaching scalers */
3278 	if (INTEL_INFO(dev)->gen >= 9) {
3279 		skl_detach_scalers(crtc);
3280 
3281 		if (pipe_config->pch_pfit.enabled)
3282 			skylake_pfit_enable(crtc);
3283 	} else if (HAS_PCH_SPLIT(dev)) {
3284 		if (pipe_config->pch_pfit.enabled)
3285 			ironlake_pfit_enable(crtc);
3286 		else if (old_crtc_state->pch_pfit.enabled)
3287 			ironlake_pfit_disable(crtc, true);
3288 	}
3289 }
3290 
3291 static void intel_fdi_normal_train(struct drm_crtc *crtc)
3292 {
3293 	struct drm_device *dev = crtc->dev;
3294 	struct drm_i915_private *dev_priv = dev->dev_private;
3295 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3296 	int pipe = intel_crtc->pipe;
3297 	i915_reg_t reg;
3298 	u32 temp;
3299 
3300 	/* enable normal train */
3301 	reg = FDI_TX_CTL(pipe);
3302 	temp = I915_READ(reg);
3303 	if (IS_IVYBRIDGE(dev)) {
3304 		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3305 		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
3306 	} else {
3307 		temp &= ~FDI_LINK_TRAIN_NONE;
3308 		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
3309 	}
3310 	I915_WRITE(reg, temp);
3311 
3312 	reg = FDI_RX_CTL(pipe);
3313 	temp = I915_READ(reg);
3314 	if (HAS_PCH_CPT(dev)) {
3315 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3316 		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
3317 	} else {
3318 		temp &= ~FDI_LINK_TRAIN_NONE;
3319 		temp |= FDI_LINK_TRAIN_NONE;
3320 	}
3321 	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
3322 
3323 	/* wait one idle pattern time */
3324 	POSTING_READ(reg);
3325 	udelay(1000);
3326 
3327 	/* IVB wants error correction enabled */
3328 	if (IS_IVYBRIDGE(dev))
3329 		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
3330 			   FDI_FE_ERRC_ENABLE);
3331 }
3332 
3333 /* The FDI link training functions for ILK/Ibexpeak. */
3334 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
3335 {
3336 	struct drm_device *dev = crtc->dev;
3337 	struct drm_i915_private *dev_priv = dev->dev_private;
3338 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3339 	int pipe = intel_crtc->pipe;
3340 	i915_reg_t reg;
3341 	u32 temp, tries;
3342 
3343 	/* FDI needs bits from pipe first */
3344 	assert_pipe_enabled(dev_priv, pipe);
3345 
3346 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3347 	   for the train result */
3348 	reg = FDI_RX_IMR(pipe);
3349 	temp = I915_READ(reg);
3350 	temp &= ~FDI_RX_SYMBOL_LOCK;
3351 	temp &= ~FDI_RX_BIT_LOCK;
3352 	I915_WRITE(reg, temp);
3353 	I915_READ(reg);
3354 	udelay(150);
3355 
3356 	/* enable CPU FDI TX and PCH FDI RX */
3357 	reg = FDI_TX_CTL(pipe);
3358 	temp = I915_READ(reg);
3359 	temp &= ~FDI_DP_PORT_WIDTH_MASK;
3360 	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3361 	temp &= ~FDI_LINK_TRAIN_NONE;
3362 	temp |= FDI_LINK_TRAIN_PATTERN_1;
3363 	I915_WRITE(reg, temp | FDI_TX_ENABLE);
3364 
3365 	reg = FDI_RX_CTL(pipe);
3366 	temp = I915_READ(reg);
3367 	temp &= ~FDI_LINK_TRAIN_NONE;
3368 	temp |= FDI_LINK_TRAIN_PATTERN_1;
3369 	I915_WRITE(reg, temp | FDI_RX_ENABLE);
3370 
3371 	POSTING_READ(reg);
3372 	udelay(150);
3373 
3374 	/* Ironlake workaround: enable clock pointer after FDI enable */
3375 	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3376 	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
3377 		   FDI_RX_PHASE_SYNC_POINTER_EN);
3378 
3379 	reg = FDI_RX_IIR(pipe);
3380 	for (tries = 0; tries < 5; tries++) {
3381 		temp = I915_READ(reg);
3382 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3383 
3384 		if ((temp & FDI_RX_BIT_LOCK)) {
3385 			DRM_DEBUG_KMS("FDI train 1 done.\n");
3386 			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3387 			break;
3388 		}
3389 	}
3390 	if (tries == 5)
3391 		DRM_ERROR("FDI train 1 fail!\n");
3392 
3393 	/* Train 2 */
3394 	reg = FDI_TX_CTL(pipe);
3395 	temp = I915_READ(reg);
3396 	temp &= ~FDI_LINK_TRAIN_NONE;
3397 	temp |= FDI_LINK_TRAIN_PATTERN_2;
3398 	I915_WRITE(reg, temp);
3399 
3400 	reg = FDI_RX_CTL(pipe);
3401 	temp = I915_READ(reg);
3402 	temp &= ~FDI_LINK_TRAIN_NONE;
3403 	temp |= FDI_LINK_TRAIN_PATTERN_2;
3404 	I915_WRITE(reg, temp);
3405 
3406 	POSTING_READ(reg);
3407 	udelay(150);
3408 
3409 	reg = FDI_RX_IIR(pipe);
3410 	for (tries = 0; tries < 5; tries++) {
3411 		temp = I915_READ(reg);
3412 		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3413 
3414 		if (temp & FDI_RX_SYMBOL_LOCK) {
3415 			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3416 			DRM_DEBUG_KMS("FDI train 2 done.\n");
3417 			break;
3418 		}
3419 	}
3420 	if (tries == 5)
3421 		DRM_ERROR("FDI train 2 fail!\n");
3422 
3423 	DRM_DEBUG_KMS("FDI train done.\n");
3424 
3425 }
3426 
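/*
 * Voltage-swing/pre-emphasis combinations tried in order during SNB-B
 * (and IVB manual) FDI training; the retry loops below walk this table
 * until bit/symbol lock is reported in FDI_RX_IIR.
 */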
3427 static const int snb_b_fdi_train_param[] = {
3428 	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
3429 	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
3430 	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
3431 	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
3432 };
3433 
3434 /* The FDI link training functions for SNB/Cougarpoint. */
3435 static void gen6_fdi_link_train(struct drm_crtc *crtc)
3436 {
3437 	struct drm_device *dev = crtc->dev;
3438 	struct drm_i915_private *dev_priv = dev->dev_private;
3439 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3440 	int pipe = intel_crtc->pipe;
3441 	i915_reg_t reg;
3442 	u32 temp, i, retry;
3443 
3444 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3445 	   for train result */
3446 	reg = FDI_RX_IMR(pipe);
3447 	temp = I915_READ(reg);
3448 	temp &= ~FDI_RX_SYMBOL_LOCK;
3449 	temp &= ~FDI_RX_BIT_LOCK;
3450 	I915_WRITE(reg, temp);
3451 
3452 	POSTING_READ(reg);
3453 	udelay(150);
3454 
3455 	/* enable CPU FDI TX and PCH FDI RX */
3456 	reg = FDI_TX_CTL(pipe);
3457 	temp = I915_READ(reg);
3458 	temp &= ~FDI_DP_PORT_WIDTH_MASK;
3459 	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3460 	temp &= ~FDI_LINK_TRAIN_NONE;
3461 	temp |= FDI_LINK_TRAIN_PATTERN_1;
3462 	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3463 	/* SNB-B */
3464 	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3465 	I915_WRITE(reg, temp | FDI_TX_ENABLE);
3466 
3467 	I915_WRITE(FDI_RX_MISC(pipe),
3468 		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3469 
3470 	reg = FDI_RX_CTL(pipe);
3471 	temp = I915_READ(reg);
3472 	if (HAS_PCH_CPT(dev)) {
3473 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3474 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3475 	} else {
3476 		temp &= ~FDI_LINK_TRAIN_NONE;
3477 		temp |= FDI_LINK_TRAIN_PATTERN_1;
3478 	}
3479 	I915_WRITE(reg, temp | FDI_RX_ENABLE);
3480 
3481 	POSTING_READ(reg);
3482 	udelay(150);
3483 
3484 	for (i = 0; i < 4; i++) {
3485 		reg = FDI_TX_CTL(pipe);
3486 		temp = I915_READ(reg);
3487 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3488 		temp |= snb_b_fdi_train_param[i];
3489 		I915_WRITE(reg, temp);
3490 
3491 		POSTING_READ(reg);
3492 		udelay(500);
3493 
3494 		for (retry = 0; retry < 5; retry++) {
3495 			reg = FDI_RX_IIR(pipe);
3496 			temp = I915_READ(reg);
3497 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3498 			if (temp & FDI_RX_BIT_LOCK) {
3499 				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3500 				DRM_DEBUG_KMS("FDI train 1 done.\n");
3501 				break;
3502 			}
3503 			udelay(50);
3504 		}
3505 		if (retry < 5)
3506 			break;
3507 	}
3508 	if (i == 4)
3509 		DRM_ERROR("FDI train 1 fail!\n");
3510 
3511 	/* Train 2 */
3512 	reg = FDI_TX_CTL(pipe);
3513 	temp = I915_READ(reg);
3514 	temp &= ~FDI_LINK_TRAIN_NONE;
3515 	temp |= FDI_LINK_TRAIN_PATTERN_2;
3516 	if (IS_GEN6(dev)) {
3517 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3518 		/* SNB-B */
3519 		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3520 	}
3521 	I915_WRITE(reg, temp);
3522 
3523 	reg = FDI_RX_CTL(pipe);
3524 	temp = I915_READ(reg);
3525 	if (HAS_PCH_CPT(dev)) {
3526 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3527 		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3528 	} else {
3529 		temp &= ~FDI_LINK_TRAIN_NONE;
3530 		temp |= FDI_LINK_TRAIN_PATTERN_2;
3531 	}
3532 	I915_WRITE(reg, temp);
3533 
3534 	POSTING_READ(reg);
3535 	udelay(150);
3536 
3537 	for (i = 0; i < 4; i++) {
3538 		reg = FDI_TX_CTL(pipe);
3539 		temp = I915_READ(reg);
3540 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3541 		temp |= snb_b_fdi_train_param[i];
3542 		I915_WRITE(reg, temp);
3543 
3544 		POSTING_READ(reg);
3545 		udelay(500);
3546 
3547 		for (retry = 0; retry < 5; retry++) {
3548 			reg = FDI_RX_IIR(pipe);
3549 			temp = I915_READ(reg);
3550 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3551 			if (temp & FDI_RX_SYMBOL_LOCK) {
3552 				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3553 				DRM_DEBUG_KMS("FDI train 2 done.\n");
3554 				break;
3555 			}
3556 			udelay(50);
3557 		}
3558 		if (retry < 5)
3559 			break;
3560 	}
3561 	if (i == 4)
3562 		DRM_ERROR("FDI train 2 fail!\n");
3563 
3564 	DRM_DEBUG_KMS("FDI train done.\n");
3565 }
3566 
3567 /* Manual link training for Ivy Bridge A0 parts */
3568 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
3569 {
3570 	struct drm_device *dev = crtc->dev;
3571 	struct drm_i915_private *dev_priv = dev->dev_private;
3572 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3573 	int pipe = intel_crtc->pipe;
3574 	i915_reg_t reg;
3575 	u32 temp, i, j;
3576 
3577 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3578 	   for train result */
3579 	reg = FDI_RX_IMR(pipe);
3580 	temp = I915_READ(reg);
3581 	temp &= ~FDI_RX_SYMBOL_LOCK;
3582 	temp &= ~FDI_RX_BIT_LOCK;
3583 	I915_WRITE(reg, temp);
3584 
3585 	POSTING_READ(reg);
3586 	udelay(150);
3587 
3588 	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
3589 		      I915_READ(FDI_RX_IIR(pipe)));
3590 
3591 	/* Try each vswing and preemphasis setting twice before moving on */
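	/* j/2 indexes snb_b_fdi_train_param, so each entry gets two attempts */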
3592 	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
3593 		/* disable first in case we need to retry */
3594 		reg = FDI_TX_CTL(pipe);
3595 		temp = I915_READ(reg);
3596 		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
3597 		temp &= ~FDI_TX_ENABLE;
3598 		I915_WRITE(reg, temp);
3599 
3600 		reg = FDI_RX_CTL(pipe);
3601 		temp = I915_READ(reg);
3602 		temp &= ~FDI_LINK_TRAIN_AUTO;
3603 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3604 		temp &= ~FDI_RX_ENABLE;
3605 		I915_WRITE(reg, temp);
3606 
3607 		/* enable CPU FDI TX and PCH FDI RX */
3608 		reg = FDI_TX_CTL(pipe);
3609 		temp = I915_READ(reg);
3610 		temp &= ~FDI_DP_PORT_WIDTH_MASK;
3611 		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3612 		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
3613 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3614 		temp |= snb_b_fdi_train_param[j/2];
3615 		temp |= FDI_COMPOSITE_SYNC;
3616 		I915_WRITE(reg, temp | FDI_TX_ENABLE);
3617 
3618 		I915_WRITE(FDI_RX_MISC(pipe),
3619 			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3620 
3621 		reg = FDI_RX_CTL(pipe);
3622 		temp = I915_READ(reg);
3623 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3624 		temp |= FDI_COMPOSITE_SYNC;
3625 		I915_WRITE(reg, temp | FDI_RX_ENABLE);
3626 
3627 		POSTING_READ(reg);
3628 		udelay(1); /* should be 0.5us */
3629 
3630 		for (i = 0; i < 4; i++) {
3631 			reg = FDI_RX_IIR(pipe);
3632 			temp = I915_READ(reg);
3633 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3634 
3635 			if (temp & FDI_RX_BIT_LOCK ||
3636 			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
3637 				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3638 				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
3639 					      i);
3640 				break;
3641 			}
3642 			udelay(1); /* should be 0.5us */
3643 		}
3644 		if (i == 4) {
3645 			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
3646 			continue;
3647 		}
3648 
3649 		/* Train 2 */
3650 		reg = FDI_TX_CTL(pipe);
3651 		temp = I915_READ(reg);
3652 		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3653 		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
3654 		I915_WRITE(reg, temp);
3655 
3656 		reg = FDI_RX_CTL(pipe);
3657 		temp = I915_READ(reg);
3658 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3659 		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3660 		I915_WRITE(reg, temp);
3661 
3662 		POSTING_READ(reg);
3663 		udelay(2); /* should be 1.5us */
3664 
3665 		for (i = 0; i < 4; i++) {
3666 			reg = FDI_RX_IIR(pipe);
3667 			temp = I915_READ(reg);
3668 			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3669 
3670 			if (temp & FDI_RX_SYMBOL_LOCK ||
3671 			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
3672 				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3673 				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
3674 					      i);
3675 				goto train_done;
3676 			}
3677 			udelay(2); /* should be 1.5us */
3678 		}
3679 		if (i == 4)
3680 			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
3681 	}
3682 
3683 train_done:
3684 	DRM_DEBUG_KMS("FDI train done.\n");
3685 }
3686 
3687 static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
3688 {
3689 	struct drm_device *dev = intel_crtc->base.dev;
3690 	struct drm_i915_private *dev_priv = dev->dev_private;
3691 	int pipe = intel_crtc->pipe;
3692 	i915_reg_t reg;
3693 	u32 temp;
3694 
3695 	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
3696 	reg = FDI_RX_CTL(pipe);
3697 	temp = I915_READ(reg);
3698 	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
3699 	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3700 	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3701 	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
3702 
3703 	POSTING_READ(reg);
3704 	udelay(200);
3705 
3706 	/* Switch from Rawclk to PCDclk */
3707 	temp = I915_READ(reg);
3708 	I915_WRITE(reg, temp | FDI_PCDCLK);
3709 
3710 	POSTING_READ(reg);
3711 	udelay(200);
3712 
3713 	/* Enable CPU FDI TX PLL, always on for Ironlake */
3714 	reg = FDI_TX_CTL(pipe);
3715 	temp = I915_READ(reg);
3716 	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
3717 		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
3718 
3719 		POSTING_READ(reg);
3720 		udelay(100);
3721 	}
3722 }
3723 
3724 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3725 {
3726 	struct drm_device *dev = intel_crtc->base.dev;
3727 	struct drm_i915_private *dev_priv = dev->dev_private;
3728 	int pipe = intel_crtc->pipe;
3729 	i915_reg_t reg;
3730 	u32 temp;
3731 
3732 	/* Switch from PCDclk to Rawclk */
3733 	reg = FDI_RX_CTL(pipe);
3734 	temp = I915_READ(reg);
3735 	I915_WRITE(reg, temp & ~FDI_PCDCLK);
3736 
3737 	/* Disable CPU FDI TX PLL */
3738 	reg = FDI_TX_CTL(pipe);
3739 	temp = I915_READ(reg);
3740 	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3741 
3742 	POSTING_READ(reg);
3743 	udelay(100);
3744 
3745 	reg = FDI_RX_CTL(pipe);
3746 	temp = I915_READ(reg);
3747 	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3748 
3749 	/* Wait for the clocks to turn off. */
3750 	POSTING_READ(reg);
3751 	udelay(100);
3752 }
3753 
3754 static void ironlake_fdi_disable(struct drm_crtc *crtc)
3755 {
3756 	struct drm_device *dev = crtc->dev;
3757 	struct drm_i915_private *dev_priv = dev->dev_private;
3758 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3759 	int pipe = intel_crtc->pipe;
3760 	i915_reg_t reg;
3761 	u32 temp;
3762 
3763 	/* disable CPU FDI tx and PCH FDI rx */
3764 	reg = FDI_TX_CTL(pipe);
3765 	temp = I915_READ(reg);
3766 	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
3767 	POSTING_READ(reg);
3768 
3769 	reg = FDI_RX_CTL(pipe);
3770 	temp = I915_READ(reg);
3771 	temp &= ~(0x7 << 16);
3772 	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3773 	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
3774 
3775 	POSTING_READ(reg);
3776 	udelay(100);
3777 
3778 	/* Ironlake workaround, disable clock pointer after disabling FDI */
3779 	if (HAS_PCH_IBX(dev))
3780 		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3781 
3782 	/* still set train pattern 1 */
3783 	reg = FDI_TX_CTL(pipe);
3784 	temp = I915_READ(reg);
3785 	temp &= ~FDI_LINK_TRAIN_NONE;
3786 	temp |= FDI_LINK_TRAIN_PATTERN_1;
3787 	I915_WRITE(reg, temp);
3788 
3789 	reg = FDI_RX_CTL(pipe);
3790 	temp = I915_READ(reg);
3791 	if (HAS_PCH_CPT(dev)) {
3792 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3793 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3794 	} else {
3795 		temp &= ~FDI_LINK_TRAIN_NONE;
3796 		temp |= FDI_LINK_TRAIN_PATTERN_1;
3797 	}
3798 	/* BPC in FDI rx is consistent with that in PIPECONF */
3799 	temp &= ~(0x07 << 16);
3800 	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3801 	I915_WRITE(reg, temp);
3802 
3803 	POSTING_READ(reg);
3804 	udelay(100);
3805 }
3806 
3807 bool intel_has_pending_fb_unpin(struct drm_device *dev)
3808 {
3809 	struct intel_crtc *crtc;
3810 
3811 	/* Note that we don't need to be called with mode_config.lock here
3812 	 * as our list of CRTC objects is static for the lifetime of the
3813 	 * device and so cannot disappear as we iterate. Similarly, we can
3814 	 * happily treat the predicates as racy, atomic checks as userspace
3815 	 * cannot claim and pin a new fb without at least acquiring the
3816 	 * struct_mutex and so serialising with us.
3817 	 */
3818 	for_each_intel_crtc(dev, crtc) {
3819 		if (atomic_read(&crtc->unpin_work_count) == 0)
3820 			continue;
3821 
3822 		if (crtc->flip_work)
3823 			intel_wait_for_vblank(dev, crtc->pipe);
3824 
3825 		return true;
3826 	}
3827 
3828 	return false;
3829 }
3830 
3831 static void page_flip_completed(struct intel_crtc *intel_crtc)
3832 {
3833 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
3834 	struct intel_flip_work *work = intel_crtc->flip_work;
3835 
3836 	intel_crtc->flip_work = NULL;
3837 
3838 	if (work->event)
3839 		drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
3840 
3841 	drm_crtc_vblank_put(&intel_crtc->base);
3842 
3843 	wake_up_all(&dev_priv->pending_flip_queue);
3844 	queue_work(dev_priv->wq, &work->unpin_work);
3845 
3846 	trace_i915_flip_complete(intel_crtc->plane,
3847 				 work->pending_flip_obj);
3848 }
3849 
3850 static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3851 {
3852 	struct drm_device *dev = crtc->dev;
3853 	struct drm_i915_private *dev_priv = dev->dev_private;
3854 	long ret;
3855 
3856 	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
3857 
3858 	ret = wait_event_interruptible_timeout(
3859 					dev_priv->pending_flip_queue,
3860 					!intel_crtc_has_pending_flip(crtc),
3861 					60*HZ);
3862 
3863 	if (ret < 0)
3864 		return ret;
3865 
3866 	if (ret == 0) {
3867 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3868 		struct intel_flip_work *work;
3869 
3870 		spin_lock_irq(&dev->event_lock);
3871 		work = intel_crtc->flip_work;
3872 		if (work && !is_mmio_work(work)) {
3873 			WARN_ONCE(1, "Removing stuck page flip\n");
3874 			page_flip_completed(intel_crtc);
3875 		}
3876 		spin_unlock_irq(&dev->event_lock);
3877 	}
3878 
3879 	return 0;
3880 }
3881 
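/*
 * Gate the LPT pixel clock and stop the iCLKIP SSC modulator via the
 * sideband (SBI) interface.
 */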
3882 static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
3883 {
3884 	u32 temp;
3885 
3886 	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
3887 
3888 	mutex_lock(&dev_priv->sb_lock);
3889 
3890 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3891 	temp |= SBI_SSCCTL_DISABLE;
3892 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3893 
3894 	mutex_unlock(&dev_priv->sb_lock);
3895 }
3896 
3897 /* Program iCLKIP clock to the desired frequency */
3898 static void lpt_program_iclkip(struct drm_crtc *crtc)
3899 {
3900 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
3901 	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
3902 	u32 divsel, phaseinc, auxdiv, phasedir = 0;
3903 	u32 temp;
3904 
3905 	lpt_disable_iclkip(dev_priv);
3906 
3907 	/* The iCLK virtual clock root frequency is in MHz,
3908 	 * but the adjusted_mode->crtc_clock is in KHz. To get the
3909 	 * divisors, it is necessary to divide one by another, so we
3910 	 * convert the virtual clock precision to KHz here for higher
3911 	 * precision.
3912 	 */
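	/*
	 * Worked example (illustrative values): for a 108000 kHz mode,
	 * desired_divisor = DIV_ROUND_CLOSEST(172800000, 108000) = 1600,
	 * giving divsel = 1600 / 64 - 2 = 23 and phaseinc = 1600 % 64 = 0,
	 * which fits the 7-bit divisor on the first pass (auxdiv = 0).
	 */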
3913 	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
3914 		u32 iclk_virtual_root_freq = 172800 * 1000;
3915 		u32 iclk_pi_range = 64;
3916 		u32 desired_divisor;
3917 
3918 		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
3919 						    clock << auxdiv);
3920 		divsel = (desired_divisor / iclk_pi_range) - 2;
3921 		phaseinc = desired_divisor % iclk_pi_range;
3922 
3923 		/*
3924 		 * Near 20MHz is a corner case which is
3925 		 * out of range for the 7-bit divisor
3926 		 */
3927 		if (divsel <= 0x7f)
3928 			break;
3929 	}
3930 
3931 	/* This should not happen with any sane values */
3932 	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
3933 		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
3934 	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
3935 		~SBI_SSCDIVINTPHASE_INCVAL_MASK);
3936 
3937 	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
3938 			clock,
3939 			auxdiv,
3940 			divsel,
3941 			phasedir,
3942 			phaseinc);
3943 
3944 	mutex_lock(&dev_priv->sb_lock);
3945 
3946 	/* Program SSCDIVINTPHASE6 */
3947 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3948 	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
3949 	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
3950 	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
3951 	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
3952 	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
3953 	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
3954 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
3955 
3956 	/* Program SSCAUXDIV */
3957 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3958 	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3959 	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3960 	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3961 
3962 	/* Enable modulator and associated divider */
3963 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3964 	temp &= ~SBI_SSCCTL_DISABLE;
3965 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3966 
3967 	mutex_unlock(&dev_priv->sb_lock);
3968 
3969 	/* Wait for initialization time */
3970 	udelay(24);
3971 
3972 	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3973 }
3974 
3975 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
3976 {
3977 	u32 divsel, phaseinc, auxdiv;
3978 	u32 iclk_virtual_root_freq = 172800 * 1000;
3979 	u32 iclk_pi_range = 64;
3980 	u32 desired_divisor;
3981 	u32 temp;
3982 
3983 	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
3984 		return 0;
3985 
3986 	mutex_lock(&dev_priv->sb_lock);
3987 
3988 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3989 	if (temp & SBI_SSCCTL_DISABLE) {
3990 		mutex_unlock(&dev_priv->sb_lock);
3991 		return 0;
3992 	}
3993 
3994 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3995 	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
3996 		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
3997 	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
3998 		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
3999 
4000 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
4001 	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
4002 		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
4003 
4004 	mutex_unlock(&dev_priv->sb_lock);
4005 
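	/* Invert the encoding used by lpt_program_iclkip() to recover the
	 * effective divisor and hence the original clock in kHz. */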
4006 	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
4007 
4008 	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
4009 				 desired_divisor << auxdiv);
4010 }
4011 
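/*
 * Copy the CPU transcoder timings to the PCH transcoder so that both
 * ends of the FDI link are programmed with the same mode.
 */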
4012 static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
4013 						enum i915_pipe pch_transcoder)
4014 {
4015 	struct drm_device *dev = crtc->base.dev;
4016 	struct drm_i915_private *dev_priv = dev->dev_private;
4017 	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
4018 
4019 	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
4020 		   I915_READ(HTOTAL(cpu_transcoder)));
4021 	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
4022 		   I915_READ(HBLANK(cpu_transcoder)));
4023 	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
4024 		   I915_READ(HSYNC(cpu_transcoder)));
4025 
4026 	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
4027 		   I915_READ(VTOTAL(cpu_transcoder)));
4028 	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
4029 		   I915_READ(VBLANK(cpu_transcoder)));
4030 	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
4031 		   I915_READ(VSYNC(cpu_transcoder)));
4032 	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
4033 		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
4034 }
4035 
4036 static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
4037 {
4038 	struct drm_i915_private *dev_priv = dev->dev_private;
4039 	uint32_t temp;
4040 
4041 	temp = I915_READ(SOUTH_CHICKEN1);
4042 	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
4043 		return;
4044 
4045 	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
4046 	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
4047 
4048 	temp &= ~FDI_BC_BIFURCATION_SELECT;
4049 	if (enable)
4050 		temp |= FDI_BC_BIFURCATION_SELECT;
4051 
4052 	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
4053 	I915_WRITE(SOUTH_CHICKEN1, temp);
4054 	POSTING_READ(SOUTH_CHICKEN1);
4055 }
4056 
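/*
 * FDI B and C share lanes on CPT/PPT: with B/C bifurcation enabled each
 * gets 2 lanes, so a pipe B config needing more than 2 lanes must
 * disable bifurcation to claim the shared lanes, while any FDI use by
 * pipe C requires bifurcation enabled.
 */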
4057 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4058 {
4059 	struct drm_device *dev = intel_crtc->base.dev;
4060 
4061 	switch (intel_crtc->pipe) {
4062 	case PIPE_A:
4063 		break;
4064 	case PIPE_B:
4065 		if (intel_crtc->config->fdi_lanes > 2)
4066 			cpt_set_fdi_bc_bifurcation(dev, false);
4067 		else
4068 			cpt_set_fdi_bc_bifurcation(dev, true);
4069 
4070 		break;
4071 	case PIPE_C:
4072 		cpt_set_fdi_bc_bifurcation(dev, true);
4073 
4074 		break;
4075 	default:
4076 		BUG();
4077 	}
4078 }
4079 
4080 /* Return which DP Port should be selected for Transcoder DP control */
4081 static enum port
4082 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4083 {
4084 	struct drm_device *dev = crtc->dev;
4085 	struct intel_encoder *encoder;
4086 
4087 	for_each_encoder_on_crtc(dev, crtc, encoder) {
4088 		if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4089 		    encoder->type == INTEL_OUTPUT_EDP)
4090 			return enc_to_dig_port(&encoder->base)->port;
4091 	}
4092 
4093 	return -1;
4094 }
4095 
4096 /*
4097  * Enable PCH resources required for PCH ports:
4098  *   - PCH PLLs
4099  *   - FDI training & RX/TX
4100  *   - update transcoder timings
4101  *   - DP transcoding bits
4102  *   - transcoder
4103  */
4104 static void ironlake_pch_enable(struct drm_crtc *crtc)
4105 {
4106 	struct drm_device *dev = crtc->dev;
4107 	struct drm_i915_private *dev_priv = dev->dev_private;
4108 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4109 	int pipe = intel_crtc->pipe;
4110 	u32 temp;
4111 
4112 	assert_pch_transcoder_disabled(dev_priv, pipe);
4113 
4114 	if (IS_IVYBRIDGE(dev))
4115 		ivybridge_update_fdi_bc_bifurcation(intel_crtc);
4116 
4117 	/* Write the TU size bits before fdi link training, so that error
4118 	 * detection works. */
4119 	I915_WRITE(FDI_RX_TUSIZE1(pipe),
4120 		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
4121 
4122 	/* For PCH output, training FDI link */
4123 	dev_priv->display.fdi_link_train(crtc);
4124 
4125 	/* We need to program the right clock selection before writing the pixel
4126 	 * multiplier into the DPLL. */
4127 	if (HAS_PCH_CPT(dev)) {
4128 		u32 sel;
4129 
4130 		temp = I915_READ(PCH_DPLL_SEL);
4131 		temp |= TRANS_DPLL_ENABLE(pipe);
4132 		sel = TRANS_DPLLB_SEL(pipe);
4133 		if (intel_crtc->config->shared_dpll ==
4134 		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
4135 			temp |= sel;
4136 		else
4137 			temp &= ~sel;
4138 		I915_WRITE(PCH_DPLL_SEL, temp);
4139 	}
4140 
4141 	/* XXX: pch pll's can be enabled any time before we enable the PCH
4142 	 * transcoder, and we actually should do this to not upset any PCH
4143 	 * transcoder that already uses the clock when we share it.
4144 	 *
4145 	 * Note that enable_shared_dpll tries to do the right thing, but
4146 	 * get_shared_dpll unconditionally resets the pll - we need that to have
4147 	 * the right LVDS enable sequence. */
4148 	intel_enable_shared_dpll(intel_crtc);
4149 
4150 	/* set transcoder timing, panel must allow it */
4151 	assert_panel_unlocked(dev_priv, pipe);
4152 	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
4153 
4154 	intel_fdi_normal_train(crtc);
4155 
4156 	/* For PCH DP, enable TRANS_DP_CTL */
4157 	if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
4158 		const struct drm_display_mode *adjusted_mode =
4159 			&intel_crtc->config->base.adjusted_mode;
4160 		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
4161 		i915_reg_t reg = TRANS_DP_CTL(pipe);
4162 		temp = I915_READ(reg);
4163 		temp &= ~(TRANS_DP_PORT_SEL_MASK |
4164 			  TRANS_DP_SYNC_MASK |
4165 			  TRANS_DP_BPC_MASK);
4166 		temp |= TRANS_DP_OUTPUT_ENABLE;
4167 		temp |= bpc << 9; /* same format but at 11:9 */
4168 
4169 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
4170 			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
4171 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
4172 			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
4173 
4174 		switch (intel_trans_dp_port_sel(crtc)) {
4175 		case PORT_B:
4176 			temp |= TRANS_DP_PORT_SEL_B;
4177 			break;
4178 		case PORT_C:
4179 			temp |= TRANS_DP_PORT_SEL_C;
4180 			break;
4181 		case PORT_D:
4182 			temp |= TRANS_DP_PORT_SEL_D;
4183 			break;
4184 		default:
4185 			BUG();
4186 		}
4187 
4188 		I915_WRITE(reg, temp);
4189 	}
4190 
4191 	ironlake_enable_pch_transcoder(dev_priv, pipe);
4192 }
4193 
4194 static void lpt_pch_enable(struct drm_crtc *crtc)
4195 {
4196 	struct drm_device *dev = crtc->dev;
4197 	struct drm_i915_private *dev_priv = dev->dev_private;
4198 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4199 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
4200 
4201 	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
4202 
4203 	lpt_program_iclkip(crtc);
4204 
4205 	/* Set transcoder timing. */
4206 	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
4207 
4208 	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
4209 }
4210 
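/*
 * Sanity check after a CPT modeset: poll PIPEDSL and complain if the
 * pipe's scanline counter never advances (i.e. the pipe is stuck).
 */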
4211 static void cpt_verify_modeset(struct drm_device *dev, int pipe)
4212 {
4213 	struct drm_i915_private *dev_priv = dev->dev_private;
4214 	i915_reg_t dslreg = PIPEDSL(pipe);
4215 	u32 temp;
4216 
4217 	temp = I915_READ(dslreg);
4218 	udelay(500);
4219 	if (wait_for(I915_READ(dslreg) != temp, 5)) {
4220 		if (wait_for(I915_READ(dslreg) != temp, 5))
4221 			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
4222 	}
4223 }
4224 
4225 static int
4226 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4227 		  unsigned scaler_user, int *scaler_id, unsigned int rotation,
4228 		  int src_w, int src_h, int dst_w, int dst_h)
4229 {
4230 	struct intel_crtc_scaler_state *scaler_state =
4231 		&crtc_state->scaler_state;
4232 	struct intel_crtc *intel_crtc =
4233 		to_intel_crtc(crtc_state->base.crtc);
4234 	int need_scaling;
4235 
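	/*
	 * With 90/270 rotation the source is scanned out transposed, so
	 * compare source width/height against the swapped destination
	 * dimensions when deciding whether a scaler is needed.
	 */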
4236 	need_scaling = intel_rotation_90_or_270(rotation) ?
4237 		(src_h != dst_w || src_w != dst_h) :
4238 		(src_w != dst_w || src_h != dst_h);
4239 
4240 	/*
4241 	 * If the plane is being disabled, the scaler is no longer required,
4242 	 * or a force detach is requested, free the scaler bound to this
4243 	 * plane/crtc; to do this, update crtc->scaler_usage.
4244 	 *
4245 	 * Here the scaler state in crtc_state is marked free so that the
4246 	 * scaler can be assigned to another user. The actual register
4247 	 * update that frees the scaler is done in plane/panel-fit programming.
4248 	 * For this reason crtc/plane_state->scaler_id isn't reset here.
4249 	 */
4250 	if (force_detach || !need_scaling) {
4251 		if (*scaler_id >= 0) {
4252 			scaler_state->scaler_users &= ~(1 << scaler_user);
4253 			scaler_state->scalers[*scaler_id].in_use = 0;
4254 
4255 			DRM_DEBUG_KMS("scaler_user index %u.%u: "
4256 				"Staged freeing scaler id %d scaler_users = 0x%x\n",
4257 				intel_crtc->pipe, scaler_user, *scaler_id,
4258 				scaler_state->scaler_users);
4259 			*scaler_id = -1;
4260 		}
4261 		return 0;
4262 	}
4263 
4264 	/* range checks */
4265 	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
4266 		dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
4267 
4268 		src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
4269 		dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
4270 		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
4271 			"size is out of scaler range\n",
4272 			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
4273 		return -EINVAL;
4274 	}
4275 
4276 	/* mark this plane as a scaler user in crtc_state */
4277 	scaler_state->scaler_users |= (1 << scaler_user);
4278 	DRM_DEBUG_KMS("scaler_user index %u.%u: "
4279 		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
4280 		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
4281 		scaler_state->scaler_users);
4282 
4283 	return 0;
4284 }
4285 
4286 /**
4287  * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
4288  *
4289  * @state: crtc state containing the scaler state
4290  *
4291  * Return
4292  *     0 - scaler_usage updated successfully
4293  *    error - requested scaling cannot be supported or other error condition
4294  */
4295 int skl_update_scaler_crtc(struct intel_crtc_state *state)
4296 {
4297 	struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
4298 	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
4299 
4300 	DRM_DEBUG_KMS("Updating scaler for [CRTC:%d:%s] scaler_user index %u.%u\n",
4301 		      intel_crtc->base.base.id, intel_crtc->base.name,
4302 		      intel_crtc->pipe, SKL_CRTC_INDEX);
4303 
4304 	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
4305 		&state->scaler_state.scaler_id, DRM_ROTATE_0,
4306 		state->pipe_src_w, state->pipe_src_h,
4307 		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
4308 }
4309 
4310 /**
4311  * skl_update_scaler_plane - Stages update to scaler state for a given plane.
4312  *
4313  * @crtc_state: crtc state containing the scaler state
4314  * @plane_state: atomic plane state to update
4315  *
4316  * Return
4317  *     0 - scaler_usage updated successfully
4318  *    error - requested scaling cannot be supported or other error condition
4319  */
4320 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4321 				   struct intel_plane_state *plane_state)
4322 {
4323 
4324 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4325 	struct intel_plane *intel_plane =
4326 		to_intel_plane(plane_state->base.plane);
4327 	struct drm_framebuffer *fb = plane_state->base.fb;
4328 	int ret;
4329 
4330 	bool force_detach = !fb || !plane_state->visible;
4331 
4332 	DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n",
4333 		      intel_plane->base.base.id, intel_plane->base.name,
4334 		      intel_crtc->pipe, drm_plane_index(&intel_plane->base));
4335 
4336 	ret = skl_update_scaler(crtc_state, force_detach,
4337 				drm_plane_index(&intel_plane->base),
4338 				&plane_state->scaler_id,
4339 				plane_state->base.rotation,
4340 				drm_rect_width(&plane_state->src) >> 16,
4341 				drm_rect_height(&plane_state->src) >> 16,
4342 				drm_rect_width(&plane_state->dst),
4343 				drm_rect_height(&plane_state->dst));
4344 
4345 	if (ret || plane_state->scaler_id < 0)
4346 		return ret;
4347 
4348 	/* check colorkey */
4349 	if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
4350 		DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
4351 			      intel_plane->base.base.id,
4352 			      intel_plane->base.name);
4353 		return -EINVAL;
4354 	}
4355 
4356 	/* Check src format */
4357 	switch (fb->pixel_format) {
4358 	case DRM_FORMAT_RGB565:
4359 	case DRM_FORMAT_XBGR8888:
4360 	case DRM_FORMAT_XRGB8888:
4361 	case DRM_FORMAT_ABGR8888:
4362 	case DRM_FORMAT_ARGB8888:
4363 	case DRM_FORMAT_XRGB2101010:
4364 	case DRM_FORMAT_XBGR2101010:
4365 	case DRM_FORMAT_YUYV:
4366 	case DRM_FORMAT_YVYU:
4367 	case DRM_FORMAT_UYVY:
4368 	case DRM_FORMAT_VYUY:
4369 		break;
4370 	default:
4371 		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
4372 			      intel_plane->base.base.id, intel_plane->base.name,
4373 			      fb->base.id, fb->pixel_format);
4374 		return -EINVAL;
4375 	}
4376 
4377 	return 0;
4378 }
4379 
4380 static void skylake_scaler_disable(struct intel_crtc *crtc)
4381 {
4382 	int i;
4383 
4384 	for (i = 0; i < crtc->num_scalers; i++)
4385 		skl_detach_scaler(crtc, i);
4386 }
4387 
4388 static void skylake_pfit_enable(struct intel_crtc *crtc)
4389 {
4390 	struct drm_device *dev = crtc->base.dev;
4391 	struct drm_i915_private *dev_priv = dev->dev_private;
4392 	int pipe = crtc->pipe;
4393 	struct intel_crtc_scaler_state *scaler_state =
4394 		&crtc->config->scaler_state;
4395 
4396 	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
4397 
4398 	if (crtc->config->pch_pfit.enabled) {
4399 		int id;
4400 
4401 		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
4402 			DRM_ERROR("Requesting pfit without getting a scaler first\n");
4403 			return;
4404 		}
4405 
4406 		id = scaler_state->scaler_id;
4407 		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
4408 			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
4409 		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
4410 		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
4411 
4412 		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
4413 	}
4414 }
4415 
4416 static void ironlake_pfit_enable(struct intel_crtc *crtc)
4417 {
4418 	struct drm_device *dev = crtc->base.dev;
4419 	struct drm_i915_private *dev_priv = dev->dev_private;
4420 	int pipe = crtc->pipe;
4421 
4422 	if (crtc->config->pch_pfit.enabled) {
4423 		/* Force use of hard-coded filter coefficients
4424 		 * as some pre-programmed values are broken,
4425 		 * e.g. x201.
4426 		 */
4427 		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
4428 			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
4429 						 PF_PIPE_SEL_IVB(pipe));
4430 		else
4431 			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
4432 		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
4433 		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
4434 	}
4435 }
4436 
4437 void hsw_enable_ips(struct intel_crtc *crtc)
4438 {
4439 	struct drm_device *dev = crtc->base.dev;
4440 	struct drm_i915_private *dev_priv = dev->dev_private;
4441 
4442 	if (!crtc->config->ips_enabled)
4443 		return;
4444 
4445 	/*
4446 	 * We can only enable IPS after we enable a plane and wait for a vblank
4447 	 * This function is called from post_plane_update, which is run after
4448 	 * a vblank wait.
4449 	 */
4450 
4451 	assert_plane_enabled(dev_priv, crtc->plane);
4452 	if (IS_BROADWELL(dev)) {
4453 		mutex_lock(&dev_priv->rps.hw_lock);
4454 		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
4455 		mutex_unlock(&dev_priv->rps.hw_lock);
4456 		/* Quoting Art Runyan: "it's not safe to expect any particular
4457 		 * value in IPS_CTL bit 31 after enabling IPS through the
4458 		 * mailbox." Moreover, the mailbox may return a bogus state,
4459 		 * so we need to just enable it and continue on.
4460 		 */
4461 	} else {
4462 		I915_WRITE(IPS_CTL, IPS_ENABLE);
4463 		/* The bit only becomes 1 in the next vblank, so this wait here
4464 		 * is essentially intel_wait_for_vblank. If we don't have this
4465 		 * and don't wait for vblanks until the end of crtc_enable, then
4466 		 * the HW state readout code will complain that the expected
4467 		 * IPS_CTL value is not the one we read. */
4468 		if (intel_wait_for_register(dev_priv,
4469 					    IPS_CTL, IPS_ENABLE, IPS_ENABLE,
4470 					    50))
4471 			DRM_ERROR("Timed out waiting for IPS enable\n");
4472 	}
4473 }
4474 
4475 void hsw_disable_ips(struct intel_crtc *crtc)
4476 {
4477 	struct drm_device *dev = crtc->base.dev;
4478 	struct drm_i915_private *dev_priv = dev->dev_private;
4479 
4480 	if (!crtc->config->ips_enabled)
4481 		return;
4482 
4483 	assert_plane_enabled(dev_priv, crtc->plane);
4484 	if (IS_BROADWELL(dev)) {
4485 		mutex_lock(&dev_priv->rps.hw_lock);
4486 		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
4487 		mutex_unlock(&dev_priv->rps.hw_lock);
4488 		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
4489 		if (intel_wait_for_register(dev_priv,
4490 					    IPS_CTL, IPS_ENABLE, 0,
4491 					    42))
4492 			DRM_ERROR("Timed out waiting for IPS disable\n");
4493 	} else {
4494 		I915_WRITE(IPS_CTL, 0);
4495 		POSTING_READ(IPS_CTL);
4496 	}
4497 
4498 	/* We need to wait for a vblank before we can disable the plane. */
4499 	intel_wait_for_vblank(dev, crtc->pipe);
4500 }
4501 
4502 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
4503 {
4504 	if (intel_crtc->overlay) {
4505 		struct drm_device *dev = intel_crtc->base.dev;
4506 		struct drm_i915_private *dev_priv = dev->dev_private;
4507 
4508 		mutex_lock(&dev->struct_mutex);
4509 		dev_priv->mm.interruptible = false;
4510 		(void) intel_overlay_switch_off(intel_crtc->overlay);
4511 		dev_priv->mm.interruptible = true;
4512 		mutex_unlock(&dev->struct_mutex);
4513 	}
4514 
4515 	/* Let userspace switch the overlay on again. In most cases userspace
4516 	 * has to recompute where to put it anyway.
4517 	 */
4518 }
4519 
4520 /**
4521  * intel_post_enable_primary - Perform operations after enabling primary plane
4522  * @crtc: the CRTC whose primary plane was just enabled
4523  *
4524  * Performs potentially sleeping operations that must be done after the primary
4525  * plane is enabled, such as updating FBC and IPS.  Note that this may be
4526  * called due to an explicit primary plane update, or due to an implicit
4527  * re-enable that is caused when a sprite plane is updated to no longer
4528  * completely hide the primary plane.
4529  */
4530 static void
4531 intel_post_enable_primary(struct drm_crtc *crtc)
4532 {
4533 	struct drm_device *dev = crtc->dev;
4534 	struct drm_i915_private *dev_priv = dev->dev_private;
4535 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4536 	int pipe = intel_crtc->pipe;
4537 
4538 	/*
4539 	 * FIXME IPS should be fine as long as one plane is
4540 	 * enabled, but in practice it seems to have problems
4541 	 * when going from primary only to sprite only and vice
4542 	 * versa.
4543 	 */
4544 	hsw_enable_ips(intel_crtc);
4545 
4546 	/*
4547 	 * Gen2 reports pipe underruns whenever all planes are disabled.
4548 	 * So don't enable underrun reporting before at least some planes
4549 	 * are enabled.
4550 	 * FIXME: Need to fix the logic to work when we turn off all planes
4551 	 * but leave the pipe running.
4552 	 */
4553 	if (IS_GEN2(dev))
4554 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4555 
4556 	/* Underruns don't always raise interrupts, so check manually. */
4557 	intel_check_cpu_fifo_underruns(dev_priv);
4558 	intel_check_pch_fifo_underruns(dev_priv);
4559 }
4560 
4561 /* FIXME move all this to pre_plane_update() with proper state tracking */
4562 static void
4563 intel_pre_disable_primary(struct drm_crtc *crtc)
4564 {
4565 	struct drm_device *dev = crtc->dev;
4566 	struct drm_i915_private *dev_priv = dev->dev_private;
4567 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4568 	int pipe = intel_crtc->pipe;
4569 
4570 	/*
4571 	 * Gen2 reports pipe underruns whenever all planes are disabled.
4572 	 * So disable underrun reporting before all the planes get disabled.
4573 	 * FIXME: Need to fix the logic to work when we turn off all planes
4574 	 * but leave the pipe running.
4575 	 */
4576 	if (IS_GEN2(dev))
4577 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4578 
4579 	/*
4580 	 * FIXME IPS should be fine as long as one plane is
4581 	 * enabled, but in practice it seems to have problems
4582 	 * when going from primary only to sprite only and vice
4583 	 * versa.
4584 	 */
4585 	hsw_disable_ips(intel_crtc);
4586 }
4587 
4588 /* FIXME get rid of this and use pre_plane_update */
4589 static void
4590 intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
4591 {
4592 	struct drm_device *dev = crtc->dev;
4593 	struct drm_i915_private *dev_priv = dev->dev_private;
4594 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4595 	int pipe = intel_crtc->pipe;
4596 
4597 	intel_pre_disable_primary(crtc);
4598 
4599 	/*
4600 	 * Vblank time updates from the shadow to live plane control register
4601 	 * are blocked if the memory self-refresh mode is active at that
4602 	 * moment. So to make sure the plane gets truly disabled, disable
4603 	 * first the self-refresh mode. The self-refresh enable bit in turn
4604 	 * will be checked/applied by the HW only at the next frame start
4605 	 * event which is after the vblank start event, so we need to have a
4606 	 * wait-for-vblank between disabling the plane and the pipe.
4607 	 */
4608 	if (HAS_GMCH_DISPLAY(dev)) {
4609 		intel_set_memory_cxsr(dev_priv, false);
4610 		dev_priv->wm.vlv.cxsr = false;
4611 		intel_wait_for_vblank(dev, pipe);
4612 	}
4613 }
4614 
4615 static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
4616 {
4617 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
4618 	struct drm_atomic_state *old_state = old_crtc_state->base.state;
4619 	struct intel_crtc_state *pipe_config =
4620 		to_intel_crtc_state(crtc->base.state);
4621 	struct drm_device *dev = crtc->base.dev;
4622 	struct drm_plane *primary = crtc->base.primary;
4623 	struct drm_plane_state *old_pri_state =
4624 		drm_atomic_get_existing_plane_state(old_state, primary);
4625 
4626 	intel_frontbuffer_flip(dev, pipe_config->fb_bits);
4627 
4628 	crtc->wm.cxsr_allowed = true;
4629 
4630 	if (pipe_config->update_wm_post && pipe_config->base.active)
4631 		intel_update_watermarks(&crtc->base);
4632 
4633 	if (old_pri_state) {
4634 		struct intel_plane_state *primary_state =
4635 			to_intel_plane_state(primary->state);
4636 		struct intel_plane_state *old_primary_state =
4637 			to_intel_plane_state(old_pri_state);
4638 
4639 		intel_fbc_post_update(crtc);
4640 
4641 		if (primary_state->visible &&
4642 		    (needs_modeset(&pipe_config->base) ||
4643 		     !old_primary_state->visible))
4644 			intel_post_enable_primary(&crtc->base);
4645 	}
4646 }
4647 
4648 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
4649 {
4650 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
4651 	struct drm_device *dev = crtc->base.dev;
4652 	struct drm_i915_private *dev_priv = dev->dev_private;
4653 	struct intel_crtc_state *pipe_config =
4654 		to_intel_crtc_state(crtc->base.state);
4655 	struct drm_atomic_state *old_state = old_crtc_state->base.state;
4656 	struct drm_plane *primary = crtc->base.primary;
4657 	struct drm_plane_state *old_pri_state =
4658 		drm_atomic_get_existing_plane_state(old_state, primary);
4659 	bool modeset = needs_modeset(&pipe_config->base);
4660 
4661 	if (old_pri_state) {
4662 		struct intel_plane_state *primary_state =
4663 			to_intel_plane_state(primary->state);
4664 		struct intel_plane_state *old_primary_state =
4665 			to_intel_plane_state(old_pri_state);
4666 
4667 		intel_fbc_pre_update(crtc, pipe_config, primary_state);
4668 
4669 		if (old_primary_state->visible &&
4670 		    (modeset || !primary_state->visible))
4671 			intel_pre_disable_primary(&crtc->base);
4672 	}
4673 
4674 	if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev)) {
4675 		crtc->wm.cxsr_allowed = false;
4676 
4677 		/*
4678 		 * Vblank time updates from the shadow to live plane control register
4679 		 * are blocked if the memory self-refresh mode is active at that
4680 		 * moment. So to make sure the plane gets truly disabled, disable
4681 		 * first the self-refresh mode. The self-refresh enable bit in turn
4682 		 * will be checked/applied by the HW only at the next frame start
4683 		 * event which is after the vblank start event, so we need to have a
4684 		 * wait-for-vblank between disabling the plane and the pipe.
4685 		 */
4686 		if (old_crtc_state->base.active) {
4687 			intel_set_memory_cxsr(dev_priv, false);
4688 			dev_priv->wm.vlv.cxsr = false;
4689 			intel_wait_for_vblank(dev, crtc->pipe);
4690 		}
4691 	}
4692 
4693 	/*
4694 	 * IVB workaround: must disable low power watermarks for at least
4695 	 * one frame before enabling scaling.  LP watermarks can be re-enabled
4696 	 * when scaling is disabled.
4697 	 *
4698 	 * WaCxSRDisabledForSpriteScaling:ivb
4699 	 */
4700 	if (pipe_config->disable_lp_wm) {
4701 		ilk_disable_lp_wm(dev);
4702 		intel_wait_for_vblank(dev, crtc->pipe);
4703 	}
4704 
4705 	/*
4706 	 * If we're doing a modeset, we're done.  No need to do any pre-vblank
4707 	 * watermark programming here.
4708 	 */
4709 	if (needs_modeset(&pipe_config->base))
4710 		return;
4711 
4712 	/*
4713 	 * For platforms that support atomic watermarks, program the
4714 	 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
4715 	 * will be the intermediate values that are safe for both pre- and
4716 	 * post- vblank; when vblank happens, the 'active' values will be set
4717 	 * to the final 'target' values and we'll do this again to get the
4718 	 * optimal watermarks.  For gen9+ platforms, the values we program here
4719 	 * will be the final target values which will get automatically latched
4720 	 * at vblank time; no further programming will be necessary.
4721 	 *
4722 	 * If a platform hasn't been transitioned to atomic watermarks yet,
4723 	 * we'll continue to update watermarks the old way, if flags tell
4724 	 * us to.
4725 	 */
4726 	if (dev_priv->display.initial_watermarks != NULL)
4727 		dev_priv->display.initial_watermarks(pipe_config);
4728 	else if (pipe_config->update_wm_pre)
4729 		intel_update_watermarks(&crtc->base);
4730 }
4731 
4732 static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
4733 {
4734 	struct drm_device *dev = crtc->dev;
4735 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4736 	struct drm_plane *p;
4737 	int pipe = intel_crtc->pipe;
4738 
4739 	intel_crtc_dpms_overlay_disable(intel_crtc);
4740 
4741 	drm_for_each_plane_mask(p, dev, plane_mask)
4742 		to_intel_plane(p)->disable_plane(p, crtc);
4743 
4744 	/*
4745 	 * FIXME: Once we grow proper nuclear flip support out of this we need
4746 	 * to compute the mask of flip planes precisely. For the time being
4747 	 * consider this a flip to a NULL plane.
4748 	 */
4749 	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
4750 }
4751 
4752 static void ironlake_crtc_enable(struct drm_crtc *crtc)
4753 {
4754 	struct drm_device *dev = crtc->dev;
4755 	struct drm_i915_private *dev_priv = dev->dev_private;
4756 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4757 	struct intel_encoder *encoder;
4758 	int pipe = intel_crtc->pipe;
4759 	struct intel_crtc_state *pipe_config =
4760 		to_intel_crtc_state(crtc->state);
4761 
4762 	if (WARN_ON(intel_crtc->active))
4763 		return;
4764 
4765 	/*
4766 	 * Sometimes spurious CPU pipe underruns happen during FDI
4767 	 * training, at least with VGA+HDMI cloning. Suppress them.
4768 	 *
4769 	 * On ILK we get an occasional spurious CPU pipe underruns
4770 	 * between eDP port A enable and vdd enable. Also PCH port
4771 	 * enable seems to result in the occasional CPU pipe underrun.
4772 	 *
4773 	 * Spurious PCH underruns also occur during PCH enabling.
4774 	 */
4775 	if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
4776 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4777 	if (intel_crtc->config->has_pch_encoder)
4778 		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
4779 
4780 	if (intel_crtc->config->has_pch_encoder)
4781 		intel_prepare_shared_dpll(intel_crtc);
4782 
4783 	if (intel_crtc->config->has_dp_encoder)
4784 		intel_dp_set_m_n(intel_crtc, M1_N1);
4785 
4786 	intel_set_pipe_timings(intel_crtc);
4787 	intel_set_pipe_src_size(intel_crtc);
4788 
4789 	if (intel_crtc->config->has_pch_encoder) {
4790 		intel_cpu_transcoder_set_m_n(intel_crtc,
4791 				     &intel_crtc->config->fdi_m_n, NULL);
4792 	}
4793 
4794 	ironlake_set_pipeconf(crtc);
4795 
4796 	intel_crtc->active = true;
4797 
4798 	for_each_encoder_on_crtc(dev, crtc, encoder)
4799 		if (encoder->pre_enable)
4800 			encoder->pre_enable(encoder);
4801 
4802 	if (intel_crtc->config->has_pch_encoder) {
4803 		/* Note: FDI PLL enabling _must_ be done before we enable the
4804 		 * cpu pipes, hence this is separate from all the other fdi/pch
4805 		 * enabling. */
4806 		ironlake_fdi_pll_enable(intel_crtc);
4807 	} else {
4808 		assert_fdi_tx_disabled(dev_priv, pipe);
4809 		assert_fdi_rx_disabled(dev_priv, pipe);
4810 	}
4811 
4812 	ironlake_pfit_enable(intel_crtc);
4813 
4814 	/*
4815 	 * On ILK+ LUT must be loaded before the pipe is running but with
4816 	 * clocks enabled
4817 	 */
4818 	intel_color_load_luts(&pipe_config->base);
4819 
4820 	if (dev_priv->display.initial_watermarks != NULL)
4821 		dev_priv->display.initial_watermarks(intel_crtc->config);
4822 	intel_enable_pipe(intel_crtc);
4823 
4824 	if (intel_crtc->config->has_pch_encoder)
4825 		ironlake_pch_enable(crtc);
4826 
4827 	assert_vblank_disabled(crtc);
4828 	drm_crtc_vblank_on(crtc);
4829 
4830 	for_each_encoder_on_crtc(dev, crtc, encoder)
4831 		encoder->enable(encoder);
4832 
4833 	if (HAS_PCH_CPT(dev))
4834 		cpt_verify_modeset(dev, intel_crtc->pipe);
4835 
4836 	/* Must wait for vblank to avoid spurious PCH FIFO underruns */
4837 	if (intel_crtc->config->has_pch_encoder)
4838 		intel_wait_for_vblank(dev, pipe);
4839 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4840 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
4841 }
4842 
4843 /* IPS only exists on ULT machines and is tied to pipe A. */
4844 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4845 {
4846 	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4847 }
4848 
4849 static void haswell_crtc_enable(struct drm_crtc *crtc)
4850 {
4851 	struct drm_device *dev = crtc->dev;
4852 	struct drm_i915_private *dev_priv = dev->dev_private;
4853 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4854 	struct intel_encoder *encoder;
4855 	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
4856 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
4857 	struct intel_crtc_state *pipe_config =
4858 		to_intel_crtc_state(crtc->state);
4859 
4860 	if (WARN_ON(intel_crtc->active))
4861 		return;
4862 
4863 	if (intel_crtc->config->has_pch_encoder)
4864 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4865 						      false);
4866 
4867 	for_each_encoder_on_crtc(dev, crtc, encoder)
4868 		if (encoder->pre_pll_enable)
4869 			encoder->pre_pll_enable(encoder);
4870 
4871 	if (intel_crtc->config->shared_dpll)
4872 		intel_enable_shared_dpll(intel_crtc);
4873 
4874 	if (intel_crtc->config->has_dp_encoder)
4875 		intel_dp_set_m_n(intel_crtc, M1_N1);
4876 
4877 	if (!intel_crtc->config->has_dsi_encoder)
4878 		intel_set_pipe_timings(intel_crtc);
4879 
4880 	intel_set_pipe_src_size(intel_crtc);
4881 
4882 	if (cpu_transcoder != TRANSCODER_EDP &&
4883 	    !transcoder_is_dsi(cpu_transcoder)) {
4884 		I915_WRITE(PIPE_MULT(cpu_transcoder),
4885 			   intel_crtc->config->pixel_multiplier - 1);
4886 	}
4887 
4888 	if (intel_crtc->config->has_pch_encoder) {
4889 		intel_cpu_transcoder_set_m_n(intel_crtc,
4890 				     &intel_crtc->config->fdi_m_n, NULL);
4891 	}
4892 
4893 	if (!intel_crtc->config->has_dsi_encoder)
4894 		haswell_set_pipeconf(crtc);
4895 
4896 	haswell_set_pipemisc(crtc);
4897 
4898 	intel_color_set_csc(&pipe_config->base);
4899 
4900 	intel_crtc->active = true;
4901 
4902 	if (intel_crtc->config->has_pch_encoder)
4903 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4904 	else
4905 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4906 
4907 	for_each_encoder_on_crtc(dev, crtc, encoder) {
4908 		if (encoder->pre_enable)
4909 			encoder->pre_enable(encoder);
4910 	}
4911 
4912 	if (intel_crtc->config->has_pch_encoder)
4913 		dev_priv->display.fdi_link_train(crtc);
4914 
4915 	if (!intel_crtc->config->has_dsi_encoder)
4916 		intel_ddi_enable_pipe_clock(intel_crtc);
4917 
4918 	if (INTEL_INFO(dev)->gen >= 9)
4919 		skylake_pfit_enable(intel_crtc);
4920 	else
4921 		ironlake_pfit_enable(intel_crtc);
4922 
4923 	/*
4924 	 * On ILK+ LUT must be loaded before the pipe is running but with
4925 	 * clocks enabled
4926 	 */
4927 	intel_color_load_luts(&pipe_config->base);
4928 
4929 	intel_ddi_set_pipe_settings(crtc);
4930 	if (!intel_crtc->config->has_dsi_encoder)
4931 		intel_ddi_enable_transcoder_func(crtc);
4932 
4933 	if (dev_priv->display.initial_watermarks != NULL)
4934 		dev_priv->display.initial_watermarks(pipe_config);
4935 	else
4936 		intel_update_watermarks(crtc);
4937 
4938 	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
4939 	if (!intel_crtc->config->has_dsi_encoder)
4940 		intel_enable_pipe(intel_crtc);
4941 
4942 	if (intel_crtc->config->has_pch_encoder)
4943 		lpt_pch_enable(crtc);
4944 
4945 	if (intel_crtc->config->dp_encoder_is_mst)
4946 		intel_ddi_set_vc_payload_alloc(crtc, true);
4947 
4948 	assert_vblank_disabled(crtc);
4949 	drm_crtc_vblank_on(crtc);
4950 
4951 	for_each_encoder_on_crtc(dev, crtc, encoder) {
4952 		encoder->enable(encoder);
4953 		intel_opregion_notify_encoder(encoder, true);
4954 	}
4955 
4956 	if (intel_crtc->config->has_pch_encoder) {
4957 		intel_wait_for_vblank(dev, pipe);
4958 		intel_wait_for_vblank(dev, pipe);
4959 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4960 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4961 						      true);
4962 	}
4963 
4964 	/* If we change the relative order between pipe/planes enabling, we need
4965 	 * to change the workaround. */
4966 	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
4967 	if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
4968 		intel_wait_for_vblank(dev, hsw_workaround_pipe);
4969 		intel_wait_for_vblank(dev, hsw_workaround_pipe);
4970 	}
4971 }
4972 
4973 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
4974 {
4975 	struct drm_device *dev = crtc->base.dev;
4976 	struct drm_i915_private *dev_priv = dev->dev_private;
4977 	int pipe = crtc->pipe;
4978 
4979 	/* To avoid upsetting the power well on haswell, only disable the pfit if
4980 	 * it's in use. The hw state code will make sure we get this right. */
4981 	if (force || crtc->config->pch_pfit.enabled) {
4982 		I915_WRITE(PF_CTL(pipe), 0);
4983 		I915_WRITE(PF_WIN_POS(pipe), 0);
4984 		I915_WRITE(PF_WIN_SZ(pipe), 0);
4985 	}
4986 }
4987 
4988 static void ironlake_crtc_disable(struct drm_crtc *crtc)
4989 {
4990 	struct drm_device *dev = crtc->dev;
4991 	struct drm_i915_private *dev_priv = dev->dev_private;
4992 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4993 	struct intel_encoder *encoder;
4994 	int pipe = intel_crtc->pipe;
4995 
4996 	/*
4997 	 * Sometimes spurious CPU pipe underruns happen when the
4998 	 * pipe is already disabled, but FDI RX/TX is still enabled.
4999 	 * Happens at least with VGA+HDMI cloning. Suppress them.
5000 	 */
5001 	if (intel_crtc->config->has_pch_encoder) {
5002 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5003 		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
5004 	}
5005 
5006 	for_each_encoder_on_crtc(dev, crtc, encoder)
5007 		encoder->disable(encoder);
5008 
5009 	drm_crtc_vblank_off(crtc);
5010 	assert_vblank_disabled(crtc);
5011 
5012 	intel_disable_pipe(intel_crtc);
5013 
5014 	ironlake_pfit_disable(intel_crtc, false);
5015 
5016 	if (intel_crtc->config->has_pch_encoder)
5017 		ironlake_fdi_disable(crtc);
5018 
5019 	for_each_encoder_on_crtc(dev, crtc, encoder)
5020 		if (encoder->post_disable)
5021 			encoder->post_disable(encoder);
5022 
5023 	if (intel_crtc->config->has_pch_encoder) {
5024 		ironlake_disable_pch_transcoder(dev_priv, pipe);
5025 
5026 		if (HAS_PCH_CPT(dev)) {
5027 			i915_reg_t reg;
5028 			u32 temp;
5029 
5030 			/* disable TRANS_DP_CTL */
5031 			reg = TRANS_DP_CTL(pipe);
5032 			temp = I915_READ(reg);
5033 			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
5034 				  TRANS_DP_PORT_SEL_MASK);
5035 			temp |= TRANS_DP_PORT_SEL_NONE;
5036 			I915_WRITE(reg, temp);
5037 
5038 			/* disable DPLL_SEL */
5039 			temp = I915_READ(PCH_DPLL_SEL);
5040 			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
5041 			I915_WRITE(PCH_DPLL_SEL, temp);
5042 		}
5043 
5044 		ironlake_fdi_pll_disable(intel_crtc);
5045 	}
5046 
5047 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5048 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
5049 }
5050 
5051 static void haswell_crtc_disable(struct drm_crtc *crtc)
5052 {
5053 	struct drm_device *dev = crtc->dev;
5054 	struct drm_i915_private *dev_priv = dev->dev_private;
5055 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5056 	struct intel_encoder *encoder;
5057 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
5058 
5059 	if (intel_crtc->config->has_pch_encoder)
5060 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5061 						      false);
5062 
5063 	for_each_encoder_on_crtc(dev, crtc, encoder) {
5064 		intel_opregion_notify_encoder(encoder, false);
5065 		encoder->disable(encoder);
5066 	}
5067 
5068 	drm_crtc_vblank_off(crtc);
5069 	assert_vblank_disabled(crtc);
5070 
5071 	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
5072 	if (!intel_crtc->config->has_dsi_encoder)
5073 		intel_disable_pipe(intel_crtc);
5074 
5075 	if (intel_crtc->config->dp_encoder_is_mst)
5076 		intel_ddi_set_vc_payload_alloc(crtc, false);
5077 
5078 	if (!intel_crtc->config->has_dsi_encoder)
5079 		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
5080 
5081 	if (INTEL_INFO(dev)->gen >= 9)
5082 		skylake_scaler_disable(intel_crtc);
5083 	else
5084 		ironlake_pfit_disable(intel_crtc, false);
5085 
5086 	if (!intel_crtc->config->has_dsi_encoder)
5087 		intel_ddi_disable_pipe_clock(intel_crtc);
5088 
5089 	for_each_encoder_on_crtc(dev, crtc, encoder)
5090 		if (encoder->post_disable)
5091 			encoder->post_disable(encoder);
5092 
5093 	if (intel_crtc->config->has_pch_encoder) {
5094 		lpt_disable_pch_transcoder(dev_priv);
5095 		lpt_disable_iclkip(dev_priv);
5096 		intel_ddi_fdi_disable(crtc);
5097 
5098 		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5099 						      true);
5100 	}
5101 }
5102 
5103 static void i9xx_pfit_enable(struct intel_crtc *crtc)
5104 {
5105 	struct drm_device *dev = crtc->base.dev;
5106 	struct drm_i915_private *dev_priv = dev->dev_private;
5107 	struct intel_crtc_state *pipe_config = crtc->config;
5108 
5109 	if (!pipe_config->gmch_pfit.control)
5110 		return;
5111 
5112 	/*
5113 	 * The panel fitter should only be adjusted whilst the pipe is disabled,
5114 	 * according to the register description and the PRM.
5115 	 */
5116 	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
5117 	assert_pipe_disabled(dev_priv, crtc->pipe);
5118 
5119 	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
5120 	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
5121 
5122 	/* Border color in case we don't scale up to the full screen. Black by
5123 	 * default, change to something else for debugging. */
5124 	I915_WRITE(BCLRPAT(crtc->pipe), 0);
5125 }
5126 
5127 static enum intel_display_power_domain port_to_power_domain(enum port port)
5128 {
5129 	switch (port) {
5130 	case PORT_A:
5131 		return POWER_DOMAIN_PORT_DDI_A_LANES;
5132 	case PORT_B:
5133 		return POWER_DOMAIN_PORT_DDI_B_LANES;
5134 	case PORT_C:
5135 		return POWER_DOMAIN_PORT_DDI_C_LANES;
5136 	case PORT_D:
5137 		return POWER_DOMAIN_PORT_DDI_D_LANES;
5138 	case PORT_E:
5139 		return POWER_DOMAIN_PORT_DDI_E_LANES;
5140 	default:
5141 		MISSING_CASE(port);
5142 		return POWER_DOMAIN_PORT_OTHER;
5143 	}
5144 }
5145 
5146 static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
5147 {
5148 	switch (port) {
5149 	case PORT_A:
5150 		return POWER_DOMAIN_AUX_A;
5151 	case PORT_B:
5152 		return POWER_DOMAIN_AUX_B;
5153 	case PORT_C:
5154 		return POWER_DOMAIN_AUX_C;
5155 	case PORT_D:
5156 		return POWER_DOMAIN_AUX_D;
5157 	case PORT_E:
5158 		/* FIXME: Check VBT for actual wiring of PORT E */
5159 		return POWER_DOMAIN_AUX_D;
5160 	default:
5161 		MISSING_CASE(port);
5162 		return POWER_DOMAIN_AUX_A;
5163 	}
5164 }
5165 
5166 enum intel_display_power_domain
5167 intel_display_port_power_domain(struct intel_encoder *intel_encoder)
5168 {
5169 	struct drm_device *dev = intel_encoder->base.dev;
5170 	struct intel_digital_port *intel_dig_port;
5171 
5172 	switch (intel_encoder->type) {
5173 	case INTEL_OUTPUT_UNKNOWN:
5174 		/* Only DDI platforms should ever use this output type */
5175 		WARN_ON_ONCE(!HAS_DDI(dev));
5176 	case INTEL_OUTPUT_DISPLAYPORT:
5177 	case INTEL_OUTPUT_HDMI:
5178 	case INTEL_OUTPUT_EDP:
5179 		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
5180 		return port_to_power_domain(intel_dig_port->port);
5181 	case INTEL_OUTPUT_DP_MST:
5182 		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
5183 		return port_to_power_domain(intel_dig_port->port);
5184 	case INTEL_OUTPUT_ANALOG:
5185 		return POWER_DOMAIN_PORT_CRT;
5186 	case INTEL_OUTPUT_DSI:
5187 		return POWER_DOMAIN_PORT_DSI;
5188 	default:
5189 		return POWER_DOMAIN_PORT_OTHER;
5190 	}
5191 }
5192 
5193 enum intel_display_power_domain
5194 intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
5195 {
5196 	struct drm_device *dev = intel_encoder->base.dev;
5197 	struct intel_digital_port *intel_dig_port;
5198 
5199 	switch (intel_encoder->type) {
5200 	case INTEL_OUTPUT_UNKNOWN:
5201 	case INTEL_OUTPUT_HDMI:
5202 		/*
5203 		 * Only DDI platforms should ever use these output types.
5204 		 * We can get here after the HDMI detect code has already set
5205 		 * the type of the shared encoder. Since we can't be sure of
5206 		 * the status of the given connectors, play it safe and run
5207 		 * the DP detection too.
5208 		 */
5209 		WARN_ON_ONCE(!HAS_DDI(dev));
5210 	case INTEL_OUTPUT_DISPLAYPORT:
5211 	case INTEL_OUTPUT_EDP:
5212 		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
5213 		return port_to_aux_power_domain(intel_dig_port->port);
5214 	case INTEL_OUTPUT_DP_MST:
5215 		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
5216 		return port_to_aux_power_domain(intel_dig_port->port);
5217 	default:
5218 		MISSING_CASE(intel_encoder->type);
5219 		return POWER_DOMAIN_AUX_A;
5220 	}
5221 }
5222 
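/*
 * Illustrative sketch (not exhaustive): an eDP panel on pipe A using the
 * pch pfit and a shared DPLL would accumulate roughly
 *   BIT(POWER_DOMAIN_PIPE_A) | BIT(POWER_DOMAIN_TRANSCODER_EDP) |
 *   BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) |
 *   BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | BIT(POWER_DOMAIN_PLLS),
 * with the exact port domain depending on the encoder's wiring.
 */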
5223 static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
5224 					    struct intel_crtc_state *crtc_state)
5225 {
5226 	struct drm_device *dev = crtc->dev;
5227 	struct drm_encoder *encoder;
5228 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5229 	enum i915_pipe pipe = intel_crtc->pipe;
5230 	unsigned long mask;
5231 	enum transcoder transcoder = crtc_state->cpu_transcoder;
5232 
5233 	if (!crtc_state->base.active)
5234 		return 0;
5235 
5236 	mask = BIT(POWER_DOMAIN_PIPE(pipe));
5237 	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
5238 	if (crtc_state->pch_pfit.enabled ||
5239 	    crtc_state->pch_pfit.force_thru)
5240 		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
5241 
5242 	drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
5243 		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
5244 
5245 		mask |= BIT(intel_display_port_power_domain(intel_encoder));
5246 	}
5247 
5248 	if (crtc_state->shared_dpll)
5249 		mask |= BIT(POWER_DOMAIN_PLLS);
5250 
5251 	return mask;
5252 }
5253 
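/*
 * Set arithmetic example (illustrative): with old = {PIPE_A, PLLS} and
 * new = {PIPE_A, PORT_DDI_A_LANES} we grab a reference on
 * PORT_DDI_A_LANES here and return {PLLS} for the caller to drop after
 * the modeset, so domains that remain in use never power down in
 * between.
 */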
5254 static unsigned long
5255 modeset_get_crtc_power_domains(struct drm_crtc *crtc,
5256 			       struct intel_crtc_state *crtc_state)
5257 {
5258 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5259 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5260 	enum intel_display_power_domain domain;
5261 	unsigned long domains, new_domains, old_domains;
5262 
5263 	old_domains = intel_crtc->enabled_power_domains;
5264 	intel_crtc->enabled_power_domains = new_domains =
5265 		get_crtc_power_domains(crtc, crtc_state);
5266 
5267 	domains = new_domains & ~old_domains;
5268 
5269 	for_each_power_domain(domain, domains)
5270 		intel_display_power_get(dev_priv, domain);
5271 
5272 	return old_domains & ~new_domains;
5273 }
5274 
5275 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
5276 				      unsigned long domains)
5277 {
5278 	enum intel_display_power_domain domain;
5279 
5280 	for_each_power_domain(domain, domains)
5281 		intel_display_power_put(dev_priv, domain);
5282 }
5283 
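/*
 * Guardband examples (illustrative): with max_cdclk_freq = 450000 kHz,
 * HSW/BDW/gen9 allow a dotclock up to 450000 kHz, CHV up to
 * 450000 * 95 / 100 = 427500 kHz, gen4+ up to 405000 kHz, and gen2/3
 * up to 2 * 405000 = 810000 kHz thanks to double wide mode.
 */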
5284 static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
5285 {
5286 	int max_cdclk_freq = dev_priv->max_cdclk_freq;
5287 
5288 	if (INTEL_INFO(dev_priv)->gen >= 9 ||
5289 	    IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5290 		return max_cdclk_freq;
5291 	else if (IS_CHERRYVIEW(dev_priv))
5292 		return max_cdclk_freq*95/100;
5293 	else if (INTEL_INFO(dev_priv)->gen < 4)
5294 		return 2*max_cdclk_freq*90/100;
5295 	else
5296 		return max_cdclk_freq*90/100;
5297 }
5298 
5299 static int skl_calc_cdclk(int max_pixclk, int vco);
5300 
5301 static void intel_update_max_cdclk(struct drm_device *dev)
5302 {
5303 	struct drm_i915_private *dev_priv = dev->dev_private;
5304 
5305 	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
5306 		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
5307 		int max_cdclk, vco;
5308 
5309 		vco = dev_priv->skl_preferred_vco_freq;
5310 		WARN_ON(vco != 8100000 && vco != 8640000);
5311 
5312 		/*
5313 		 * Use the lower (vco 8640) cdclk values as a
5314 		 * first guess. skl_calc_cdclk() will correct it
5315 		 * if the preferred vco is 8100 instead.
5316 		 */
5317 		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
5318 			max_cdclk = 617143;
5319 		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
5320 			max_cdclk = 540000;
5321 		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
5322 			max_cdclk = 432000;
5323 		else
5324 			max_cdclk = 308571;
5325 
5326 		dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
5327 	} else if (IS_BROXTON(dev)) {
5328 		dev_priv->max_cdclk_freq = 624000;
5329 	} else if (IS_BROADWELL(dev))  {
5330 		/*
5331 		 * FIXME with extra cooling we can allow
5332 	 * 540 MHz for ULX and 675 MHz for ULT.
5333 		 * How can we know if extra cooling is
5334 		 * available? PCI ID, VTB, something else?
5335 		 */
5336 		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
5337 			dev_priv->max_cdclk_freq = 450000;
5338 		else if (IS_BDW_ULX(dev))
5339 			dev_priv->max_cdclk_freq = 450000;
5340 		else if (IS_BDW_ULT(dev))
5341 			dev_priv->max_cdclk_freq = 540000;
5342 		else
5343 			dev_priv->max_cdclk_freq = 675000;
5344 	} else if (IS_CHERRYVIEW(dev)) {
5345 		dev_priv->max_cdclk_freq = 320000;
5346 	} else if (IS_VALLEYVIEW(dev)) {
5347 		dev_priv->max_cdclk_freq = 400000;
5348 	} else {
5349 		/* otherwise assume cdclk is fixed */
5350 		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
5351 	}
5352 
5353 	dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
5354 
5355 	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
5356 			 dev_priv->max_cdclk_freq);
5357 
5358 	DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
5359 			 dev_priv->max_dotclk_freq);
5360 }
5361 
5362 static void intel_update_cdclk(struct drm_device *dev)
5363 {
5364 	struct drm_i915_private *dev_priv = dev->dev_private;
5365 
5366 	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
5367 
5368 	if (INTEL_GEN(dev_priv) >= 9)
5369 		DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
5370 				 dev_priv->cdclk_freq, dev_priv->cdclk_pll.vco,
5371 				 dev_priv->cdclk_pll.ref);
5372 	else
5373 		DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
5374 				 dev_priv->cdclk_freq);
5375 
5376 	/*
5377 	 * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
5378 	 * Programmng [sic] note: bit[9:2] should be programmed to the number
5379 	 * of cdclk that generates 4MHz reference clock freq which is used to
5380 	 * generate GMBus clock. This will vary with the cdclk freq.
5381 	 */
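	/* e.g. (illustrative) cdclk_freq = 266667 kHz -> GMBUSFREQ_VLV = 267 */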
5382 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5383 		I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
5384 }
5385 
5386 /* convert from kHz to .1 fixpoint MHz with -1MHz offset */
5387 static int skl_cdclk_decimal(int cdclk)
5388 {
5389 	return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
5390 }
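/*
 * Worked example: cdclk = 337500 kHz (337.5 MHz) yields
 * DIV_ROUND_CLOSEST(336500, 500) = 673, i.e. (337.5 - 1) MHz in 10.1
 * fixed point, which is what the CDCLK_CTL decimal field expects.
 */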
5391 
5392 static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
5393 {
5394 	int ratio;
5395 
5396 	if (cdclk == dev_priv->cdclk_pll.ref)
5397 		return 0;
5398 
5399 	switch (cdclk) {
5400 	default:
5401 		MISSING_CASE(cdclk);
5402 	case 144000:
5403 	case 288000:
5404 	case 384000:
5405 	case 576000:
5406 		ratio = 60;
5407 		break;
5408 	case 624000:
5409 		ratio = 65;
5410 		break;
5411 	}
5412 
5413 	return dev_priv->cdclk_pll.ref * ratio;
5414 }
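/*
 * Sanity check (illustrative): with the 19200 kHz reference, ratio 60
 * gives vco = 19200 * 60 = 1152000 kHz, from which 576000 (/2/1),
 * 384000 (/2/1.5), 288000 (/2/2) and 144000 (/2/4) kHz all derive;
 * ratio 65 gives 1248000 kHz for the 624000 kHz cdclk.
 */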
5415 
5416 static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
5417 {
5418 	I915_WRITE(BXT_DE_PLL_ENABLE, 0);
5419 
5420 	/* Timeout 200us */
5421 	if (intel_wait_for_register(dev_priv,
5422 				    BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
5423 				    1))
5424 		DRM_ERROR("timeout waiting for DE PLL unlock\n");
5425 
5426 	dev_priv->cdclk_pll.vco = 0;
5427 }
5428 
5429 static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
5430 {
5431 	int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk_pll.ref);
5432 	u32 val;
5433 
5434 	val = I915_READ(BXT_DE_PLL_CTL);
5435 	val &= ~BXT_DE_PLL_RATIO_MASK;
5436 	val |= BXT_DE_PLL_RATIO(ratio);
5437 	I915_WRITE(BXT_DE_PLL_CTL, val);
5438 
5439 	I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
5440 
5441 	/* Timeout 200us */
5442 	if (intel_wait_for_register(dev_priv,
5443 				    BXT_DE_PLL_ENABLE,
5444 				    BXT_DE_PLL_LOCK,
5445 				    BXT_DE_PLL_LOCK,
5446 				    1))
5447 		DRM_ERROR("timeout waiting for DE PLL lock\n");
5448 
5449 	dev_priv->cdclk_pll.vco = vco;
5450 }
5451 
5452 static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
5453 {
5454 	u32 val, divider;
5455 	int vco, ret;
5456 
5457 	vco = bxt_de_pll_vco(dev_priv, cdclk);
5458 
5459 	DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
5460 
5461 	/* cdclk = vco / 2 / div{1,1.5,2,4} */
5462 	switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
5463 	case 8:
5464 		divider = BXT_CDCLK_CD2X_DIV_SEL_4;
5465 		break;
5466 	case 4:
5467 		divider = BXT_CDCLK_CD2X_DIV_SEL_2;
5468 		break;
5469 	case 3:
5470 		divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
5471 		break;
5472 	case 2:
5473 		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5474 		break;
5475 	default:
5476 		WARN_ON(cdclk != dev_priv->cdclk_pll.ref);
5477 		WARN_ON(vco != 0);
5478 
5479 		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5480 		break;
5481 	}
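	/*
	 * E.g. (illustrative) vco = 1152000 kHz and cdclk = 288000 kHz
	 * give DIV_ROUND_CLOSEST(vco, cdclk) = 4, selecting a CD2X
	 * divider of 2 since cdclk = vco / 2 / 2.
	 */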
5482 
5483 	/* Inform power controller of upcoming frequency change */
5484 	mutex_lock(&dev_priv->rps.hw_lock);
5485 	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5486 				      0x80000000);
5487 	mutex_unlock(&dev_priv->rps.hw_lock);
5488 
5489 	if (ret) {
5490 		DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
5491 			  ret, cdclk);
5492 		return;
5493 	}
5494 
5495 	if (dev_priv->cdclk_pll.vco != 0 &&
5496 	    dev_priv->cdclk_pll.vco != vco)
5497 		bxt_de_pll_disable(dev_priv);
5498 
5499 	if (dev_priv->cdclk_pll.vco != vco)
5500 		bxt_de_pll_enable(dev_priv, vco);
5501 
5502 	val = divider | skl_cdclk_decimal(cdclk);
5503 	/*
5504 	 * FIXME if only the cd2x divider needs changing, it could be done
5505 	 * without shutting off the pipe (if only one pipe is active).
5506 	 */
5507 	val |= BXT_CDCLK_CD2X_PIPE_NONE;
5508 	/*
5509 	 * Disable SSA Precharge when CD clock frequency < 500 MHz,
5510 	 * enable otherwise.
5511 	 */
5512 	if (cdclk >= 500000)
5513 		val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5514 	I915_WRITE(CDCLK_CTL, val);
5515 
5516 	mutex_lock(&dev_priv->rps.hw_lock);
5517 	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5518 				      DIV_ROUND_UP(cdclk, 25000));
5519 	mutex_unlock(&dev_priv->rps.hw_lock);
5520 
5521 	if (ret) {
5522 		DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
5523 			  ret, cdclk);
5524 		return;
5525 	}
5526 
5527 	intel_update_cdclk(dev_priv->dev);
5528 }
5529 
5530 static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
5531 {
5532 	u32 cdctl, expected;
5533 
5534 	intel_update_cdclk(dev_priv->dev);
5535 
5536 	if (dev_priv->cdclk_pll.vco == 0 ||
5537 	    dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
5538 		goto sanitize;
5539 
5540 	/* DPLL okay; verify the cdclock
5541 	 *
5542 	 * Some BIOS versions leave an incorrect decimal frequency value and
5543 	 * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
5544 	 * so sanitize this register.
5545 	 */
5546 	cdctl = I915_READ(CDCLK_CTL);
5547 	/*
5548 	 * Let's ignore the pipe field, since the BIOS could have configured
5549 	 * the divider either synced to an active pipe or asynchronously
5550 	 * (PIPE_NONE).
5551 	 */
5552 	cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
5553 
5554 	expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
5555 		   skl_cdclk_decimal(dev_priv->cdclk_freq);
5556 	/*
5557 	 * Disable SSA Precharge when CD clock frequency < 500 MHz,
5558 	 * enable otherwise.
5559 	 */
5560 	if (dev_priv->cdclk_freq >= 500000)
5561 		expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5562 
5563 	if (cdctl == expected)
5564 		/* All well; nothing to sanitize */
5565 		return;
5566 
5567 sanitize:
5568 	DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
5569 
5570 	/* force cdclk programming */
5571 	dev_priv->cdclk_freq = 0;
5572 
5573 	/* force full PLL disable + enable */
5574 	dev_priv->cdclk_pll.vco = -1;
5575 }
5576 
5577 void bxt_init_cdclk(struct drm_i915_private *dev_priv)
5578 {
5579 	bxt_sanitize_cdclk(dev_priv);
5580 
5581 	if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0)
5582 		return;
5583 
5584 	/*
5585 	 * FIXME:
5586 	 * - The initial CDCLK needs to be read from VBT.
5587 	 *   Need to make this change once the VBT has the required changes for BXT.
5588 	 */
5589 	bxt_set_cdclk(dev_priv, bxt_calc_cdclk(0));
5590 }
5591 
5592 void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
5593 {
5594 	bxt_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref);
5595 }
5596 
5597 static int skl_calc_cdclk(int max_pixclk, int vco)
5598 {
5599 	if (vco == 8640000) {
5600 		if (max_pixclk > 540000)
5601 			return 617143;
5602 		else if (max_pixclk > 432000)
5603 			return 540000;
5604 		else if (max_pixclk > 308571)
5605 			return 432000;
5606 		else
5607 			return 308571;
5608 	} else {
5609 		if (max_pixclk > 540000)
5610 			return 675000;
5611 		else if (max_pixclk > 450000)
5612 			return 540000;
5613 		else if (max_pixclk > 337500)
5614 			return 450000;
5615 		else
5616 			return 337500;
5617 	}
5618 }
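/*
 * E.g. (illustrative) a 533250 kHz pixel clock (a common 4k@60 reduced
 * blanking timing) selects 540000 kHz on either vco: it exceeds 450000
 * in the 8100 table and falls in the 432000..540000 bin in the 8640
 * table.
 */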
5619 
5620 static void
5621 skl_dpll0_update(struct drm_i915_private *dev_priv)
5622 {
5623 	u32 val;
5624 
5625 	dev_priv->cdclk_pll.ref = 24000;
5626 	dev_priv->cdclk_pll.vco = 0;
5627 
5628 	val = I915_READ(LCPLL1_CTL);
5629 	if ((val & LCPLL_PLL_ENABLE) == 0)
5630 		return;
5631 
5632 	if (WARN_ON((val & LCPLL_PLL_LOCK) == 0))
5633 		return;
5634 
5635 	val = I915_READ(DPLL_CTRL1);
5636 
5637 	if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
5638 			    DPLL_CTRL1_SSC(SKL_DPLL0) |
5639 			    DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
5640 		    DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
5641 		return;
5642 
5643 	switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
5644 	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0):
5645 	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
5646 	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
5647 	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
5648 		dev_priv->cdclk_pll.vco = 8100000;
5649 		break;
5650 	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
5651 	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
5652 		dev_priv->cdclk_pll.vco = 8640000;
5653 		break;
5654 	default:
5655 		MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
5656 		break;
5657 	}
5658 }
5659 
5660 void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
5661 {
5662 	bool changed = dev_priv->skl_preferred_vco_freq != vco;
5663 
5664 	dev_priv->skl_preferred_vco_freq = vco;
5665 
5666 	if (changed)
5667 		intel_update_max_cdclk(dev_priv->dev);
5668 }
5669 
5670 static void
5671 skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
5672 {
5673 	int min_cdclk = skl_calc_cdclk(0, vco);
5674 	u32 val;
5675 
5676 	WARN_ON(vco != 8100000 && vco != 8640000);
5677 
5678 	/* select the minimum CDCLK before enabling DPLL 0 */
5679 	val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
5680 	I915_WRITE(CDCLK_CTL, val);
5681 	POSTING_READ(CDCLK_CTL);
5682 
5683 	/*
5684 	 * We always enable DPLL0 with the lowest link rate possible, but still
5685 	 * taking into account the VCO required to operate the eDP panel at the
5686 	 * desired frequency. The usual DP link rates operate with a VCO of
5687 	 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
5688 	 * The modeset code is responsible for the selection of the exact link
5689 	 * rate later on, with the constraint of choosing a frequency that
5690 	 * works with vco.
5691 	 */
5692 	val = I915_READ(DPLL_CTRL1);
5693 
5694 	val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
5695 		 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
5696 	val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
5697 	if (vco == 8640000)
5698 		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
5699 					    SKL_DPLL0);
5700 	else
5701 		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5702 					    SKL_DPLL0);
5703 
5704 	I915_WRITE(DPLL_CTRL1, val);
5705 	POSTING_READ(DPLL_CTRL1);
5706 
5707 	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
5708 
5709 	if (intel_wait_for_register(dev_priv,
5710 				    LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
5711 				    5))
5712 		DRM_ERROR("DPLL0 not locked\n");
5713 
5714 	dev_priv->cdclk_pll.vco = vco;
5715 
5716 	/* We'll want to keep using the current vco from now on. */
5717 	skl_set_preferred_cdclk_vco(dev_priv, vco);
5718 }
5719 
5720 static void
5721 skl_dpll0_disable(struct drm_i915_private *dev_priv)
5722 {
5723 	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
5724 	if (intel_wait_for_register(dev_priv,
5725 				   LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
5726 				   1))
5727 		DRM_ERROR("Couldn't disable DPLL0\n");
5728 
5729 	dev_priv->cdclk_pll.vco = 0;
5730 }
5731 
5732 static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
5733 {
5734 	int ret;
5735 	u32 val;
5736 
5737 	/* inform PCU we want to change CDCLK */
5738 	val = SKL_CDCLK_PREPARE_FOR_CHANGE;
5739 	mutex_lock(&dev_priv->rps.hw_lock);
5740 	ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
5741 	mutex_unlock(&dev_priv->rps.hw_lock);
5742 
5743 	return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
5744 }
5745 
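/* Poll the PCU for up to ~150 us (15 attempts, 10 us apart). */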
5746 static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
5747 {
5748 	unsigned int i;
5749 
5750 	for (i = 0; i < 15; i++) {
5751 		if (skl_cdclk_pcu_ready(dev_priv))
5752 			return true;
5753 		udelay(10);
5754 	}
5755 
5756 	return false;
5757 }
5758 
5759 static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
5760 {
5761 	struct drm_device *dev = dev_priv->dev;
5762 	u32 freq_select, pcu_ack;
5763 
5764 	WARN_ON((cdclk == 24000) != (vco == 0));
5765 
5766 	DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
5767 
5768 	if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
5769 		DRM_ERROR("failed to inform PCU about cdclk change\n");
5770 		return;
5771 	}
5772 
5773 	/* set CDCLK_CTL */
5774 	switch (cdclk) {
5775 	case 450000:
5776 	case 432000:
5777 		freq_select = CDCLK_FREQ_450_432;
5778 		pcu_ack = 1;
5779 		break;
5780 	case 540000:
5781 		freq_select = CDCLK_FREQ_540;
5782 		pcu_ack = 2;
5783 		break;
5784 	case 308571:
5785 	case 337500:
5786 	default:
5787 		freq_select = CDCLK_FREQ_337_308;
5788 		pcu_ack = 0;
5789 		break;
5790 	case 617143:
5791 	case 675000:
5792 		freq_select = CDCLK_FREQ_675_617;
5793 		pcu_ack = 3;
5794 		break;
5795 	}
5796 
5797 	if (dev_priv->cdclk_pll.vco != 0 &&
5798 	    dev_priv->cdclk_pll.vco != vco)
5799 		skl_dpll0_disable(dev_priv);
5800 
5801 	if (dev_priv->cdclk_pll.vco != vco)
5802 		skl_dpll0_enable(dev_priv, vco);
5803 
5804 	I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
5805 	POSTING_READ(CDCLK_CTL);
5806 
5807 	/* inform PCU of the change */
5808 	mutex_lock(&dev_priv->rps.hw_lock);
5809 	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
5810 	mutex_unlock(&dev_priv->rps.hw_lock);
5811 
5812 	intel_update_cdclk(dev);
5813 }
5814 
5815 static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
5816 
5817 void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
5818 {
5819 	skl_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref, 0);
5820 }
5821 
5822 void skl_init_cdclk(struct drm_i915_private *dev_priv)
5823 {
5824 	int cdclk, vco;
5825 
5826 	skl_sanitize_cdclk(dev_priv);
5827 
5828 	if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) {
5829 		/*
5830 		 * Use the current vco as our initial
5831 		 * guess as to what the preferred vco is.
5832 		 */
5833 		if (dev_priv->skl_preferred_vco_freq == 0)
5834 			skl_set_preferred_cdclk_vco(dev_priv,
5835 						    dev_priv->cdclk_pll.vco);
5836 		return;
5837 	}
5838 
5839 	vco = dev_priv->skl_preferred_vco_freq;
5840 	if (vco == 0)
5841 		vco = 8100000;
5842 	cdclk = skl_calc_cdclk(0, vco);
5843 
5844 	skl_set_cdclk(dev_priv, cdclk, vco);
5845 }
5846 
5847 static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
5848 {
5849 	uint32_t cdctl, expected;
5850 
5851 	/*
5852 	 * Check if the pre-os initialized the display.
5853 	 * The pre-os sets the SWF18 scratchpad register, which the OS
5854 	 * drivers can read to check whether the display has been enabled.
5855 	 */
5856 	if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
5857 		goto sanitize;
5858 
5859 	intel_update_cdclk(dev_priv->dev);
5860 	/* Is the PLL enabled and locked? */
5861 	if (dev_priv->cdclk_pll.vco == 0 ||
5862 	    dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
5863 		goto sanitize;
5864 
5865 	/* DPLL okay; verify the cdclock
5866 	 *
5867 	 * In some instances the frequency selection is correct but the
5868 	 * decimal part is programmed wrong by the BIOS when the pre-os
5869 	 * does not enable the display. Verify that as well.
5870 	 */
5871 	cdctl = I915_READ(CDCLK_CTL);
5872 	expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
5873 		skl_cdclk_decimal(dev_priv->cdclk_freq);
5874 	if (cdctl == expected)
5875 		/* All well; nothing to sanitize */
5876 		return;
5877 
5878 sanitize:
5879 	DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
5880 
5881 	/* force cdclk programming */
5882 	dev_priv->cdclk_freq = 0;
5883 	/* force full PLL disable + enable */
5884 	dev_priv->cdclk_pll.vco = -1;
5885 }
5886 
5887 /* Adjust CDclk dividers to allow high res or save power if possible */
5888 static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
5889 {
5890 	struct drm_i915_private *dev_priv = dev->dev_private;
5891 	u32 val, cmd;
5892 
5893 	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
5894 					!= dev_priv->cdclk_freq);
5895 
5896 	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
5897 		cmd = 2;
5898 	else if (cdclk == 266667)
5899 		cmd = 1;
5900 	else
5901 		cmd = 0;
5902 
5903 	mutex_lock(&dev_priv->rps.hw_lock);
5904 	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
5905 	val &= ~DSPFREQGUAR_MASK;
5906 	val |= (cmd << DSPFREQGUAR_SHIFT);
5907 	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
5908 	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
5909 		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
5910 		     50)) {
5911 		DRM_ERROR("timed out waiting for CDclk change\n");
5912 	}
5913 	mutex_unlock(&dev_priv->rps.hw_lock);
5914 
5915 	mutex_lock(&dev_priv->sb_lock);
5916 
5917 	if (cdclk == 400000) {
5918 		u32 divider;
5919 
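		/*
		 * Illustrative: with a 1600 MHz HPLL this works out to
		 * DIV_ROUND_CLOSEST(3200000, 400000) - 1 = 7.
		 */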
5920 		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
5921 
5922 		/* adjust cdclk divider */
5923 		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
5924 		val &= ~CCK_FREQUENCY_VALUES;
5925 		val |= divider;
5926 		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
5927 
5928 		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
5929 			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
5930 			     50))
5931 			DRM_ERROR("timed out waiting for CDclk change\n");
5932 	}
5933 
5934 	/* adjust self-refresh exit latency value */
5935 	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
5936 	val &= ~0x7f;
5937 
5938 	/*
5939 	 * For high bandwidth configs, we set a higher latency in the bunit
5940 	 * so that the core display fetch happens in time to avoid underruns.
5941 	 */
5942 	if (cdclk == 400000)
5943 		val |= 4500 / 250; /* 4.5 usec */
5944 	else
5945 		val |= 3000 / 250; /* 3.0 usec */
5946 	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
5947 
5948 	mutex_unlock(&dev_priv->sb_lock);
5949 
5950 	intel_update_cdclk(dev);
5951 }
5952 
5953 static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
5954 {
5955 	struct drm_i915_private *dev_priv = dev->dev_private;
5956 	u32 val, cmd;
5957 
5958 	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
5959 						!= dev_priv->cdclk_freq);
5960 
5961 	switch (cdclk) {
5962 	case 333333:
5963 	case 320000:
5964 	case 266667:
5965 	case 200000:
5966 		break;
5967 	default:
5968 		MISSING_CASE(cdclk);
5969 		return;
5970 	}
5971 
5972 	/*
5973 	 * Specs are full of misinformation, but testing on actual
5974 	 * hardware has shown that we just need to write the desired
5975 	 * CCK divider into the Punit register.
5976 	 */
5977 	cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
5978 
5979 	mutex_lock(&dev_priv->rps.hw_lock);
5980 	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
5981 	val &= ~DSPFREQGUAR_MASK_CHV;
5982 	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
5983 	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
5984 	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
5985 		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
5986 		     50)) {
5987 		DRM_ERROR("timed out waiting for CDclk change\n");
5988 	}
5989 	mutex_unlock(&dev_priv->rps.hw_lock);
5990 
5991 	intel_update_cdclk(dev);
5992 }
5993 
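/*
 * Worked example (illustrative): a 2000 MHz HPLL makes freq_320 come
 * out as 333333 (4000000 % 320000 != 0), so a 280000 kHz max_pixclk on
 * VLV clears 266667 * 90 / 100 but not 333333 * 90 / 100 and lands in
 * the 333 MHz bin.
 */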
5994 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
5995 				 int max_pixclk)
5996 {
5997 	int freq_320 = (dev_priv->hpll_freq <<  1) % 320000 != 0 ? 333333 : 320000;
5998 	int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
5999 
6000 	/*
6001 	 * Really only a few cases to deal with, as only 4 CDclks are supported:
6002 	 *   200MHz
6003 	 *   267MHz
6004 	 *   320/333MHz (depends on HPLL freq)
6005 	 *   400MHz (VLV only)
6006 	 * So we check to see whether we're above 90% (VLV) or 95% (CHV)
6007 	 * of the lower bin and adjust if needed.
6008 	 *
6009 	 * We seem to get an unstable or solid color picture at 200MHz.
6010 	 * Not sure what's wrong. For now use 200MHz only when all pipes
6011 	 * are off.
6012 	 */
6013 	if (!IS_CHERRYVIEW(dev_priv) &&
6014 	    max_pixclk > freq_320*limit/100)
6015 		return 400000;
6016 	else if (max_pixclk > 266667*limit/100)
6017 		return freq_320;
6018 	else if (max_pixclk > 0)
6019 		return 266667;
6020 	else
6021 		return 200000;
6022 }
6023 
6024 static int bxt_calc_cdclk(int max_pixclk)
6025 {
6026 	if (max_pixclk > 576000)
6027 		return 624000;
6028 	else if (max_pixclk > 384000)
6029 		return 576000;
6030 	else if (max_pixclk > 288000)
6031 		return 384000;
6032 	else if (max_pixclk > 144000)
6033 		return 288000;
6034 	else
6035 		return 144000;
6036 }
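/*
 * E.g. (illustrative) a 297000 kHz 4k@30 mode selects the 384000 kHz
 * bin, while a 594000 kHz 4k@60 mode needs the full 624000 kHz cdclk.
 */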
6037 
6038 /* Compute the max pixel clock for new configuration. */
6039 static int intel_mode_max_pixclk(struct drm_device *dev,
6040 				 struct drm_atomic_state *state)
6041 {
6042 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
6043 	struct drm_i915_private *dev_priv = dev->dev_private;
6044 	struct drm_crtc *crtc;
6045 	struct drm_crtc_state *crtc_state;
6046 	unsigned max_pixclk = 0, i;
6047 	enum i915_pipe pipe;
6048 
6049 	memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
6050 	       sizeof(intel_state->min_pixclk));
6051 
6052 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
6053 		int pixclk = 0;
6054 
6055 		if (crtc_state->enable)
6056 			pixclk = crtc_state->adjusted_mode.crtc_clock;
6057 
6058 		intel_state->min_pixclk[i] = pixclk;
6059 	}
6060 
6061 	for_each_pipe(dev_priv, pipe)
6062 		max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);
6063 
6064 	return max_pixclk;
6065 }
6066 
6067 static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
6068 {
6069 	struct drm_device *dev = state->dev;
6070 	struct drm_i915_private *dev_priv = dev->dev_private;
6071 	int max_pixclk = intel_mode_max_pixclk(dev, state);
6072 	struct intel_atomic_state *intel_state =
6073 		to_intel_atomic_state(state);
6074 
6075 	intel_state->cdclk = intel_state->dev_cdclk =
6076 		valleyview_calc_cdclk(dev_priv, max_pixclk);
6077 
6078 	if (!intel_state->active_crtcs)
6079 		intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0);
6080 
6081 	return 0;
6082 }
6083 
6084 static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
6085 {
6086 	int max_pixclk = ilk_max_pixel_rate(state);
6087 	struct intel_atomic_state *intel_state =
6088 		to_intel_atomic_state(state);
6089 
6090 	intel_state->cdclk = intel_state->dev_cdclk =
6091 		bxt_calc_cdclk(max_pixclk);
6092 
6093 	if (!intel_state->active_crtcs)
6094 		intel_state->dev_cdclk = bxt_calc_cdclk(0);
6095 
6096 	return 0;
6097 }
6098 
6099 static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
6100 {
6101 	unsigned int credits, default_credits;
6102 
6103 	if (IS_CHERRYVIEW(dev_priv))
6104 		default_credits = PFI_CREDIT(12);
6105 	else
6106 		default_credits = PFI_CREDIT(8);
6107 
6108 	if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
6109 		/* CHV suggested value is 31 or 63 */
6110 		if (IS_CHERRYVIEW(dev_priv))
6111 			credits = PFI_CREDIT_63;
6112 		else
6113 			credits = PFI_CREDIT(15);
6114 	} else {
6115 		credits = default_credits;
6116 	}
6117 
6118 	/*
6119 	 * WA - write default credits before re-programming
6120 	 * FIXME: should we also set the resend bit here?
6121 	 */
6122 	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
6123 		   default_credits);
6124 
6125 	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
6126 		   credits | PFI_CREDIT_RESEND);
6127 
6128 	/*
6129 	 * FIXME is this guaranteed to clear
6130 	 * immediately or should we poll for it?
6131 	 */
6132 	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
6133 }
6134 
6135 static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
6136 {
6137 	struct drm_device *dev = old_state->dev;
6138 	struct drm_i915_private *dev_priv = dev->dev_private;
6139 	struct intel_atomic_state *old_intel_state =
6140 		to_intel_atomic_state(old_state);
6141 	unsigned req_cdclk = old_intel_state->dev_cdclk;
6142 
6143 	/*
6144 	 * FIXME: We can end up here with all power domains off, yet
6145 	 * with a CDCLK frequency other than the minimum. To account
6146 	 * for this take the PIPE-A power domain, which covers the HW
6147 	 * blocks needed for the following programming. This can be
6148 	 * removed once it's guaranteed that we get here either with
6149 	 * the minimum CDCLK set, or the required power domains
6150 	 * enabled.
6151 	 */
6152 	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
6153 
6154 	if (IS_CHERRYVIEW(dev))
6155 		cherryview_set_cdclk(dev, req_cdclk);
6156 	else
6157 		valleyview_set_cdclk(dev, req_cdclk);
6158 
6159 	vlv_program_pfi_credits(dev_priv);
6160 
6161 	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
6162 }
6163 
6164 static void valleyview_crtc_enable(struct drm_crtc *crtc)
6165 {
6166 	struct drm_device *dev = crtc->dev;
6167 	struct drm_i915_private *dev_priv = to_i915(dev);
6168 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6169 	struct intel_encoder *encoder;
6170 	struct intel_crtc_state *pipe_config =
6171 		to_intel_crtc_state(crtc->state);
6172 	int pipe = intel_crtc->pipe;
6173 
6174 	if (WARN_ON(intel_crtc->active))
6175 		return;
6176 
6177 	if (intel_crtc->config->has_dp_encoder)
6178 		intel_dp_set_m_n(intel_crtc, M1_N1);
6179 
6180 	intel_set_pipe_timings(intel_crtc);
6181 	intel_set_pipe_src_size(intel_crtc);
6182 
6183 	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
6184 		struct drm_i915_private *dev_priv = dev->dev_private;
6185 
6186 		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6187 		I915_WRITE(CHV_CANVAS(pipe), 0);
6188 	}
6189 
6190 	i9xx_set_pipeconf(intel_crtc);
6191 
6192 	intel_crtc->active = true;
6193 
6194 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6195 
6196 	for_each_encoder_on_crtc(dev, crtc, encoder)
6197 		if (encoder->pre_pll_enable)
6198 			encoder->pre_pll_enable(encoder);
6199 
6200 	if (IS_CHERRYVIEW(dev)) {
6201 		chv_prepare_pll(intel_crtc, intel_crtc->config);
6202 		chv_enable_pll(intel_crtc, intel_crtc->config);
6203 	} else {
6204 		vlv_prepare_pll(intel_crtc, intel_crtc->config);
6205 		vlv_enable_pll(intel_crtc, intel_crtc->config);
6206 	}
6207 
6208 	for_each_encoder_on_crtc(dev, crtc, encoder)
6209 		if (encoder->pre_enable)
6210 			encoder->pre_enable(encoder);
6211 
6212 	i9xx_pfit_enable(intel_crtc);
6213 
6214 	intel_color_load_luts(&pipe_config->base);
6215 
6216 	intel_update_watermarks(crtc);
6217 	intel_enable_pipe(intel_crtc);
6218 
6219 	assert_vblank_disabled(crtc);
6220 	drm_crtc_vblank_on(crtc);
6221 
6222 	for_each_encoder_on_crtc(dev, crtc, encoder)
6223 		encoder->enable(encoder);
6224 }
6225 
6226 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
6227 {
6228 	struct drm_device *dev = crtc->base.dev;
6229 	struct drm_i915_private *dev_priv = dev->dev_private;
6230 
6231 	I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
6232 	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
6233 }
6234 
6235 static void i9xx_crtc_enable(struct drm_crtc *crtc)
6236 {
6237 	struct drm_device *dev = crtc->dev;
6238 	struct drm_i915_private *dev_priv = to_i915(dev);
6239 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6240 	struct intel_encoder *encoder;
6241 	struct intel_crtc_state *pipe_config =
6242 		to_intel_crtc_state(crtc->state);
6243 	enum i915_pipe pipe = intel_crtc->pipe;
6244 
6245 	if (WARN_ON(intel_crtc->active))
6246 		return;
6247 
6248 	i9xx_set_pll_dividers(intel_crtc);
6249 
6250 	if (intel_crtc->config->has_dp_encoder)
6251 		intel_dp_set_m_n(intel_crtc, M1_N1);
6252 
6253 	intel_set_pipe_timings(intel_crtc);
6254 	intel_set_pipe_src_size(intel_crtc);
6255 
6256 	i9xx_set_pipeconf(intel_crtc);
6257 
6258 	intel_crtc->active = true;
6259 
6260 	if (!IS_GEN2(dev))
6261 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6262 
6263 	for_each_encoder_on_crtc(dev, crtc, encoder)
6264 		if (encoder->pre_enable)
6265 			encoder->pre_enable(encoder);
6266 
6267 	i9xx_enable_pll(intel_crtc);
6268 
6269 	i9xx_pfit_enable(intel_crtc);
6270 
6271 	intel_color_load_luts(&pipe_config->base);
6272 
6273 	intel_update_watermarks(crtc);
6274 	intel_enable_pipe(intel_crtc);
6275 
6276 	assert_vblank_disabled(crtc);
6277 	drm_crtc_vblank_on(crtc);
6278 
6279 	for_each_encoder_on_crtc(dev, crtc, encoder)
6280 		encoder->enable(encoder);
6281 }
6282 
6283 static void i9xx_pfit_disable(struct intel_crtc *crtc)
6284 {
6285 	struct drm_device *dev = crtc->base.dev;
6286 	struct drm_i915_private *dev_priv = dev->dev_private;
6287 
6288 	if (!crtc->config->gmch_pfit.control)
6289 		return;
6290 
6291 	assert_pipe_disabled(dev_priv, crtc->pipe);
6292 
6293 	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
6294 			 I915_READ(PFIT_CONTROL));
6295 	I915_WRITE(PFIT_CONTROL, 0);
6296 }
6297 
6298 static void i9xx_crtc_disable(struct drm_crtc *crtc)
6299 {
6300 	struct drm_device *dev = crtc->dev;
6301 	struct drm_i915_private *dev_priv = dev->dev_private;
6302 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6303 	struct intel_encoder *encoder;
6304 	int pipe = intel_crtc->pipe;
6305 
6306 	/*
6307 	 * On gen2 planes are double buffered but the pipe isn't, so we must
6308 	 * wait for planes to fully turn off before disabling the pipe.
6309 	 */
6310 	if (IS_GEN2(dev))
6311 		intel_wait_for_vblank(dev, pipe);
6312 
6313 	for_each_encoder_on_crtc(dev, crtc, encoder)
6314 		encoder->disable(encoder);
6315 
6316 	drm_crtc_vblank_off(crtc);
6317 	assert_vblank_disabled(crtc);
6318 
6319 	intel_disable_pipe(intel_crtc);
6320 
6321 	i9xx_pfit_disable(intel_crtc);
6322 
6323 	for_each_encoder_on_crtc(dev, crtc, encoder)
6324 		if (encoder->post_disable)
6325 			encoder->post_disable(encoder);
6326 
6327 	if (!intel_crtc->config->has_dsi_encoder) {
6328 		if (IS_CHERRYVIEW(dev))
6329 			chv_disable_pll(dev_priv, pipe);
6330 		else if (IS_VALLEYVIEW(dev))
6331 			vlv_disable_pll(dev_priv, pipe);
6332 		else
6333 			i9xx_disable_pll(intel_crtc);
6334 	}
6335 
6336 	for_each_encoder_on_crtc(dev, crtc, encoder)
6337 		if (encoder->post_pll_disable)
6338 			encoder->post_pll_disable(encoder);
6339 
6340 	if (!IS_GEN2(dev))
6341 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6342 }
6343 
6344 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6345 {
6346 	struct intel_encoder *encoder;
6347 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6348 	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6349 	enum intel_display_power_domain domain;
6350 	unsigned long domains;
6351 
6352 	if (!intel_crtc->active)
6353 		return;
6354 
6355 	if (to_intel_plane_state(crtc->primary->state)->visible) {
6356 		WARN_ON(intel_crtc->flip_work);
6357 
6358 		intel_pre_disable_primary_noatomic(crtc);
6359 
6360 		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
6361 		to_intel_plane_state(crtc->primary->state)->visible = false;
6362 	}
6363 
6364 	dev_priv->display.crtc_disable(crtc);
6365 
6366 	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
6367 		      crtc->base.id, crtc->name);
6368 
6369 	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
6370 	crtc->state->active = false;
6371 	intel_crtc->active = false;
6372 	crtc->enabled = false;
6373 	crtc->state->connector_mask = 0;
6374 	crtc->state->encoder_mask = 0;
6375 
6376 	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
6377 		encoder->base.crtc = NULL;
6378 
6379 	intel_fbc_disable(intel_crtc);
6380 	intel_update_watermarks(crtc);
6381 	intel_disable_shared_dpll(intel_crtc);
6382 
6383 	domains = intel_crtc->enabled_power_domains;
6384 	for_each_power_domain(domain, domains)
6385 		intel_display_power_put(dev_priv, domain);
6386 	intel_crtc->enabled_power_domains = 0;
6387 
6388 	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
6389 	dev_priv->min_pixclk[intel_crtc->pipe] = 0;
6390 }
6391 
6392 /*
6393  * Turn all CRTCs off, but do not adjust state.
6394  * This has to be paired with a call to intel_modeset_setup_hw_state.
6395  */
6396 int intel_display_suspend(struct drm_device *dev)
6397 {
6398 	struct drm_i915_private *dev_priv = to_i915(dev);
6399 	struct drm_atomic_state *state;
6400 	int ret;
6401 
6402 	state = drm_atomic_helper_suspend(dev);
6403 	ret = PTR_ERR_OR_ZERO(state);
6404 	if (ret)
6405 		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
6406 	else
6407 		dev_priv->modeset_restore_state = state;
6408 	return ret;
6409 }
6410 
6411 void intel_encoder_destroy(struct drm_encoder *encoder)
6412 {
6413 	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6414 
6415 	drm_encoder_cleanup(encoder);
6416 	kfree(intel_encoder);
6417 }
6418 
6419 /* Cross check the actual hw state with our own modeset state tracking (and
6420  * its internal consistency). */
6421 static void intel_connector_verify_state(struct intel_connector *connector)
6422 {
6423 	struct drm_crtc *crtc = connector->base.state->crtc;
6424 
6425 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
6426 		      connector->base.base.id,
6427 		      connector->base.name);
6428 
6429 	if (connector->get_hw_state(connector)) {
6430 		struct intel_encoder *encoder = connector->encoder;
6431 		struct drm_connector_state *conn_state = connector->base.state;
6432 
6433 		I915_STATE_WARN(!crtc,
6434 			 "connector enabled without attached crtc\n");
6435 
6436 		if (!crtc)
6437 			return;
6438 
6439 		I915_STATE_WARN(!crtc->state->active,
6440 		      "connector is active, but attached crtc isn't\n");
6441 
6442 		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
6443 			return;
6444 
6445 		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
6446 			"atomic encoder doesn't match attached encoder\n");
6447 
6448 		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
6449 			"attached encoder crtc differs from connector crtc\n");
6450 	} else {
6451 		I915_STATE_WARN(crtc && crtc->state->active,
6452 			"attached crtc is active, but connector isn't\n");
6453 		I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
6454 			"best encoder set without crtc!\n");
6455 	}
6456 }
6457 
6458 int intel_connector_init(struct intel_connector *connector)
6459 {
6460 	drm_atomic_helper_connector_reset(&connector->base);
6461 
6462 	if (!connector->base.state)
6463 		return -ENOMEM;
6464 
6465 	return 0;
6466 }
6467 
6468 struct intel_connector *intel_connector_alloc(void)
6469 {
6470 	struct intel_connector *connector;
6471 
6472 	connector = kzalloc(sizeof(*connector), GFP_KERNEL);
6473 	if (!connector)
6474 		return NULL;
6475 
6476 	if (intel_connector_init(connector) < 0) {
6477 		kfree(connector);
6478 		return NULL;
6479 	}
6480 
6481 	return connector;
6482 }
6483 
6484 /* Simple connector->get_hw_state implementation for encoders that support
6485  * only one connector and no cloning, so the encoder state determines the
6486  * state of the connector. */
6487 bool intel_connector_get_hw_state(struct intel_connector *connector)
6488 {
6489 	enum i915_pipe pipe = 0;
6490 	struct intel_encoder *encoder = connector->encoder;
6491 
6492 	return encoder->get_hw_state(encoder, &pipe);
6493 }
6494 
6495 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
6496 {
6497 	if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6498 		return crtc_state->fdi_lanes;
6499 
6500 	return 0;
6501 }
6502 
6503 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum i915_pipe pipe,
6504 				     struct intel_crtc_state *pipe_config)
6505 {
6506 	struct drm_atomic_state *state = pipe_config->base.state;
6507 	struct intel_crtc *other_crtc;
6508 	struct intel_crtc_state *other_crtc_state;
6509 
6510 	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
6511 		      pipe_name(pipe), pipe_config->fdi_lanes);
6512 	if (pipe_config->fdi_lanes > 4) {
6513 		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
6514 			      pipe_name(pipe), pipe_config->fdi_lanes);
6515 		return -EINVAL;
6516 	}
6517 
6518 	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
6519 		if (pipe_config->fdi_lanes > 2) {
6520 			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
6521 				      pipe_config->fdi_lanes);
6522 			return -EINVAL;
6523 		} else {
6524 			return 0;
6525 		}
6526 	}
6527 
6528 	if (INTEL_INFO(dev)->num_pipes == 2)
6529 		return 0;
6530 
6531 	/* Ivybridge 3 pipe is really complicated */
6532 	switch (pipe) {
6533 	case PIPE_A:
6534 		return 0;
6535 	case PIPE_B:
6536 		if (pipe_config->fdi_lanes <= 2)
6537 			return 0;
6538 
6539 		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
6540 		other_crtc_state =
6541 			intel_atomic_get_crtc_state(state, other_crtc);
6542 		if (IS_ERR(other_crtc_state))
6543 			return PTR_ERR(other_crtc_state);
6544 
6545 		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
6546 			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
6547 				      pipe_name(pipe), pipe_config->fdi_lanes);
6548 			return -EINVAL;
6549 		}
6550 		return 0;
6551 	case PIPE_C:
6552 		if (pipe_config->fdi_lanes > 2) {
6553 			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
6554 				      pipe_name(pipe), pipe_config->fdi_lanes);
6555 			return -EINVAL;
6556 		}
6557 
6558 		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
6559 		other_crtc_state =
6560 			intel_atomic_get_crtc_state(state, other_crtc);
6561 		if (IS_ERR(other_crtc_state))
6562 			return PTR_ERR(other_crtc_state);
6563 
6564 		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
6565 			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
6566 			return -EINVAL;
6567 		}
6568 		return 0;
6569 	default:
6570 		BUG();
6571 	}
6572 }
6573 
6574 #define RETRY 1
6575 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
6576 				       struct intel_crtc_state *pipe_config)
6577 {
6578 	struct drm_device *dev = intel_crtc->base.dev;
6579 	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6580 	int lane, link_bw, fdi_dotclock, ret;
6581 	bool needs_recompute = false;
6582 
6583 retry:
6584 	/* FDI is a binary signal running at ~2.7GHz, encoding
6585 	 * each output octet as 10 bits. The actual frequency
6586 	 * is stored as a divider into a 100MHz clock, and the
6587 	 * mode pixel clock is stored in units of 1KHz.
6588 	 * Hence the bw of each lane in terms of the mode signal
6589 	 * is:
6590 	 */
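	/*
	 * Worked example (illustrative, assuming the lane helper needs
	 * roughly dotclock * bpp of bandwidth over lanes of link_bw * 8
	 * kbps each, rounding up): a 148500 kHz 1080p mode at 24 bpp is
	 * 3564000 kbps, each lane carries 270000 * 8 = 2160000 kbps, so
	 * two FDI lanes suffice.
	 */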
6591 	link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
6592 
6593 	fdi_dotclock = adjusted_mode->crtc_clock;
6594 
6595 	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
6596 					   pipe_config->pipe_bpp);
6597 
6598 	pipe_config->fdi_lanes = lane;
6599 
6600 	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
6601 			       link_bw, &pipe_config->fdi_m_n);
6602 
6603 	ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
6604 	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
6605 		pipe_config->pipe_bpp -= 2*3;
6606 		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
6607 			      pipe_config->pipe_bpp);
6608 		needs_recompute = true;
6609 		pipe_config->bw_constrained = true;
6610 
6611 		goto retry;
6612 	}
6613 
6614 	if (needs_recompute)
6615 		return RETRY;
6616 
6617 	return ret;
6618 }
6619 
6620 static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
6621 				     struct intel_crtc_state *pipe_config)
6622 {
6623 	if (pipe_config->pipe_bpp > 24)
6624 		return false;
6625 
6626 	/* HSW can handle pixel rate up to cdclk? */
6627 	if (IS_HASWELL(dev_priv))
6628 		return true;
6629 
6630 	/*
6631 	 * We compare against max which means we must take
6632 	 * the increased cdclk requirement into account when
6633 	 * calculating the new cdclk.
6634 	 *
6635 	 * We should measure whether using a lower cdclk without IPS would be better.
6636 	 */
6637 	return ilk_pipe_pixel_rate(pipe_config) <=
6638 		dev_priv->max_cdclk_freq * 95 / 100;
6639 }
6640 
6641 static void hsw_compute_ips_config(struct intel_crtc *crtc,
6642 				   struct intel_crtc_state *pipe_config)
6643 {
6644 	struct drm_device *dev = crtc->base.dev;
6645 	struct drm_i915_private *dev_priv = dev->dev_private;
6646 
6647 	pipe_config->ips_enabled = i915.enable_ips &&
6648 		hsw_crtc_supports_ips(crtc) &&
6649 		pipe_config_supports_ips(dev_priv, pipe_config);
6650 }
6651 
6652 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6653 {
6654 	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6655 
6656 	/* GDG double wide on either pipe, otherwise pipe A only */
6657 	return INTEL_INFO(dev_priv)->gen < 4 &&
6658 		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6659 }
6660 
6661 static int intel_crtc_compute_config(struct intel_crtc *crtc,
6662 				     struct intel_crtc_state *pipe_config)
6663 {
6664 	struct drm_device *dev = crtc->base.dev;
6665 	struct drm_i915_private *dev_priv = dev->dev_private;
6666 	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6667 	int clock_limit = dev_priv->max_dotclk_freq;
6668 
6669 	if (INTEL_INFO(dev)->gen < 4) {
6670 		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
6671 
6672 		/*
6673 		 * Enable double wide mode when the dot clock
6674 		 * is > 90% of the (display) core speed.
6675 		 */
6676 		if (intel_crtc_supports_double_wide(crtc) &&
6677 		    adjusted_mode->crtc_clock > clock_limit) {
6678 			clock_limit = dev_priv->max_dotclk_freq;
6679 			pipe_config->double_wide = true;
6680 		}
6681 	}
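	/*
	 * Illustrative: an i915g part with its 333 MHz core clock has a
	 * single wide limit of ~300 MHz, so a faster mode flips to
	 * double wide (either pipe on 915G) and is then bounded only by
	 * max_dotclk_freq.
	 */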
6682 
6683 	if (adjusted_mode->crtc_clock > clock_limit) {
6684 		DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
6685 			      adjusted_mode->crtc_clock, clock_limit,
6686 			      yesno(pipe_config->double_wide));
6687 		return -EINVAL;
6688 	}
6689 
6690 	/*
6691 	 * Pipe horizontal size must be even in:
6692 	 * - DVO ganged mode
6693 	 * - LVDS dual channel mode
6694 	 * - Double wide pipe
6695 	 */
6696 	if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
6697 	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
6698 		pipe_config->pipe_src_w &= ~1;
6699 
6700 	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
6701 	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
6702 	 */
6703 	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
6704 		adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
6705 		return -EINVAL;
6706 
6707 	if (HAS_IPS(dev))
6708 		hsw_compute_ips_config(crtc, pipe_config);
6709 
6710 	if (pipe_config->has_pch_encoder)
6711 		return ironlake_fdi_compute_config(crtc, pipe_config);
6712 
6713 	return 0;
6714 }
6715 
6716 static int skylake_get_display_clock_speed(struct drm_device *dev)
6717 {
6718 	struct drm_i915_private *dev_priv = to_i915(dev);
6719 	uint32_t cdctl;
6720 
6721 	skl_dpll0_update(dev_priv);
6722 
6723 	if (dev_priv->cdclk_pll.vco == 0)
6724 		return dev_priv->cdclk_pll.ref;
6725 
6726 	cdctl = I915_READ(CDCLK_CTL);
6727 
6728 	if (dev_priv->cdclk_pll.vco == 8640000) {
6729 		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6730 		case CDCLK_FREQ_450_432:
6731 			return 432000;
6732 		case CDCLK_FREQ_337_308:
6733 			return 308571;
6734 		case CDCLK_FREQ_540:
6735 			return 540000;
6736 		case CDCLK_FREQ_675_617:
6737 			return 617143;
6738 		default:
6739 			MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
6740 		}
6741 	} else {
6742 		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6743 		case CDCLK_FREQ_450_432:
6744 			return 450000;
6745 		case CDCLK_FREQ_337_308:
6746 			return 337500;
6747 		case CDCLK_FREQ_540:
6748 			return 540000;
6749 		case CDCLK_FREQ_675_617:
6750 			return 675000;
6751 		default:
6752 			MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
6753 		}
6754 	}
6755 
6756 	return dev_priv->cdclk_pll.ref;
6757 }
6758 
6759 static void bxt_de_pll_update(struct drm_i915_private *dev_priv)
6760 {
6761 	u32 val;
6762 
6763 	dev_priv->cdclk_pll.ref = 19200;
6764 	dev_priv->cdclk_pll.vco = 0;
6765 
6766 	val = I915_READ(BXT_DE_PLL_ENABLE);
6767 	if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
6768 		return;
6769 
6770 	if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
6771 		return;
6772 
6773 	val = I915_READ(BXT_DE_PLL_CTL);
6774 	dev_priv->cdclk_pll.vco = (val & BXT_DE_PLL_RATIO_MASK) *
6775 		dev_priv->cdclk_pll.ref;
6776 }
6777 
6778 static int broxton_get_display_clock_speed(struct drm_device *dev)
6779 {
6780 	struct drm_i915_private *dev_priv = to_i915(dev);
6781 	u32 divider;
6782 	int div, vco;
6783 
6784 	bxt_de_pll_update(dev_priv);
6785 
6786 	vco = dev_priv->cdclk_pll.vco;
6787 	if (vco == 0)
6788 		return dev_priv->cdclk_pll.ref;
6789 
6790 	divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
6791 
6792 	switch (divider) {
6793 	case BXT_CDCLK_CD2X_DIV_SEL_1:
6794 		div = 2;
6795 		break;
6796 	case BXT_CDCLK_CD2X_DIV_SEL_1_5:
6797 		div = 3;
6798 		break;
6799 	case BXT_CDCLK_CD2X_DIV_SEL_2:
6800 		div = 4;
6801 		break;
6802 	case BXT_CDCLK_CD2X_DIV_SEL_4:
6803 		div = 8;
6804 		break;
6805 	default:
6806 		MISSING_CASE(divider);
6807 		return dev_priv->cdclk_pll.ref;
6808 	}
6809 
6810 	return DIV_ROUND_CLOSEST(vco, div);
6811 }
6812 
6813 static int broadwell_get_display_clock_speed(struct drm_device *dev)
6814 {
6815 	struct drm_i915_private *dev_priv = dev->dev_private;
6816 	uint32_t lcpll = I915_READ(LCPLL_CTL);
6817 	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6818 
6819 	if (lcpll & LCPLL_CD_SOURCE_FCLK)
6820 		return 800000;
6821 	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6822 		return 450000;
6823 	else if (freq == LCPLL_CLK_FREQ_450)
6824 		return 450000;
6825 	else if (freq == LCPLL_CLK_FREQ_54O_BDW)
6826 		return 540000;
6827 	else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
6828 		return 337500;
6829 	else
6830 		return 675000;
6831 }
6832 
6833 static int haswell_get_display_clock_speed(struct drm_device *dev)
6834 {
6835 	struct drm_i915_private *dev_priv = dev->dev_private;
6836 	uint32_t lcpll = I915_READ(LCPLL_CTL);
6837 	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6838 
6839 	if (lcpll & LCPLL_CD_SOURCE_FCLK)
6840 		return 800000;
6841 	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6842 		return 450000;
6843 	else if (freq == LCPLL_CLK_FREQ_450)
6844 		return 450000;
6845 	else if (IS_HSW_ULT(dev))
6846 		return 337500;
6847 	else
6848 		return 540000;
6849 }
6850 
6851 static int valleyview_get_display_clock_speed(struct drm_device *dev)
6852 {
6853 	return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
6854 				      CCK_DISPLAY_CLOCK_CONTROL);
6855 }
6856 
6857 static int ilk_get_display_clock_speed(struct drm_device *dev)
6858 {
6859 	return 450000;
6860 }
6861 
6862 static int i945_get_display_clock_speed(struct drm_device *dev)
6863 {
6864 	return 400000;
6865 }
6866 
6867 static int i915_get_display_clock_speed(struct drm_device *dev)
6868 {
6869 	return 333333;
6870 }
6871 
6872 static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
6873 {
6874 	return 200000;
6875 }
6876 
6877 static int pnv_get_display_clock_speed(struct drm_device *dev)
6878 {
6879 	u16 gcfgc = 0;
6880 
6881 	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6882 
6883 	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6884 	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
6885 		return 266667;
6886 	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
6887 		return 333333;
6888 	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
6889 		return 444444;
6890 	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
6891 		return 200000;
6892 	default:
6893 		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
6894 	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
6895 		return 133333;
6896 	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
6897 		return 166667;
6898 	}
6899 }
6900 
6901 static int i915gm_get_display_clock_speed(struct drm_device *dev)
6902 {
6903 	u16 gcfgc = 0;
6904 
6905 	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6906 
6907 	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
6908 		return 133333;
6909 	else {
6910 		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6911 		case GC_DISPLAY_CLOCK_333_MHZ:
6912 			return 333333;
6913 		default:
6914 		case GC_DISPLAY_CLOCK_190_200_MHZ:
6915 			return 190000;
6916 		}
6917 	}
6918 }
6919 
6920 static int i865_get_display_clock_speed(struct drm_device *dev)
6921 {
6922 	return 266667;
6923 }
6924 
6925 static int i85x_get_display_clock_speed(struct drm_device *dev)
6926 {
6927 	u16 hpllcc = 0;
6928 
6929 	/*
6930 	 * 852GM/852GMV only supports 133 MHz and the HPLLCC
6931 	 * encoding is different :(
6932 	 * FIXME is this the right way to detect 852GM/852GMV?
6933 	 */
6934 	if (dev->pdev->revision == 0x1)
6935 		return 133333;
6936 
6937 	pci_bus_read_config_word(dev->pdev->bus,
6938 				 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
6939 
6940 	/* Assume that the hardware is in the high speed state.  This
6941 	 * should be the default.
6942 	 */
6943 	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
6944 	case GC_CLOCK_133_200:
6945 	case GC_CLOCK_133_200_2:
6946 	case GC_CLOCK_100_200:
6947 		return 200000;
6948 	case GC_CLOCK_166_250:
6949 		return 250000;
6950 	case GC_CLOCK_100_133:
6951 		return 133333;
6952 	case GC_CLOCK_133_266:
6953 	case GC_CLOCK_133_266_2:
6954 	case GC_CLOCK_166_266:
6955 		return 266667;
6956 	}
6957 
6958 	/* Shouldn't happen */
6959 	return 0;
6960 }
6961 
6962 static int i830_get_display_clock_speed(struct drm_device *dev)
6963 {
6964 	return 133333;
6965 }
6966 
6967 static unsigned int intel_hpll_vco(struct drm_device *dev)
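/*
 * Look up the HPLL VCO frequency: the low three bits of the HPLLVCO
 * (or HPLLVCO_MOBILE) register index a per-chipset table of VCO rates
 * in kHz. Returns 0 if the chipset is unknown or the encoding is bad.
 */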
6968 {
6969 	struct drm_i915_private *dev_priv = dev->dev_private;
6970 	static const unsigned int blb_vco[8] = {
6971 		[0] = 3200000,
6972 		[1] = 4000000,
6973 		[2] = 5333333,
6974 		[3] = 4800000,
6975 		[4] = 6400000,
6976 	};
6977 	static const unsigned int pnv_vco[8] = {
6978 		[0] = 3200000,
6979 		[1] = 4000000,
6980 		[2] = 5333333,
6981 		[3] = 4800000,
6982 		[4] = 2666667,
6983 	};
6984 	static const unsigned int cl_vco[8] = {
6985 		[0] = 3200000,
6986 		[1] = 4000000,
6987 		[2] = 5333333,
6988 		[3] = 6400000,
6989 		[4] = 3333333,
6990 		[5] = 3566667,
6991 		[6] = 4266667,
6992 	};
6993 	static const unsigned int elk_vco[8] = {
6994 		[0] = 3200000,
6995 		[1] = 4000000,
6996 		[2] = 5333333,
6997 		[3] = 4800000,
6998 	};
6999 	static const unsigned int ctg_vco[8] = {
7000 		[0] = 3200000,
7001 		[1] = 4000000,
7002 		[2] = 5333333,
7003 		[3] = 6400000,
7004 		[4] = 2666667,
7005 		[5] = 4266667,
7006 	};
7007 	const unsigned int *vco_table;
7008 	unsigned int vco;
7009 	uint8_t tmp = 0;
7010 
7011 	/* FIXME other chipsets? */
7012 	if (IS_GM45(dev))
7013 		vco_table = ctg_vco;
7014 	else if (IS_G4X(dev))
7015 		vco_table = elk_vco;
7016 	else if (IS_CRESTLINE(dev))
7017 		vco_table = cl_vco;
7018 	else if (IS_PINEVIEW(dev))
7019 		vco_table = pnv_vco;
7020 	else if (IS_G33(dev))
7021 		vco_table = blb_vco;
7022 	else
7023 		return 0;
7024 
7025 	tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);
7026 
7027 	vco = vco_table[tmp & 0x7];
7028 	if (vco == 0)
7029 		DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
7030 	else
7031 		DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
7032 
7033 	return vco;
7034 }
7035 
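/*
 * On GM45 the cdclk is the HPLL VCO divided down; bit 12 of GCFGC picks
 * between the two dividers available for the current VCO, e.g. a
 * 5333333 kHz VCO yields 333333 kHz (/16) or 222222 kHz (/24).
 */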
7036 static int gm45_get_display_clock_speed(struct drm_device *dev)
7037 {
7038 	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7039 	uint16_t tmp = 0;
7040 
7041 	pci_read_config_word(dev->pdev, GCFGC, &tmp);
7042 
7043 	cdclk_sel = (tmp >> 12) & 0x1;
7044 
7045 	switch (vco) {
7046 	case 2666667:
7047 	case 4000000:
7048 	case 5333333:
7049 		return cdclk_sel ? 333333 : 222222;
7050 	case 3200000:
7051 		return cdclk_sel ? 320000 : 228571;
7052 	default:
7053 		DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
7054 		return 222222;
7055 	}
7056 }
7057 
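/*
 * i965GM encodes the cdclk selector in GCFGC bits 12:8 (1-based); the
 * divider comes from a per-VCO table, e.g. a 3200000 kHz VCO with
 * selector 1 uses div_3200[0] = 16, giving 200000 kHz.
 */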
7058 static int i965gm_get_display_clock_speed(struct drm_device *dev)
7059 {
7060 	static const uint8_t div_3200[] = { 16, 10,  8 };
7061 	static const uint8_t div_4000[] = { 20, 12, 10 };
7062 	static const uint8_t div_5333[] = { 24, 16, 14 };
7063 	const uint8_t *div_table;
7064 	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7065 	uint16_t tmp = 0;
7066 
7067 	pci_read_config_word(dev->pdev, GCFGC, &tmp);
7068 
7069 	cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
7070 
7071 	if (cdclk_sel >= ARRAY_SIZE(div_3200))
7072 		goto fail;
7073 
7074 	switch (vco) {
7075 	case 3200000:
7076 		div_table = div_3200;
7077 		break;
7078 	case 4000000:
7079 		div_table = div_4000;
7080 		break;
7081 	case 5333333:
7082 		div_table = div_5333;
7083 		break;
7084 	default:
7085 		goto fail;
7086 	}
7087 
7088 	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
7089 
7090 fail:
7091 	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
7092 	return 200000;
7093 }
7094 
7095 static int g33_get_display_clock_speed(struct drm_device *dev)
7096 {
7097 	static const uint8_t div_3200[] = { 12, 10,  8,  7, 5, 16 };
7098 	static const uint8_t div_4000[] = { 14, 12, 10,  8, 6, 20 };
7099 	static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
7100 	static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
7101 	const uint8_t *div_table;
7102 	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7103 	uint16_t tmp = 0;
7104 
7105 	pci_read_config_word(dev->pdev, GCFGC, &tmp);
7106 
7107 	cdclk_sel = (tmp >> 4) & 0x7;
7108 
7109 	if (cdclk_sel >= ARRAY_SIZE(div_3200))
7110 		goto fail;
7111 
7112 	switch (vco) {
7113 	case 3200000:
7114 		div_table = div_3200;
7115 		break;
7116 	case 4000000:
7117 		div_table = div_4000;
7118 		break;
7119 	case 4800000:
7120 		div_table = div_4800;
7121 		break;
7122 	case 5333333:
7123 		div_table = div_5333;
7124 		break;
7125 	default:
7126 		goto fail;
7127 	}
7128 
7129 	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
7130 
7131 fail:
7132 	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
7133 	return 190476;
7134 }
7135 
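/*
 * Halve both terms until each fits in the 24-bit M/N register fields;
 * the ratio is preserved (modulo the truncation from the shifts).
 */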
7136 static void
7137 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
7138 {
7139 	while (*num > DATA_LINK_M_N_MASK ||
7140 	       *den > DATA_LINK_M_N_MASK) {
7141 		*num >>= 1;
7142 		*den >>= 1;
7143 	}
7144 }
7145 
7146 static void compute_m_n(unsigned int m, unsigned int n,
7147 			uint32_t *ret_m, uint32_t *ret_n)
7148 {
7149 	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7150 	*ret_m = div_u64((uint64_t) m * *ret_n, n);
7151 	intel_reduce_m_n_ratio(ret_m, ret_n);
7152 }
7153 
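/*
 * Derive the data and link M/N values for a DP/FDI link. The data ratio
 * compares the pixel stream bandwidth to the total link bandwidth, the
 * link ratio compares the dotclock to the link clock. Worked example
 * (values illustrative): 1080p at 148500 kHz, 24 bpp, over 4 lanes at
 * 270000 kHz gives a data ratio of 24 * 148500 / (270000 * 4 * 8) =
 * 0.4125 and a link ratio of 148500 / 270000 = 0.55; compute_m_n() then
 * scales each ratio so N is a power of two (capped at DATA_LINK_N_MAX)
 * and reduces both terms until they fit the registers.
 */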
7154 void
7155 intel_link_compute_m_n(int bits_per_pixel, int nlanes,
7156 		       int pixel_clock, int link_clock,
7157 		       struct intel_link_m_n *m_n)
7158 {
7159 	m_n->tu = 64;
7160 
7161 	compute_m_n(bits_per_pixel * pixel_clock,
7162 		    link_clock * nlanes * 8,
7163 		    &m_n->gmch_m, &m_n->gmch_n);
7164 
7165 	compute_m_n(pixel_clock, link_clock,
7166 		    &m_n->link_m, &m_n->link_n);
7167 }
7168 
7169 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7170 {
7171 	if (i915.panel_use_ssc >= 0)
7172 		return i915.panel_use_ssc != 0;
7173 	return dev_priv->vbt.lvds_use_ssc
7174 		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7175 }
7176 
7177 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
7178 {
7179 	return (1 << dpll->n) << 16 | dpll->m2;
7180 }
7181 
7182 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
7183 {
7184 	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7185 }
7186 
7187 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7188 				     struct intel_crtc_state *crtc_state,
7189 				     struct dpll *reduced_clock)
7190 {
7191 	struct drm_device *dev = crtc->base.dev;
7192 	u32 fp, fp2 = 0;
7193 
7194 	if (IS_PINEVIEW(dev)) {
7195 		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7196 		if (reduced_clock)
7197 			fp2 = pnv_dpll_compute_fp(reduced_clock);
7198 	} else {
7199 		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7200 		if (reduced_clock)
7201 			fp2 = i9xx_dpll_compute_fp(reduced_clock);
7202 	}
7203 
7204 	crtc_state->dpll_hw_state.fp0 = fp;
7205 
7206 	crtc->lowfreq_avail = false;
7207 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7208 	    reduced_clock) {
7209 		crtc_state->dpll_hw_state.fp1 = fp2;
7210 		crtc->lowfreq_avail = true;
7211 	} else {
7212 		crtc_state->dpll_hw_state.fp1 = fp;
7213 	}
7214 }
7215 
7216 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum i915_pipe
7217 		pipe)
7218 {
7219 	u32 reg_val;
7220 
7221 	/*
7222 	 * The PLLB opamp always calibrates to the max value of 0x3f; force
7223 	 * enable it and program a reasonable value instead.
7224 	 */
7225 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7226 	reg_val &= 0xffffff00;
7227 	reg_val |= 0x00000030;
7228 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7229 
7230 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7231 	reg_val &= 0x8cffffff;
7232 	reg_val = 0x8c000000;
7233 	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7234 
7235 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7236 	reg_val &= 0xffffff00;
7237 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7238 
7239 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7240 	reg_val &= 0x00ffffff;
7241 	reg_val |= 0xb0000000;
7242 	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7243 }
7244 
7245 static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
7246 					 struct intel_link_m_n *m_n)
7247 {
7248 	struct drm_device *dev = crtc->base.dev;
7249 	struct drm_i915_private *dev_priv = dev->dev_private;
7250 	int pipe = crtc->pipe;
7251 
7252 	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7253 	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
7254 	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
7255 	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
7256 }
7257 
7258 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
7259 					 struct intel_link_m_n *m_n,
7260 					 struct intel_link_m_n *m2_n2)
7261 {
7262 	struct drm_device *dev = crtc->base.dev;
7263 	struct drm_i915_private *dev_priv = dev->dev_private;
7264 	int pipe = crtc->pipe;
7265 	enum transcoder transcoder = crtc->config->cpu_transcoder;
7266 
7267 	if (INTEL_INFO(dev)->gen >= 5) {
7268 		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
7269 		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
7270 		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
7271 		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
7272 		/* M2_N2 registers only exist on gen < 8 and on CHV, so program
7273 		 * them only there, and only when DRRS is supported, to avoid
7274 		 * touching registers that aren't needed.
7275 		 */
7276 		if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
7277 			crtc->config->has_drrs) {
7278 			I915_WRITE(PIPE_DATA_M2(transcoder),
7279 					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
7280 			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
7281 			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
7282 			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
7283 		}
7284 	} else {
7285 		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7286 		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
7287 		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
7288 		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
7289 	}
7290 }
7291 
7292 void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
7293 {
7294 	struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7295 
7296 	if (m_n == M1_N1) {
7297 		dp_m_n = &crtc->config->dp_m_n;
7298 		dp_m2_n2 = &crtc->config->dp_m2_n2;
7299 	} else if (m_n == M2_N2) {
7300 
7301 		/*
7302 		 * M2_N2 registers are not supported here, so the m2_n2 divider
7303 		 * values must be programmed into M1_N1 instead.
7304 		 */
7305 		dp_m_n = &crtc->config->dp_m2_n2;
7306 	} else {
7307 		DRM_ERROR("Unsupported divider value\n");
7308 		return;
7309 	}
7310 
7311 	if (crtc->config->has_pch_encoder)
7312 		intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
7313 	else
7314 		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
7315 }
7316 
7317 static void vlv_compute_dpll(struct intel_crtc *crtc,
7318 			     struct intel_crtc_state *pipe_config)
7319 {
7320 	pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
7321 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7322 	if (crtc->pipe != PIPE_A)
7323 		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7324 
7325 	/* DPLL not used with DSI, but still need the rest set up */
7326 	if (!pipe_config->has_dsi_encoder)
7327 		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7328 			DPLL_EXT_BUFFER_ENABLE_VLV;
7329 
7330 	pipe_config->dpll_hw_state.dpll_md =
7331 		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7332 }
7333 
7334 static void chv_compute_dpll(struct intel_crtc *crtc,
7335 			     struct intel_crtc_state *pipe_config)
7336 {
7337 	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7338 		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7339 	if (crtc->pipe != PIPE_A)
7340 		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7341 
7342 	/* DPLL not used with DSI, but still need the rest set up */
7343 	if (!pipe_config->has_dsi_encoder)
7344 		pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7345 
7346 	pipe_config->dpll_hw_state.dpll_md =
7347 		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7348 }
7349 
7350 static void vlv_prepare_pll(struct intel_crtc *crtc,
7351 			    const struct intel_crtc_state *pipe_config)
7352 {
7353 	struct drm_device *dev = crtc->base.dev;
7354 	struct drm_i915_private *dev_priv = dev->dev_private;
7355 	enum i915_pipe pipe = crtc->pipe;
7356 	u32 mdiv;
7357 	u32 bestn, bestm1, bestm2, bestp1, bestp2;
7358 	u32 coreclk, reg_val;
7359 
7360 	/* Enable Refclk */
7361 	I915_WRITE(DPLL(pipe),
7362 		   pipe_config->dpll_hw_state.dpll &
7363 		   ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
7364 
7365 	/* No need to actually set up the DPLL with DSI */
7366 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7367 		return;
7368 
7369 	mutex_lock(&dev_priv->sb_lock);
7370 
7371 	bestn = pipe_config->dpll.n;
7372 	bestm1 = pipe_config->dpll.m1;
7373 	bestm2 = pipe_config->dpll.m2;
7374 	bestp1 = pipe_config->dpll.p1;
7375 	bestp2 = pipe_config->dpll.p2;
7376 
7377 	/* See eDP HDMI DPIO driver vbios notes doc */
7378 
7379 	/* PLL B needs special handling */
7380 	if (pipe == PIPE_B)
7381 		vlv_pllb_recal_opamp(dev_priv, pipe);
7382 
7383 	/* Set up Tx target for periodic Rcomp update */
7384 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
7385 
7386 	/* Disable target IRef on PLL */
7387 	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
7388 	reg_val &= 0x00ffffff;
7389 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
7390 
7391 	/* Disable fast lock */
7392 	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
7393 
7394 	/* Set idtafcrecal before PLL is enabled */
7395 	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
7396 	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
7397 	mdiv |= ((bestn << DPIO_N_SHIFT));
7398 	mdiv |= (1 << DPIO_K_SHIFT);
7399 
7400 	/*
7401 	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
7402 	 * but we don't support that).
7403 	 * Note: don't use the DAC post divider as it seems unstable.
7404 	 */
7405 	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
7406 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7407 
7408 	mdiv |= DPIO_ENABLE_CALIBRATION;
7409 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7410 
7411 	/* Set HBR and RBR LPF coefficients */
7412 	if (pipe_config->port_clock == 162000 ||
7413 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
7414 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
7415 		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7416 				 0x009f0003);
7417 	else
7418 		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7419 				 0x00d0000f);
7420 
7421 	if (pipe_config->has_dp_encoder) {
7422 		/* Use SSC source */
7423 		if (pipe == PIPE_A)
7424 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7425 					 0x0df40000);
7426 		else
7427 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7428 					 0x0df70000);
7429 	} else { /* HDMI or VGA */
7430 		/* Use bend source */
7431 		if (pipe == PIPE_A)
7432 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7433 					 0x0df70000);
7434 		else
7435 			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7436 					 0x0df40000);
7437 	}
7438 
7439 	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
7440 	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
7441 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
7442 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
7443 		coreclk |= 0x01000000;
7444 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
7445 
7446 	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
7447 	mutex_unlock(&dev_priv->sb_lock);
7448 }
7449 
7450 static void chv_prepare_pll(struct intel_crtc *crtc,
7451 			    const struct intel_crtc_state *pipe_config)
7452 {
7453 	struct drm_device *dev = crtc->base.dev;
7454 	struct drm_i915_private *dev_priv = dev->dev_private;
7455 	enum i915_pipe pipe = crtc->pipe;
7456 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
7457 	u32 loopfilter, tribuf_calcntr;
7458 	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
7459 	u32 dpio_val;
7460 	int vco;
7461 
7462 	/* Enable Refclk and SSC */
7463 	I915_WRITE(DPLL(pipe),
7464 		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
7465 
7466 	/* No need to actually set up the DPLL with DSI */
7467 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7468 		return;
7469 
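	/*
	 * The CHV PLL takes m2 as a fixed-point value: the low 22 bits are
	 * the fractional part (programmed via CHV_PLL_DW2 below) and the
	 * bits above are the integer part (programmed via CHV_PLL_DW0).
	 */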
7470 	bestn = pipe_config->dpll.n;
7471 	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
7472 	bestm1 = pipe_config->dpll.m1;
7473 	bestm2 = pipe_config->dpll.m2 >> 22;
7474 	bestp1 = pipe_config->dpll.p1;
7475 	bestp2 = pipe_config->dpll.p2;
7476 	vco = pipe_config->dpll.vco;
7477 	dpio_val = 0;
7478 	loopfilter = 0;
7479 
7480 	mutex_lock(&dev_priv->sb_lock);
7481 
7482 	/* p1 and p2 divider */
7483 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
7484 			5 << DPIO_CHV_S1_DIV_SHIFT |
7485 			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
7486 			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
7487 			1 << DPIO_CHV_K_DIV_SHIFT);
7488 
7489 	/* Feedback post-divider - m2 */
7490 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
7491 
7492 	/* Feedback refclk divider - n and m1 */
7493 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
7494 			DPIO_CHV_M1_DIV_BY_2 |
7495 			1 << DPIO_CHV_N_DIV_SHIFT);
7496 
7497 	/* M2 fraction division */
7498 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
7499 
7500 	/* M2 fraction division enable */
7501 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
7502 	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
7503 	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
7504 	if (bestm2_frac)
7505 		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
7506 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
7507 
7508 	/* Program digital lock detect threshold */
7509 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
7510 	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
7511 					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
7512 	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
7513 	if (!bestm2_frac)
7514 		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
7515 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
7516 
7517 	/* Loop filter */
7518 	if (vco == 5400000) {
7519 		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
7520 		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
7521 		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
7522 		tribuf_calcntr = 0x9;
7523 	} else if (vco <= 6200000) {
7524 		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
7525 		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
7526 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7527 		tribuf_calcntr = 0x9;
7528 	} else if (vco <= 6480000) {
7529 		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7530 		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7531 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7532 		tribuf_calcntr = 0x8;
7533 	} else {
7534 		/* Not supported. Apply the same limits as in the max case */
7535 		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7536 		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7537 		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7538 		tribuf_calcntr = 0;
7539 	}
7540 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
7541 
7542 	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
7543 	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
7544 	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
7545 	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
7546 
7547 	/* AFC Recal */
7548 	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
7549 			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
7550 			DPIO_AFC_RECAL);
7551 
7552 	mutex_unlock(&dev_priv->sb_lock);
7553 }
7554 
7555 /**
7556  * vlv_force_pll_on - forcibly enable just the PLL
7557  * @dev: drm device
7558  * @pipe: pipe PLL to enable
7559  * @dpll: PLL configuration
7560  *
7561  * Enable the PLL for @pipe using the supplied @dpll config. To be used
7562  * in cases where we need the PLL enabled even when @pipe is not going to
7563  * be enabled.
7564  */
7565 int vlv_force_pll_on(struct drm_device *dev, enum i915_pipe pipe,
7566 		     const struct dpll *dpll)
7567 {
7568 	struct intel_crtc *crtc =
7569 		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
7570 	struct intel_crtc_state *pipe_config;
7571 
7572 	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7573 	if (!pipe_config)
7574 		return -ENOMEM;
7575 
7576 	pipe_config->base.crtc = &crtc->base;
7577 	pipe_config->pixel_multiplier = 1;
7578 	pipe_config->dpll = *dpll;
7579 
7580 	if (IS_CHERRYVIEW(dev)) {
7581 		chv_compute_dpll(crtc, pipe_config);
7582 		chv_prepare_pll(crtc, pipe_config);
7583 		chv_enable_pll(crtc, pipe_config);
7584 	} else {
7585 		vlv_compute_dpll(crtc, pipe_config);
7586 		vlv_prepare_pll(crtc, pipe_config);
7587 		vlv_enable_pll(crtc, pipe_config);
7588 	}
7589 
7590 	kfree(pipe_config);
7591 
7592 	return 0;
7593 }
7594 
7595 /**
7596  * vlv_force_pll_off - forcibly disable just the PLL
7597  * @dev: drm device
7598  * @pipe: pipe PLL to disable
7599  *
7600  * Disable the PLL for @pipe. To be used to undo a previous
7601  * vlv_force_pll_on() for a pipe that was never actually enabled.
7602  */
7603 void vlv_force_pll_off(struct drm_device *dev, enum i915_pipe pipe)
7604 {
7605 	if (IS_CHERRYVIEW(dev))
7606 		chv_disable_pll(to_i915(dev), pipe);
7607 	else
7608 		vlv_disable_pll(to_i915(dev), pipe);
7609 }
7610 
7611 static void i9xx_compute_dpll(struct intel_crtc *crtc,
7612 			      struct intel_crtc_state *crtc_state,
7613 			      struct dpll *reduced_clock)
7614 {
7615 	struct drm_device *dev = crtc->base.dev;
7616 	struct drm_i915_private *dev_priv = dev->dev_private;
7617 	u32 dpll;
7618 	bool is_sdvo;
7619 	struct dpll *clock = &crtc_state->dpll;
7620 
7621 	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7622 
7623 	is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7624 		intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);
7625 
7626 	dpll = DPLL_VGA_MODE_DIS;
7627 
7628 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
7629 		dpll |= DPLLB_MODE_LVDS;
7630 	else
7631 		dpll |= DPLLB_MODE_DAC_SERIAL;
7632 
7633 	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
7634 		dpll |= (crtc_state->pixel_multiplier - 1)
7635 			<< SDVO_MULTIPLIER_SHIFT_HIRES;
7636 	}
7637 
7638 	if (is_sdvo)
7639 		dpll |= DPLL_SDVO_HIGH_SPEED;
7640 
7641 	if (crtc_state->has_dp_encoder)
7642 		dpll |= DPLL_SDVO_HIGH_SPEED;
7643 
7644 	/* compute bitmask from p1 value */
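	/* The P1 field is one-hot: p1 == N sets bit N-1, e.g. p1 == 2 -> 0x2 */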
7645 	if (IS_PINEVIEW(dev))
7646 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
7647 	else {
7648 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7649 		if (IS_G4X(dev) && reduced_clock)
7650 			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7651 	}
7652 	switch (clock->p2) {
7653 	case 5:
7654 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7655 		break;
7656 	case 7:
7657 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7658 		break;
7659 	case 10:
7660 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7661 		break;
7662 	case 14:
7663 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7664 		break;
7665 	}
7666 	if (INTEL_INFO(dev)->gen >= 4)
7667 		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
7668 
7669 	if (crtc_state->sdvo_tv_clock)
7670 		dpll |= PLL_REF_INPUT_TVCLKINBC;
7671 	else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7672 		 intel_panel_use_ssc(dev_priv))
7673 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7674 	else
7675 		dpll |= PLL_REF_INPUT_DREFCLK;
7676 
7677 	dpll |= DPLL_VCO_ENABLE;
7678 	crtc_state->dpll_hw_state.dpll = dpll;
7679 
7680 	if (INTEL_INFO(dev)->gen >= 4) {
7681 		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
7682 			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
7683 		crtc_state->dpll_hw_state.dpll_md = dpll_md;
7684 	}
7685 }
7686 
7687 static void i8xx_compute_dpll(struct intel_crtc *crtc,
7688 			      struct intel_crtc_state *crtc_state,
7689 			      struct dpll *reduced_clock)
7690 {
7691 	struct drm_device *dev = crtc->base.dev;
7692 	struct drm_i915_private *dev_priv = dev->dev_private;
7693 	u32 dpll;
7694 	struct dpll *clock = &crtc_state->dpll;
7695 
7696 	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7697 
7698 	dpll = DPLL_VGA_MODE_DIS;
7699 
7700 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7701 		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7702 	} else {
7703 		if (clock->p1 == 2)
7704 			dpll |= PLL_P1_DIVIDE_BY_TWO;
7705 		else
7706 			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7707 		if (clock->p2 == 4)
7708 			dpll |= PLL_P2_DIVIDE_BY_4;
7709 	}
7710 
7711 	if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
7712 		dpll |= DPLL_DVO_2X_MODE;
7713 
7714 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7715 	    intel_panel_use_ssc(dev_priv))
7716 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7717 	else
7718 		dpll |= PLL_REF_INPUT_DREFCLK;
7719 
7720 	dpll |= DPLL_VCO_ENABLE;
7721 	crtc_state->dpll_hw_state.dpll = dpll;
7722 }
7723 
7724 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
7725 {
7726 	struct drm_device *dev = intel_crtc->base.dev;
7727 	struct drm_i915_private *dev_priv = dev->dev_private;
7728 	enum i915_pipe pipe = intel_crtc->pipe;
7729 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
7730 	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
7731 	uint32_t crtc_vtotal, crtc_vblank_end;
7732 	int vsyncshift = 0;
7733 
7734 	/* We need to be careful not to change the adjusted mode, for otherwise
7735 	 * the hw state checker will get angry at the mismatch. */
7736 	crtc_vtotal = adjusted_mode->crtc_vtotal;
7737 	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
7738 
7739 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
7740 		/* the chip adds 2 halflines automatically */
7741 		crtc_vtotal -= 1;
7742 		crtc_vblank_end -= 1;
7743 
7744 		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
7745 			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
7746 		else
7747 			vsyncshift = adjusted_mode->crtc_hsync_start -
7748 				adjusted_mode->crtc_htotal / 2;
7749 		if (vsyncshift < 0)
7750 			vsyncshift += adjusted_mode->crtc_htotal;
7751 	}
7752 
7753 	if (INTEL_INFO(dev)->gen > 3)
7754 		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
7755 
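	/*
	 * Each timing register packs two zero-based values: the low 16 bits
	 * hold the active/start count and the high 16 bits the total/end
	 * count, hence all the "- 1" adjustments below.
	 */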
7756 	I915_WRITE(HTOTAL(cpu_transcoder),
7757 		   (adjusted_mode->crtc_hdisplay - 1) |
7758 		   ((adjusted_mode->crtc_htotal - 1) << 16));
7759 	I915_WRITE(HBLANK(cpu_transcoder),
7760 		   (adjusted_mode->crtc_hblank_start - 1) |
7761 		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
7762 	I915_WRITE(HSYNC(cpu_transcoder),
7763 		   (adjusted_mode->crtc_hsync_start - 1) |
7764 		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
7765 
7766 	I915_WRITE(VTOTAL(cpu_transcoder),
7767 		   (adjusted_mode->crtc_vdisplay - 1) |
7768 		   ((crtc_vtotal - 1) << 16));
7769 	I915_WRITE(VBLANK(cpu_transcoder),
7770 		   (adjusted_mode->crtc_vblank_start - 1) |
7771 		   ((crtc_vblank_end - 1) << 16));
7772 	I915_WRITE(VSYNC(cpu_transcoder),
7773 		   (adjusted_mode->crtc_vsync_start - 1) |
7774 		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
7775 
7776 	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
7777 	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
7778 	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
7779 	 * bits. */
7780 	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
7781 	    (pipe == PIPE_B || pipe == PIPE_C))
7782 		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
7783 
7784 }
7785 
7786 static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
7787 {
7788 	struct drm_device *dev = intel_crtc->base.dev;
7789 	struct drm_i915_private *dev_priv = dev->dev_private;
7790 	enum i915_pipe pipe = intel_crtc->pipe;
7791 
7792 	/* pipesrc controls the size that is scaled from, which should
7793 	 * always be the user's requested size.
7794 	 */
7795 	I915_WRITE(PIPESRC(pipe),
7796 		   ((intel_crtc->config->pipe_src_w - 1) << 16) |
7797 		   (intel_crtc->config->pipe_src_h - 1));
7798 }
7799 
7800 static void intel_get_pipe_timings(struct intel_crtc *crtc,
7801 				   struct intel_crtc_state *pipe_config)
7802 {
7803 	struct drm_device *dev = crtc->base.dev;
7804 	struct drm_i915_private *dev_priv = dev->dev_private;
7805 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7806 	uint32_t tmp;
7807 
7808 	tmp = I915_READ(HTOTAL(cpu_transcoder));
7809 	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
7810 	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
7811 	tmp = I915_READ(HBLANK(cpu_transcoder));
7812 	pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
7813 	pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
7814 	tmp = I915_READ(HSYNC(cpu_transcoder));
7815 	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
7816 	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
7817 
7818 	tmp = I915_READ(VTOTAL(cpu_transcoder));
7819 	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
7820 	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
7821 	tmp = I915_READ(VBLANK(cpu_transcoder));
7822 	pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
7823 	pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
7824 	tmp = I915_READ(VSYNC(cpu_transcoder));
7825 	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
7826 	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
7827 
7828 	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
7829 		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
7830 		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
7831 		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
7832 	}
7833 }
7834 
7835 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
7836 				    struct intel_crtc_state *pipe_config)
7837 {
7838 	struct drm_device *dev = crtc->base.dev;
7839 	struct drm_i915_private *dev_priv = dev->dev_private;
7840 	u32 tmp;
7841 
7842 	tmp = I915_READ(PIPESRC(crtc->pipe));
7843 	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7844 	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7845 
7846 	pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7847 	pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
7848 }
7849 
7850 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
7851 				 struct intel_crtc_state *pipe_config)
7852 {
7853 	mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7854 	mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7855 	mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7856 	mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
7857 
7858 	mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7859 	mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7860 	mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7861 	mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
7862 
7863 	mode->flags = pipe_config->base.adjusted_mode.flags;
7864 	mode->type = DRM_MODE_TYPE_DRIVER;
7865 
7866 	mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7867 	mode->flags |= pipe_config->base.adjusted_mode.flags;
7868 
7869 	mode->hsync = drm_mode_hsync(mode);
7870 	mode->vrefresh = drm_mode_vrefresh(mode);
7871 	drm_mode_set_name(mode);
7872 }
7873 
7874 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
7875 {
7876 	struct drm_device *dev = intel_crtc->base.dev;
7877 	struct drm_i915_private *dev_priv = dev->dev_private;
7878 	uint32_t pipeconf;
7879 
7880 	pipeconf = 0;
7881 
7882 	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
7883 	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
7884 		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
7885 
7886 	if (intel_crtc->config->double_wide)
7887 		pipeconf |= PIPECONF_DOUBLE_WIDE;
7888 
7889 	/* only g4x and later have fancy bpc/dither controls */
7890 	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
7891 		/* Bspec claims that we can't use dithering for 30bpp pipes. */
7892 		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
7893 			pipeconf |= PIPECONF_DITHER_EN |
7894 				    PIPECONF_DITHER_TYPE_SP;
7895 
7896 		switch (intel_crtc->config->pipe_bpp) {
7897 		case 18:
7898 			pipeconf |= PIPECONF_6BPC;
7899 			break;
7900 		case 24:
7901 			pipeconf |= PIPECONF_8BPC;
7902 			break;
7903 		case 30:
7904 			pipeconf |= PIPECONF_10BPC;
7905 			break;
7906 		default:
7907 			/* Case prevented by intel_choose_pipe_bpp_dither. */
7908 			BUG();
7909 		}
7910 	}
7911 
7912 	if (HAS_PIPE_CXSR(dev)) {
7913 		if (intel_crtc->lowfreq_avail) {
7914 			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
7915 			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
7916 		} else {
7917 			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
7918 		}
7919 	}
7920 
7921 	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
7922 		if (INTEL_INFO(dev)->gen < 4 ||
7923 		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
7924 			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
7925 		else
7926 			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
7927 	} else
7928 		pipeconf |= PIPECONF_PROGRESSIVE;
7929 
7930 	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
7931 	     intel_crtc->config->limited_color_range)
7932 		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
7933 
7934 	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
7935 	POSTING_READ(PIPECONF(intel_crtc->pipe));
7936 }
7937 
7938 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7939 				   struct intel_crtc_state *crtc_state)
7940 {
7941 	struct drm_device *dev = crtc->base.dev;
7942 	struct drm_i915_private *dev_priv = dev->dev_private;
7943 	const struct intel_limit *limit;
7944 	int refclk = 48000;
7945 
7946 	memset(&crtc_state->dpll_hw_state, 0,
7947 	       sizeof(crtc_state->dpll_hw_state));
7948 
7949 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7950 		if (intel_panel_use_ssc(dev_priv)) {
7951 			refclk = dev_priv->vbt.lvds_ssc_freq;
7952 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7953 		}
7954 
7955 		limit = &intel_limits_i8xx_lvds;
7956 	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) {
7957 		limit = &intel_limits_i8xx_dvo;
7958 	} else {
7959 		limit = &intel_limits_i8xx_dac;
7960 	}
7961 
7962 	if (!crtc_state->clock_set &&
7963 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7964 				 refclk, NULL, &crtc_state->dpll)) {
7965 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
7966 		return -EINVAL;
7967 	}
7968 
7969 	i8xx_compute_dpll(crtc, crtc_state, NULL);
7970 
7971 	return 0;
7972 }
7973 
7974 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7975 				  struct intel_crtc_state *crtc_state)
7976 {
7977 	struct drm_device *dev = crtc->base.dev;
7978 	struct drm_i915_private *dev_priv = dev->dev_private;
7979 	const struct intel_limit *limit;
7980 	int refclk = 96000;
7981 
7982 	memset(&crtc_state->dpll_hw_state, 0,
7983 	       sizeof(crtc_state->dpll_hw_state));
7984 
7985 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7986 		if (intel_panel_use_ssc(dev_priv)) {
7987 			refclk = dev_priv->vbt.lvds_ssc_freq;
7988 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7989 		}
7990 
7991 		if (intel_is_dual_link_lvds(dev))
7992 			limit = &intel_limits_g4x_dual_channel_lvds;
7993 		else
7994 			limit = &intel_limits_g4x_single_channel_lvds;
7995 	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
7996 		   intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
7997 		limit = &intel_limits_g4x_hdmi;
7998 	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
7999 		limit = &intel_limits_g4x_sdvo;
8000 	} else {
8001 		/* Fall back to the i9xx SDVO limits for any other output type */
8002 		limit = &intel_limits_i9xx_sdvo;
8003 	}
8004 
8005 	if (!crtc_state->clock_set &&
8006 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8007 				refclk, NULL, &crtc_state->dpll)) {
8008 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8009 		return -EINVAL;
8010 	}
8011 
8012 	i9xx_compute_dpll(crtc, crtc_state, NULL);
8013 
8014 	return 0;
8015 }
8016 
8017 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
8018 				  struct intel_crtc_state *crtc_state)
8019 {
8020 	struct drm_device *dev = crtc->base.dev;
8021 	struct drm_i915_private *dev_priv = dev->dev_private;
8022 	const struct intel_limit *limit;
8023 	int refclk = 96000;
8024 
8025 	memset(&crtc_state->dpll_hw_state, 0,
8026 	       sizeof(crtc_state->dpll_hw_state));
8027 
8028 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8029 		if (intel_panel_use_ssc(dev_priv)) {
8030 			refclk = dev_priv->vbt.lvds_ssc_freq;
8031 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8032 		}
8033 
8034 		limit = &intel_limits_pineview_lvds;
8035 	} else {
8036 		limit = &intel_limits_pineview_sdvo;
8037 	}
8038 
8039 	if (!crtc_state->clock_set &&
8040 	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8041 				refclk, NULL, &crtc_state->dpll)) {
8042 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8043 		return -EINVAL;
8044 	}
8045 
8046 	i9xx_compute_dpll(crtc, crtc_state, NULL);
8047 
8048 	return 0;
8049 }
8050 
8051 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
8052 				   struct intel_crtc_state *crtc_state)
8053 {
8054 	struct drm_device *dev = crtc->base.dev;
8055 	struct drm_i915_private *dev_priv = dev->dev_private;
8056 	const struct intel_limit *limit;
8057 	int refclk = 96000;
8058 
8059 	memset(&crtc_state->dpll_hw_state, 0,
8060 	       sizeof(crtc_state->dpll_hw_state));
8061 
8062 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8063 		if (intel_panel_use_ssc(dev_priv)) {
8064 			refclk = dev_priv->vbt.lvds_ssc_freq;
8065 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8066 		}
8067 
8068 		limit = &intel_limits_i9xx_lvds;
8069 	} else {
8070 		limit = &intel_limits_i9xx_sdvo;
8071 	}
8072 
8073 	if (!crtc_state->clock_set &&
8074 	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8075 				 refclk, NULL, &crtc_state->dpll)) {
8076 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8077 		return -EINVAL;
8078 	}
8079 
8080 	i9xx_compute_dpll(crtc, crtc_state, NULL);
8081 
8082 	return 0;
8083 }
8084 
8085 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
8086 				  struct intel_crtc_state *crtc_state)
8087 {
8088 	int refclk = 100000;
8089 	const struct intel_limit *limit = &intel_limits_chv;
8090 
8091 	memset(&crtc_state->dpll_hw_state, 0,
8092 	       sizeof(crtc_state->dpll_hw_state));
8093 
8094 	if (!crtc_state->clock_set &&
8095 	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8096 				refclk, NULL, &crtc_state->dpll)) {
8097 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8098 		return -EINVAL;
8099 	}
8100 
8101 	chv_compute_dpll(crtc, crtc_state);
8102 
8103 	return 0;
8104 }
8105 
8106 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
8107 				  struct intel_crtc_state *crtc_state)
8108 {
8109 	int refclk = 100000;
8110 	const struct intel_limit *limit = &intel_limits_vlv;
8111 
8112 	memset(&crtc_state->dpll_hw_state, 0,
8113 	       sizeof(crtc_state->dpll_hw_state));
8114 
8115 	if (!crtc_state->clock_set &&
8116 	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8117 				refclk, NULL, &crtc_state->dpll)) {
8118 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
8119 		return -EINVAL;
8120 	}
8121 
8122 	vlv_compute_dpll(crtc, crtc_state);
8123 
8124 	return 0;
8125 }
8126 
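/*
 * Read out the panel fitter state. gen2/3 have a single fitter that is
 * hardwired to pipe B (mobile parts only); gen4 has a pipe select field
 * in PFIT_CONTROL, so check that the fitter is actually attached to our
 * pipe before reporting it in the pipe config.
 */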
8127 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
8128 				 struct intel_crtc_state *pipe_config)
8129 {
8130 	struct drm_device *dev = crtc->base.dev;
8131 	struct drm_i915_private *dev_priv = dev->dev_private;
8132 	uint32_t tmp;
8133 
8134 	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
8135 		return;
8136 
8137 	tmp = I915_READ(PFIT_CONTROL);
8138 	if (!(tmp & PFIT_ENABLE))
8139 		return;
8140 
8141 	/* Check whether the pfit is attached to our pipe. */
8142 	if (INTEL_INFO(dev)->gen < 4) {
8143 		if (crtc->pipe != PIPE_B)
8144 			return;
8145 	} else {
8146 		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8147 			return;
8148 	}
8149 
8150 	pipe_config->gmch_pfit.control = tmp;
8151 	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
8152 }
8153 
8154 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8155 			       struct intel_crtc_state *pipe_config)
8156 {
8157 	struct drm_device *dev = crtc->base.dev;
8158 	struct drm_i915_private *dev_priv = dev->dev_private;
8159 	int pipe = pipe_config->cpu_transcoder;
8160 	struct dpll clock;
8161 	u32 mdiv;
8162 	int refclk = 100000;
8163 
8164 	/* In case of DSI, DPLL will not be used */
8165 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8166 		return;
8167 
8168 	mutex_lock(&dev_priv->sb_lock);
8169 	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
8170 	mutex_unlock(&dev_priv->sb_lock);
8171 
8172 	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8173 	clock.m2 = mdiv & DPIO_M2DIV_MASK;
8174 	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8175 	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8176 	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8177 
8178 	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
8179 }
8180 
8181 static void
8182 i9xx_get_initial_plane_config(struct intel_crtc *crtc,
8183 			      struct intel_initial_plane_config *plane_config)
8184 {
8185 	struct drm_device *dev = crtc->base.dev;
8186 	struct drm_i915_private *dev_priv = dev->dev_private;
8187 	u32 val, base, offset;
8188 	int pipe = crtc->pipe, plane = crtc->plane;
8189 	int fourcc, pixel_format;
8190 	unsigned int aligned_height;
8191 	struct drm_framebuffer *fb;
8192 	struct intel_framebuffer *intel_fb;
8193 
8194 	val = I915_READ(DSPCNTR(plane));
8195 	if (!(val & DISPLAY_PLANE_ENABLE))
8196 		return;
8197 
8198 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8199 	if (!intel_fb) {
8200 		DRM_DEBUG_KMS("failed to alloc fb\n");
8201 		return;
8202 	}
8203 
8204 	fb = &intel_fb->base;
8205 
8206 	if (INTEL_INFO(dev)->gen >= 4) {
8207 		if (val & DISPPLANE_TILED) {
8208 			plane_config->tiling = I915_TILING_X;
8209 			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
8210 		}
8211 	}
8212 
8213 	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
8214 	fourcc = i9xx_format_to_fourcc(pixel_format);
8215 	fb->pixel_format = fourcc;
8216 	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
8217 
8218 	if (INTEL_INFO(dev)->gen >= 4) {
8219 		if (plane_config->tiling)
8220 			offset = I915_READ(DSPTILEOFF(plane));
8221 		else
8222 			offset = I915_READ(DSPLINOFF(plane));
8223 		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
8224 	} else {
8225 		base = I915_READ(DSPADDR(plane));
8226 	}
8227 	plane_config->base = base;
8228 
8229 	val = I915_READ(PIPESRC(pipe));
8230 	fb->width = ((val >> 16) & 0xfff) + 1;
8231 	fb->height = ((val >> 0) & 0xfff) + 1;
8232 
8233 	val = I915_READ(DSPSTRIDE(pipe));
8234 	fb->pitches[0] = val & 0xffffffc0;
8235 
8236 	aligned_height = intel_fb_align_height(dev, fb->height,
8237 					       fb->pixel_format,
8238 					       fb->modifier[0]);
8239 
8240 	plane_config->size = fb->pitches[0] * aligned_height;
8241 
8242 	DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8243 		      pipe_name(pipe), plane, fb->width, fb->height,
8244 		      fb->bits_per_pixel, base, fb->pitches[0],
8245 		      plane_config->size);
8246 
8247 	plane_config->fb = intel_fb;
8248 }
8249 
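/*
 * Inverse of chv_prepare_pll(): reassemble the fixed-point m2 from the
 * integer part in CHV_PLL_DW0 (shifted up 22 bits) plus the fraction in
 * CHV_PLL_DW2 when the fractional divider is enabled, then recompute
 * the resulting port clock from the 100 MHz refclk.
 */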
8250 static void chv_crtc_clock_get(struct intel_crtc *crtc,
8251 			       struct intel_crtc_state *pipe_config)
8252 {
8253 	struct drm_device *dev = crtc->base.dev;
8254 	struct drm_i915_private *dev_priv = dev->dev_private;
8255 	int pipe = pipe_config->cpu_transcoder;
8256 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
8257 	struct dpll clock;
8258 	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
8259 	int refclk = 100000;
8260 
8261 	/* In case of DSI, DPLL will not be used */
8262 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8263 		return;
8264 
8265 	mutex_lock(&dev_priv->sb_lock);
8266 	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
8267 	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
8268 	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
8269 	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
8270 	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
8271 	mutex_unlock(&dev_priv->sb_lock);
8272 
8273 	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
8274 	clock.m2 = (pll_dw0 & 0xff) << 22;
8275 	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
8276 		clock.m2 |= pll_dw2 & 0x3fffff;
8277 	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
8278 	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
8279 	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
8280 
8281 	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
8282 }
8283 
8284 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8285 				 struct intel_crtc_state *pipe_config)
8286 {
8287 	struct drm_device *dev = crtc->base.dev;
8288 	struct drm_i915_private *dev_priv = dev->dev_private;
8289 	enum intel_display_power_domain power_domain;
8290 	uint32_t tmp;
8291 	bool ret;
8292 
8293 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8294 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
8295 		return false;
8296 
8297 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8298 	pipe_config->shared_dpll = NULL;
8299 
8300 	ret = false;
8301 
8302 	tmp = I915_READ(PIPECONF(crtc->pipe));
8303 	if (!(tmp & PIPECONF_ENABLE))
8304 		goto out;
8305 
8306 	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
8307 		switch (tmp & PIPECONF_BPC_MASK) {
8308 		case PIPECONF_6BPC:
8309 			pipe_config->pipe_bpp = 18;
8310 			break;
8311 		case PIPECONF_8BPC:
8312 			pipe_config->pipe_bpp = 24;
8313 			break;
8314 		case PIPECONF_10BPC:
8315 			pipe_config->pipe_bpp = 30;
8316 			break;
8317 		default:
8318 			break;
8319 		}
8320 	}
8321 
8322 	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
8323 	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
8324 		pipe_config->limited_color_range = true;
8325 
8326 	if (INTEL_INFO(dev)->gen < 4)
8327 		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
8328 
8329 	intel_get_pipe_timings(crtc, pipe_config);
8330 	intel_get_pipe_src_size(crtc, pipe_config);
8331 
8332 	i9xx_get_pfit_config(crtc, pipe_config);
8333 
8334 	if (INTEL_INFO(dev)->gen >= 4) {
8335 		/* No way to read it out on pipes B and C */
8336 		if (IS_CHERRYVIEW(dev) && crtc->pipe != PIPE_A)
8337 			tmp = dev_priv->chv_dpll_md[crtc->pipe];
8338 		else
8339 			tmp = I915_READ(DPLL_MD(crtc->pipe));
8340 		pipe_config->pixel_multiplier =
8341 			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
8342 			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
8343 		pipe_config->dpll_hw_state.dpll_md = tmp;
8344 	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
8345 		tmp = I915_READ(DPLL(crtc->pipe));
8346 		pipe_config->pixel_multiplier =
8347 			((tmp & SDVO_MULTIPLIER_MASK)
8348 			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
8349 	} else {
8350 		/* Note that on i915G/GM the pixel multiplier is in the sdvo
8351 		 * port and will be fixed up in the encoder->get_config
8352 		 * function. */
8353 		pipe_config->pixel_multiplier = 1;
8354 	}
8355 	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
8356 	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
8357 		/*
8358 		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
8359 		 * on 830. Filter it out here so that we don't
8360 		 * report errors due to that.
8361 		 */
8362 		if (IS_I830(dev))
8363 			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
8364 
8365 		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
8366 		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
8367 	} else {
8368 		/* Mask out read-only status bits. */
8369 		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
8370 						     DPLL_PORTC_READY_MASK |
8371 						     DPLL_PORTB_READY_MASK);
8372 	}
8373 
8374 	if (IS_CHERRYVIEW(dev))
8375 		chv_crtc_clock_get(crtc, pipe_config);
8376 	else if (IS_VALLEYVIEW(dev))
8377 		vlv_crtc_clock_get(crtc, pipe_config);
8378 	else
8379 		i9xx_crtc_clock_get(crtc, pipe_config);
8380 
8381 	/*
8382 	 * Normally the dotclock is filled in by the encoder .get_config()
8383 	 * but in case the pipe is enabled w/o any ports we need a sane
8384 	 * default.
8385 	 */
8386 	pipe_config->base.adjusted_mode.crtc_clock =
8387 		pipe_config->port_clock / pipe_config->pixel_multiplier;
8388 
8389 	ret = true;
8390 
8391 out:
8392 	intel_display_power_put(dev_priv, power_domain);
8393 
8394 	return ret;
8395 }
8396 
8397 static void ironlake_init_pch_refclk(struct drm_device *dev)
8398 {
8399 	struct drm_i915_private *dev_priv = dev->dev_private;
8400 	struct intel_encoder *encoder;
8401 	int i;
8402 	u32 val, final;
8403 	bool has_lvds = false;
8404 	bool has_cpu_edp = false;
8405 	bool has_panel = false;
8406 	bool has_ck505 = false;
8407 	bool can_ssc = false;
8408 	bool using_ssc_source = false;
8409 
8410 	/* We need to take the global config into account */
8411 	for_each_intel_encoder(dev, encoder) {
8412 		switch (encoder->type) {
8413 		case INTEL_OUTPUT_LVDS:
8414 			has_panel = true;
8415 			has_lvds = true;
8416 			break;
8417 		case INTEL_OUTPUT_EDP:
8418 			has_panel = true;
8419 			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
8420 				has_cpu_edp = true;
8421 			break;
8422 		default:
8423 			break;
8424 		}
8425 	}
8426 
8427 	if (HAS_PCH_IBX(dev)) {
8428 		has_ck505 = dev_priv->vbt.display_clock_mode;
8429 		can_ssc = has_ck505;
8430 	} else {
8431 		has_ck505 = false;
8432 		can_ssc = true;
8433 	}
8434 
8435 	/* Check if any DPLLs are using the SSC source */
8436 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8437 		u32 temp = I915_READ(PCH_DPLL(i));
8438 
8439 		if (!(temp & DPLL_VCO_ENABLE))
8440 			continue;
8441 
8442 		if ((temp & PLL_REF_INPUT_MASK) ==
8443 		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
8444 			using_ssc_source = true;
8445 			break;
8446 		}
8447 	}
8448 
8449 	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
8450 		      has_panel, has_lvds, has_ck505, using_ssc_source);
8451 
8452 	/* Ironlake: try to set up the display reference clock before
8453 	 * enabling the DPLLs. This is only under the driver's control
8454 	 * from PCH B stepping onwards; earlier steppings ignore this
8455 	 * setting.
8456 	 */
8457 	val = I915_READ(PCH_DREF_CONTROL);
8458 
8459 	/* As we must carefully and slowly disable/enable each source in turn,
8460 	 * compute the final state we want first and check if we need to
8461 	 * make any changes at all.
8462 	 */
8463 	final = val;
8464 	final &= ~DREF_NONSPREAD_SOURCE_MASK;
8465 	if (has_ck505)
8466 		final |= DREF_NONSPREAD_CK505_ENABLE;
8467 	else
8468 		final |= DREF_NONSPREAD_SOURCE_ENABLE;
8469 
8470 	final &= ~DREF_SSC_SOURCE_MASK;
8471 	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8472 	final &= ~DREF_SSC1_ENABLE;
8473 
8474 	if (has_panel) {
8475 		final |= DREF_SSC_SOURCE_ENABLE;
8476 
8477 		if (intel_panel_use_ssc(dev_priv) && can_ssc)
8478 			final |= DREF_SSC1_ENABLE;
8479 
8480 		if (has_cpu_edp) {
8481 			if (intel_panel_use_ssc(dev_priv) && can_ssc)
8482 				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8483 			else
8484 				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8485 		} else
8486 			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8487 	} else if (using_ssc_source) {
8488 		final |= DREF_SSC_SOURCE_ENABLE;
8489 		final |= DREF_SSC1_ENABLE;
8490 	}
8491 
8492 	if (final == val)
8493 		return;
8494 
8495 	/* Always enable nonspread source */
8496 	val &= ~DREF_NONSPREAD_SOURCE_MASK;
8497 
8498 	if (has_ck505)
8499 		val |= DREF_NONSPREAD_CK505_ENABLE;
8500 	else
8501 		val |= DREF_NONSPREAD_SOURCE_ENABLE;
8502 
8503 	if (has_panel) {
8504 		val &= ~DREF_SSC_SOURCE_MASK;
8505 		val |= DREF_SSC_SOURCE_ENABLE;
8506 
8507 		/* SSC must be turned on before enabling the CPU output */
8508 		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8509 			DRM_DEBUG_KMS("Using SSC on panel\n");
8510 			val |= DREF_SSC1_ENABLE;
8511 		} else
8512 			val &= ~DREF_SSC1_ENABLE;
8513 
8514 		/* Get SSC going before enabling the outputs */
8515 		I915_WRITE(PCH_DREF_CONTROL, val);
8516 		POSTING_READ(PCH_DREF_CONTROL);
8517 		udelay(200);
8518 
8519 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8520 
8521 		/* Enable CPU source on CPU attached eDP */
8522 		if (has_cpu_edp) {
8523 			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8524 				DRM_DEBUG_KMS("Using SSC on eDP\n");
8525 				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8526 			} else
8527 				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8528 		} else
8529 			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8530 
8531 		I915_WRITE(PCH_DREF_CONTROL, val);
8532 		POSTING_READ(PCH_DREF_CONTROL);
8533 		udelay(200);
8534 	} else {
8535 		DRM_DEBUG_KMS("Disabling CPU source output\n");
8536 
8537 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8538 
8539 		/* Turn off CPU output */
8540 		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8541 
8542 		I915_WRITE(PCH_DREF_CONTROL, val);
8543 		POSTING_READ(PCH_DREF_CONTROL);
8544 		udelay(200);
8545 
8546 		if (!using_ssc_source) {
8547 			DRM_DEBUG_KMS("Disabling SSC source\n");
8548 
8549 			/* Turn off the SSC source */
8550 			val &= ~DREF_SSC_SOURCE_MASK;
8551 			val |= DREF_SSC_SOURCE_DISABLE;
8552 
8553 			/* Turn off SSC1 */
8554 			val &= ~DREF_SSC1_ENABLE;
8555 
8556 			I915_WRITE(PCH_DREF_CONTROL, val);
8557 			POSTING_READ(PCH_DREF_CONTROL);
8558 			udelay(200);
8559 		}
8560 	}
8561 
8562 	BUG_ON(val != final);
8563 }
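
/*
 * The sequence above repeats the same three-step pattern after every
 * PCH_DREF_CONTROL update. As a minimal sketch (hypothetical helper, not
 * part of this driver), each step could be factored as:
 */
#if 0
static void pch_dref_write_and_settle(struct drm_i915_private *dev_priv,
				      uint32_t val)
{
	I915_WRITE(PCH_DREF_CONTROL, val);	/* program the new source state */
	POSTING_READ(PCH_DREF_CONTROL);		/* flush the write */
	udelay(200);				/* let the reference clock settle */
}
#endif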
8564 
8565 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
8566 {
8567 	uint32_t tmp;
8568 
8569 	tmp = I915_READ(SOUTH_CHICKEN2);
8570 	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
8571 	I915_WRITE(SOUTH_CHICKEN2, tmp);
8572 
8573 	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
8574 			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
8575 		DRM_ERROR("FDI mPHY reset assert timeout\n");
8576 
8577 	tmp = I915_READ(SOUTH_CHICKEN2);
8578 	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
8579 	I915_WRITE(SOUTH_CHICKEN2, tmp);
8580 
8581 	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
8582 			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
8583 		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
8584 }
8585 
8586 /* WaMPhyProgramming:hsw */
8587 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
8588 {
8589 	uint32_t tmp;
8590 
8591 	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
8592 	tmp &= ~(0xFF << 24);
8593 	tmp |= (0x12 << 24);
8594 	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
8595 
8596 	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
8597 	tmp |= (1 << 11);
8598 	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
8599 
8600 	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
8601 	tmp |= (1 << 11);
8602 	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
8603 
8604 	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
8605 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8606 	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
8607 
8608 	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
8609 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8610 	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
8611 
8612 	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
8613 	tmp &= ~(7 << 13);
8614 	tmp |= (5 << 13);
8615 	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
8616 
8617 	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
8618 	tmp &= ~(7 << 13);
8619 	tmp |= (5 << 13);
8620 	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
8621 
8622 	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
8623 	tmp &= ~0xFF;
8624 	tmp |= 0x1C;
8625 	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
8626 
8627 	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
8628 	tmp &= ~0xFF;
8629 	tmp |= 0x1C;
8630 	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
8631 
8632 	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
8633 	tmp &= ~(0xFF << 16);
8634 	tmp |= (0x1C << 16);
8635 	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
8636 
8637 	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
8638 	tmp &= ~(0xFF << 16);
8639 	tmp |= (0x1C << 16);
8640 	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
8641 
8642 	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
8643 	tmp |= (1 << 27);
8644 	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
8645 
8646 	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
8647 	tmp |= (1 << 27);
8648 	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
8649 
8650 	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
8651 	tmp &= ~(0xF << 28);
8652 	tmp |= (4 << 28);
8653 	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
8654 
8655 	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
8656 	tmp &= ~(0xF << 28);
8657 	tmp |= (4 << 28);
8658 	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
8659 }
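
/*
 * Most steps in WaMPhyProgramming:hsw above apply one read-modify-write to a
 * pair of per-lane mPHY registers that sit 0x100 apart (0x20xx for lane 0,
 * 0x21xx for lane 1). A sketch of that pattern as a hypothetical helper (not
 * used by this driver):
 */
#if 0
static void lpt_mphy_rmw_pair(struct drm_i915_private *dev_priv,
			      u16 reg, uint32_t clear, uint32_t set)
{
	uint32_t tmp;

	/* lane 0 */
	tmp = intel_sbi_read(dev_priv, reg, SBI_MPHY);
	tmp = (tmp & ~clear) | set;
	intel_sbi_write(dev_priv, reg, tmp, SBI_MPHY);

	/* lane 1: same update, 0x100 higher */
	tmp = intel_sbi_read(dev_priv, reg + 0x100, SBI_MPHY);
	tmp = (tmp & ~clear) | set;
	intel_sbi_write(dev_priv, reg + 0x100, tmp, SBI_MPHY);
}
#endif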
8660 
8661 /* Implements 3 different sequences from BSpec chapter "Display iCLK
8662  * Programming" based on the parameters passed:
8663  * - Sequence to enable CLKOUT_DP
8664  * - Sequence to enable CLKOUT_DP without spread
8665  * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
8666  */
8667 static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
8668 				 bool with_fdi)
8669 {
8670 	struct drm_i915_private *dev_priv = dev->dev_private;
8671 	uint32_t reg, tmp;
8672 
8673 	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
8674 		with_spread = true;
8675 	if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
8676 		with_fdi = false;
8677 
8678 	mutex_lock(&dev_priv->sb_lock);
8679 
8680 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8681 	tmp &= ~SBI_SSCCTL_DISABLE;
8682 	tmp |= SBI_SSCCTL_PATHALT;
8683 	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8684 
8685 	udelay(24);
8686 
8687 	if (with_spread) {
8688 		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8689 		tmp &= ~SBI_SSCCTL_PATHALT;
8690 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8691 
8692 		if (with_fdi) {
8693 			lpt_reset_fdi_mphy(dev_priv);
8694 			lpt_program_fdi_mphy(dev_priv);
8695 		}
8696 	}
8697 
8698 	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
8699 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8700 	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8701 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8702 
8703 	mutex_unlock(&dev_priv->sb_lock);
8704 }
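
/*
 * The parameters map onto the three BSpec sequences as follows; the FDI
 * variant is the one lpt_init_pch_refclk() below actually uses:
 *
 *   lpt_enable_clkout_dp(dev, false, false) - CLKOUT_DP without spread
 *   lpt_enable_clkout_dp(dev, true, false)  - CLKOUT_DP with spread
 *   lpt_enable_clkout_dp(dev, true, true)   - CLKOUT_DP for FDI, plus the
 *                                             PCH FDI I/O (mPHY) programming
 */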
8705 
8706 /* Sequence to disable CLKOUT_DP */
8707 static void lpt_disable_clkout_dp(struct drm_device *dev)
8708 {
8709 	struct drm_i915_private *dev_priv = dev->dev_private;
8710 	uint32_t reg, tmp;
8711 
8712 	mutex_lock(&dev_priv->sb_lock);
8713 
8714 	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
8715 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8716 	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8717 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8718 
8719 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8720 	if (!(tmp & SBI_SSCCTL_DISABLE)) {
8721 		if (!(tmp & SBI_SSCCTL_PATHALT)) {
8722 			tmp |= SBI_SSCCTL_PATHALT;
8723 			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8724 			udelay(32);
8725 		}
8726 		tmp |= SBI_SSCCTL_DISABLE;
8727 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8728 	}
8729 
8730 	mutex_unlock(&dev_priv->sb_lock);
8731 }
8732 
8733 #define BEND_IDX(steps) ((50 + (steps)) / 5)
8734 
8735 static const uint16_t sscdivintphase[] = {
8736 	[BEND_IDX( 50)] = 0x3B23,
8737 	[BEND_IDX( 45)] = 0x3B23,
8738 	[BEND_IDX( 40)] = 0x3C23,
8739 	[BEND_IDX( 35)] = 0x3C23,
8740 	[BEND_IDX( 30)] = 0x3D23,
8741 	[BEND_IDX( 25)] = 0x3D23,
8742 	[BEND_IDX( 20)] = 0x3E23,
8743 	[BEND_IDX( 15)] = 0x3E23,
8744 	[BEND_IDX( 10)] = 0x3F23,
8745 	[BEND_IDX(  5)] = 0x3F23,
8746 	[BEND_IDX(  0)] = 0x0025,
8747 	[BEND_IDX( -5)] = 0x0025,
8748 	[BEND_IDX(-10)] = 0x0125,
8749 	[BEND_IDX(-15)] = 0x0125,
8750 	[BEND_IDX(-20)] = 0x0225,
8751 	[BEND_IDX(-25)] = 0x0225,
8752 	[BEND_IDX(-30)] = 0x0325,
8753 	[BEND_IDX(-35)] = 0x0325,
8754 	[BEND_IDX(-40)] = 0x0425,
8755 	[BEND_IDX(-45)] = 0x0425,
8756 	[BEND_IDX(-50)] = 0x0525,
8757 };
8758 
8759 /*
8760  * Bend CLKOUT_DP
8761  * steps -50 to 50 inclusive, in steps of 5
8762  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135 MHz)
8763  * change in clock period = -(steps / 10) * 5.787 ps
8764  */
8765 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
8766 {
8767 	uint32_t tmp;
8768 	int idx = BEND_IDX(steps);
8769 
8770 	if (WARN_ON(steps % 5 != 0))
8771 		return;
8772 
8773 	if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
8774 		return;
8775 
8776 	mutex_lock(&dev_priv->sb_lock);
8777 
8778 	if (steps % 10 != 0)
8779 		tmp = 0xAAAAAAAB;
8780 	else
8781 		tmp = 0x00000000;
8782 	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
8783 
8784 	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
8785 	tmp &= 0xffff0000;
8786 	tmp |= sscdivintphase[idx];
8787 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
8788 
8789 	mutex_unlock(&dev_priv->sb_lock);
8790 }
8791 
8792 #undef BEND_IDX
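
/*
 * Worked example: steps = -25 selects sscdivintphase[(50 - 25) / 5 = 5],
 * i.e. 0x0225, and since -25 is not a multiple of 10 the dither phase
 * 0xAAAAAAAB is written first. The resulting change in clock period is
 * -(-25 / 10) * 5.787 ps = +14.5 ps, a longer period and hence a slightly
 * slower clock, as expected for steps < 0.
 */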
8793 
8794 static void lpt_init_pch_refclk(struct drm_device *dev)
8795 {
8796 	struct intel_encoder *encoder;
8797 	bool has_vga = false;
8798 
8799 	for_each_intel_encoder(dev, encoder) {
8800 		switch (encoder->type) {
8801 		case INTEL_OUTPUT_ANALOG:
8802 			has_vga = true;
8803 			break;
8804 		default:
8805 			break;
8806 		}
8807 	}
8808 
8809 	if (has_vga) {
8810 		lpt_bend_clkout_dp(to_i915(dev), 0);
8811 		lpt_enable_clkout_dp(dev, true, true);
8812 	} else {
8813 		lpt_disable_clkout_dp(dev);
8814 	}
8815 }
8816 
8817 /*
8818  * Initialize reference clocks when the driver loads
8819  */
8820 void intel_init_pch_refclk(struct drm_device *dev)
8821 {
8822 	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
8823 		ironlake_init_pch_refclk(dev);
8824 	else if (HAS_PCH_LPT(dev))
8825 		lpt_init_pch_refclk(dev);
8826 }
8827 
8828 static void ironlake_set_pipeconf(struct drm_crtc *crtc)
8829 {
8830 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
8831 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8832 	int pipe = intel_crtc->pipe;
8833 	uint32_t val;
8834 
8835 	val = 0;
8836 
8837 	switch (intel_crtc->config->pipe_bpp) {
8838 	case 18:
8839 		val |= PIPECONF_6BPC;
8840 		break;
8841 	case 24:
8842 		val |= PIPECONF_8BPC;
8843 		break;
8844 	case 30:
8845 		val |= PIPECONF_10BPC;
8846 		break;
8847 	case 36:
8848 		val |= PIPECONF_12BPC;
8849 		break;
8850 	default:
8851 		/* Case prevented by intel_choose_pipe_bpp_dither. */
8852 		BUG();
8853 	}
8854 
8855 	if (intel_crtc->config->dither)
8856 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8857 
8858 	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8859 		val |= PIPECONF_INTERLACED_ILK;
8860 	else
8861 		val |= PIPECONF_PROGRESSIVE;
8862 
8863 	if (intel_crtc->config->limited_color_range)
8864 		val |= PIPECONF_COLOR_RANGE_SELECT;
8865 
8866 	I915_WRITE(PIPECONF(pipe), val);
8867 	POSTING_READ(PIPECONF(pipe));
8868 }
8869 
8870 static void haswell_set_pipeconf(struct drm_crtc *crtc)
8871 {
8872 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
8873 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8874 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
8875 	u32 val = 0;
8876 
8877 	if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
8878 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8879 
8880 	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8881 		val |= PIPECONF_INTERLACED_ILK;
8882 	else
8883 		val |= PIPECONF_PROGRESSIVE;
8884 
8885 	I915_WRITE(PIPECONF(cpu_transcoder), val);
8886 	POSTING_READ(PIPECONF(cpu_transcoder));
8887 }
8888 
8889 static void haswell_set_pipemisc(struct drm_crtc *crtc)
8890 {
8891 	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
8892 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8893 
8894 	if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
8895 		u32 val = 0;
8896 
8897 		switch (intel_crtc->config->pipe_bpp) {
8898 		case 18:
8899 			val |= PIPEMISC_DITHER_6_BPC;
8900 			break;
8901 		case 24:
8902 			val |= PIPEMISC_DITHER_8_BPC;
8903 			break;
8904 		case 30:
8905 			val |= PIPEMISC_DITHER_10_BPC;
8906 			break;
8907 		case 36:
8908 			val |= PIPEMISC_DITHER_12_BPC;
8909 			break;
8910 		default:
8911 			/* Case prevented by pipe_config_set_bpp. */
8912 			BUG();
8913 		}
8914 
8915 		if (intel_crtc->config->dither)
8916 			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8917 
8918 		I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
8919 	}
8920 }
8921 
8922 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
8923 {
8924 	/*
8925 	 * Account for spread spectrum to avoid
8926 	 * oversubscribing the link. Max center spread
8927 	 * is 2.5%; use 5% for safety's sake.
8928 	 */
8929 	u32 bps = target_clock * bpp * 21 / 20;
8930 	return DIV_ROUND_UP(bps, link_bw * 8);
8931 }
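
/*
 * For example (values for illustration only): a 148500 kHz pixel clock at
 * 24 bpp on a 270000 kHz link gives bps = 148500 * 24 * 21 / 20 = 3742200,
 * and DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes.
 */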
8932 
8933 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
8934 {
8935 	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
8936 }
8937 
8938 static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
8939 				  struct intel_crtc_state *crtc_state,
8940 				  struct dpll *reduced_clock)
8941 {
8942 	struct drm_crtc *crtc = &intel_crtc->base;
8943 	struct drm_device *dev = crtc->dev;
8944 	struct drm_i915_private *dev_priv = dev->dev_private;
8945 	struct drm_atomic_state *state = crtc_state->base.state;
8946 	struct drm_connector *connector;
8947 	struct drm_connector_state *connector_state;
8948 	struct intel_encoder *encoder;
8949 	u32 dpll, fp, fp2;
8950 	int factor, i;
8951 	bool is_lvds = false, is_sdvo = false;
8952 
8953 	for_each_connector_in_state(state, connector, connector_state, i) {
8954 		if (connector_state->crtc != crtc_state->base.crtc)
8955 			continue;
8956 
8957 		encoder = to_intel_encoder(connector_state->best_encoder);
8958 
8959 		switch (encoder->type) {
8960 		case INTEL_OUTPUT_LVDS:
8961 			is_lvds = true;
8962 			break;
8963 		case INTEL_OUTPUT_SDVO:
8964 		case INTEL_OUTPUT_HDMI:
8965 			is_sdvo = true;
8966 			break;
8967 		default:
8968 			break;
8969 		}
8970 	}
8971 
8972 	/* Enable autotuning of the PLL clock (if permissible) */
8973 	factor = 21;
8974 	if (is_lvds) {
8975 		if ((intel_panel_use_ssc(dev_priv) &&
8976 		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
8977 		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
8978 			factor = 25;
8979 	} else if (crtc_state->sdvo_tv_clock)
8980 		factor = 20;
8981 
8982 	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
8983 
8984 	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
8985 		fp |= FP_CB_TUNE;
8986 
8987 	if (reduced_clock) {
8988 		fp2 = i9xx_dpll_compute_fp(reduced_clock);
8989 
8990 		if (reduced_clock->m < factor * reduced_clock->n)
8991 			fp2 |= FP_CB_TUNE;
8992 	} else {
8993 		fp2 = fp;
8994 	}
8995 
8996 	dpll = 0;
8997 
8998 	if (is_lvds)
8999 		dpll |= DPLLB_MODE_LVDS;
9000 	else
9001 		dpll |= DPLLB_MODE_DAC_SERIAL;
9002 
9003 	dpll |= (crtc_state->pixel_multiplier - 1)
9004 		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
9005 
9006 	if (is_sdvo)
9007 		dpll |= DPLL_SDVO_HIGH_SPEED;
9008 	if (crtc_state->has_dp_encoder)
9009 		dpll |= DPLL_SDVO_HIGH_SPEED;
9010 
9011 	/* compute bitmask from p1 value */
9012 	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
9013 	/* also FPA1 */
9014 	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
9015 
9016 	switch (crtc_state->dpll.p2) {
9017 	case 5:
9018 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
9019 		break;
9020 	case 7:
9021 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
9022 		break;
9023 	case 10:
9024 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
9025 		break;
9026 	case 14:
9027 		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
9028 		break;
9029 	}
9030 
9031 	if (is_lvds && intel_panel_use_ssc(dev_priv))
9032 		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
9033 	else
9034 		dpll |= PLL_REF_INPUT_DREFCLK;
9035 
9036 	dpll |= DPLL_VCO_ENABLE;
9037 
9038 	crtc_state->dpll_hw_state.dpll = dpll;
9039 	crtc_state->dpll_hw_state.fp0 = fp;
9040 	crtc_state->dpll_hw_state.fp1 = fp2;
9041 }
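
/*
 * Note the P1 post divider is encoded one-hot: e.g. p1 = 3 yields
 * 1 << (3 - 1) = 0x4, written to both the FPA0 and FPA1 fields above so
 * the same divider serves the normal and reduced operating points.
 */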
9042 
9043 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
9044 				       struct intel_crtc_state *crtc_state)
9045 {
9046 	struct drm_device *dev = crtc->base.dev;
9047 	struct drm_i915_private *dev_priv = dev->dev_private;
9048 	struct dpll reduced_clock;
9049 	bool has_reduced_clock = false;
9050 	struct intel_shared_dpll *pll;
9051 	const struct intel_limit *limit;
9052 	int refclk = 120000;
9053 
9054 	memset(&crtc_state->dpll_hw_state, 0,
9055 	       sizeof(crtc_state->dpll_hw_state));
9056 
9057 	crtc->lowfreq_avail = false;
9058 
9059 	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
9060 	if (!crtc_state->has_pch_encoder)
9061 		return 0;
9062 
9063 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
9064 		if (intel_panel_use_ssc(dev_priv)) {
9065 			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
9066 				      dev_priv->vbt.lvds_ssc_freq);
9067 			refclk = dev_priv->vbt.lvds_ssc_freq;
9068 		}
9069 
9070 		if (intel_is_dual_link_lvds(dev)) {
9071 			if (refclk == 100000)
9072 				limit = &intel_limits_ironlake_dual_lvds_100m;
9073 			else
9074 				limit = &intel_limits_ironlake_dual_lvds;
9075 		} else {
9076 			if (refclk == 100000)
9077 				limit = &intel_limits_ironlake_single_lvds_100m;
9078 			else
9079 				limit = &intel_limits_ironlake_single_lvds;
9080 		}
9081 	} else {
9082 		limit = &intel_limits_ironlake_dac;
9083 	}
9084 
9085 	if (!crtc_state->clock_set &&
9086 	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9087 				refclk, NULL, &crtc_state->dpll)) {
9088 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
9089 		return -EINVAL;
9090 	}
9091 
9092 	ironlake_compute_dpll(crtc, crtc_state,
9093 			      has_reduced_clock ? &reduced_clock : NULL);
9094 
9095 	pll = intel_get_shared_dpll(crtc, crtc_state, NULL);
9096 	if (pll == NULL) {
9097 		DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
9098 				 pipe_name(crtc->pipe));
9099 		return -EINVAL;
9100 	}
9101 
9102 	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
9103 	    has_reduced_clock)
9104 		crtc->lowfreq_avail = true;
9105 
9106 	return 0;
9107 }
9108 
9109 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
9110 					 struct intel_link_m_n *m_n)
9111 {
9112 	struct drm_device *dev = crtc->base.dev;
9113 	struct drm_i915_private *dev_priv = dev->dev_private;
9114 	enum i915_pipe pipe = crtc->pipe;
9115 
9116 	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
9117 	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
9118 	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
9119 		& ~TU_SIZE_MASK;
9120 	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
9121 	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
9122 		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9123 }
9124 
9125 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
9126 					 enum transcoder transcoder,
9127 					 struct intel_link_m_n *m_n,
9128 					 struct intel_link_m_n *m2_n2)
9129 {
9130 	struct drm_device *dev = crtc->base.dev;
9131 	struct drm_i915_private *dev_priv = dev->dev_private;
9132 	enum i915_pipe pipe = crtc->pipe;
9133 
9134 	if (INTEL_INFO(dev)->gen >= 5) {
9135 		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
9136 		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
9137 		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
9138 			& ~TU_SIZE_MASK;
9139 		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
9140 		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
9141 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9142 		/* Read M2_N2 registers only for gen < 8 (the only gens
9143 		 * with dedicated M2_N2 registers) and only if DRRS is
9144 		 * supported, so the registers are not read unnecessarily.
9145 		 */
9146 		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
9147 			crtc->config->has_drrs) {
9148 			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
9149 			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
9150 			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
9151 					& ~TU_SIZE_MASK;
9152 			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
9153 			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
9154 					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9155 		}
9156 	} else {
9157 		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
9158 		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
9159 		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
9160 			& ~TU_SIZE_MASK;
9161 		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
9162 		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
9163 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9164 	}
9165 }
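
/*
 * The transfer unit size shares the DATA_M register with the M value and is
 * stored biased by one: e.g. a raw TU field of 0x3f decodes to
 * 0x3f + 1 = 64, the largest TU.
 */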
9166 
9167 void intel_dp_get_m_n(struct intel_crtc *crtc,
9168 		      struct intel_crtc_state *pipe_config)
9169 {
9170 	if (pipe_config->has_pch_encoder)
9171 		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9172 	else
9173 		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9174 					     &pipe_config->dp_m_n,
9175 					     &pipe_config->dp_m2_n2);
9176 }
9177 
9178 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
9179 					struct intel_crtc_state *pipe_config)
9180 {
9181 	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9182 				     &pipe_config->fdi_m_n, NULL);
9183 }
9184 
9185 static void skylake_get_pfit_config(struct intel_crtc *crtc,
9186 				    struct intel_crtc_state *pipe_config)
9187 {
9188 	struct drm_device *dev = crtc->base.dev;
9189 	struct drm_i915_private *dev_priv = dev->dev_private;
9190 	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9191 	uint32_t ps_ctrl = 0;
9192 	int id = -1;
9193 	int i;
9194 
9195 	/* find scaler attached to this pipe */
9196 	for (i = 0; i < crtc->num_scalers; i++) {
9197 		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9198 		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9199 			id = i;
9200 			pipe_config->pch_pfit.enabled = true;
9201 			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9202 			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9203 			break;
9204 		}
9205 	}
9206 
9207 	scaler_state->scaler_id = id;
9208 	if (id >= 0) {
9209 		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9210 	} else {
9211 		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9212 	}
9213 }
9214 
9215 static void
9216 skylake_get_initial_plane_config(struct intel_crtc *crtc,
9217 				 struct intel_initial_plane_config *plane_config)
9218 {
9219 	struct drm_device *dev = crtc->base.dev;
9220 	struct drm_i915_private *dev_priv = dev->dev_private;
9221 	u32 val, base, offset, stride_mult, tiling;
9222 	int pipe = crtc->pipe;
9223 	int fourcc, pixel_format;
9224 	unsigned int aligned_height;
9225 	struct drm_framebuffer *fb;
9226 	struct intel_framebuffer *intel_fb;
9227 
9228 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9229 	if (!intel_fb) {
9230 		DRM_DEBUG_KMS("failed to alloc fb\n");
9231 		return;
9232 	}
9233 
9234 	fb = &intel_fb->base;
9235 
9236 	val = I915_READ(PLANE_CTL(pipe, 0));
9237 	if (!(val & PLANE_CTL_ENABLE))
9238 		goto error;
9239 
9240 	pixel_format = val & PLANE_CTL_FORMAT_MASK;
9241 	fourcc = skl_format_to_fourcc(pixel_format,
9242 				      val & PLANE_CTL_ORDER_RGBX,
9243 				      val & PLANE_CTL_ALPHA_MASK);
9244 	fb->pixel_format = fourcc;
9245 	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
9246 
9247 	tiling = val & PLANE_CTL_TILED_MASK;
9248 	switch (tiling) {
9249 	case PLANE_CTL_TILED_LINEAR:
9250 		fb->modifier[0] = DRM_FORMAT_MOD_NONE;
9251 		break;
9252 	case PLANE_CTL_TILED_X:
9253 		plane_config->tiling = I915_TILING_X;
9254 		fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
9255 		break;
9256 	case PLANE_CTL_TILED_Y:
9257 		fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
9258 		break;
9259 	case PLANE_CTL_TILED_YF:
9260 		fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
9261 		break;
9262 	default:
9263 		MISSING_CASE(tiling);
9264 		goto error;
9265 	}
9266 
9267 	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
9268 	plane_config->base = base;
9269 
9270 	offset = I915_READ(PLANE_OFFSET(pipe, 0));
9271 
9272 	val = I915_READ(PLANE_SIZE(pipe, 0));
9273 	fb->height = ((val >> 16) & 0xfff) + 1;
9274 	fb->width = ((val >> 0) & 0x1fff) + 1;
9275 
9276 	val = I915_READ(PLANE_STRIDE(pipe, 0));
9277 	stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
9278 						fb->pixel_format);
9279 	fb->pitches[0] = (val & 0x3ff) * stride_mult;
9280 
9281 	aligned_height = intel_fb_align_height(dev, fb->height,
9282 					       fb->pixel_format,
9283 					       fb->modifier[0]);
9284 
9285 	plane_config->size = fb->pitches[0] * aligned_height;
9286 
9287 	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9288 		      pipe_name(pipe), fb->width, fb->height,
9289 		      fb->bits_per_pixel, base, fb->pitches[0],
9290 		      plane_config->size);
9291 
9292 	plane_config->fb = intel_fb;
9293 	return;
9294 
9295 error:
9296 	kfree(fb);
9297 }
9298 
9299 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
9300 				     struct intel_crtc_state *pipe_config)
9301 {
9302 	struct drm_device *dev = crtc->base.dev;
9303 	struct drm_i915_private *dev_priv = dev->dev_private;
9304 	uint32_t tmp;
9305 
9306 	tmp = I915_READ(PF_CTL(crtc->pipe));
9307 
9308 	if (tmp & PF_ENABLE) {
9309 		pipe_config->pch_pfit.enabled = true;
9310 		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9311 		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
9312 
9313 		/* We currently do not free assignments of panel fitters on
9314 		 * ivb/hsw (since we don't use the higher upscaling modes which
9315 		 * differentiate them), so just WARN about this case for now. */
9316 		if (IS_GEN7(dev)) {
9317 			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9318 				PF_PIPE_SEL_IVB(crtc->pipe));
9319 		}
9320 	}
9321 }
9322 
9323 static void
9324 ironlake_get_initial_plane_config(struct intel_crtc *crtc,
9325 				  struct intel_initial_plane_config *plane_config)
9326 {
9327 	struct drm_device *dev = crtc->base.dev;
9328 	struct drm_i915_private *dev_priv = dev->dev_private;
9329 	u32 val, base, offset;
9330 	int pipe = crtc->pipe;
9331 	int fourcc, pixel_format;
9332 	unsigned int aligned_height;
9333 	struct drm_framebuffer *fb;
9334 	struct intel_framebuffer *intel_fb;
9335 
9336 	val = I915_READ(DSPCNTR(pipe));
9337 	if (!(val & DISPLAY_PLANE_ENABLE))
9338 		return;
9339 
9340 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9341 	if (!intel_fb) {
9342 		DRM_DEBUG_KMS("failed to alloc fb\n");
9343 		return;
9344 	}
9345 
9346 	fb = &intel_fb->base;
9347 
9348 	if (INTEL_INFO(dev)->gen >= 4) {
9349 		if (val & DISPPLANE_TILED) {
9350 			plane_config->tiling = I915_TILING_X;
9351 			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
9352 		}
9353 	}
9354 
9355 	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
9356 	fourcc = i9xx_format_to_fourcc(pixel_format);
9357 	fb->pixel_format = fourcc;
9358 	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
9359 
9360 	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
9361 	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
9362 		offset = I915_READ(DSPOFFSET(pipe));
9363 	} else {
9364 		if (plane_config->tiling)
9365 			offset = I915_READ(DSPTILEOFF(pipe));
9366 		else
9367 			offset = I915_READ(DSPLINOFF(pipe));
9368 	}
9369 	plane_config->base = base;
9370 
9371 	val = I915_READ(PIPESRC(pipe));
9372 	fb->width = ((val >> 16) & 0xfff) + 1;
9373 	fb->height = ((val >> 0) & 0xfff) + 1;
9374 
9375 	val = I915_READ(DSPSTRIDE(pipe));
9376 	fb->pitches[0] = val & 0xffffffc0;
9377 
9378 	aligned_height = intel_fb_align_height(dev, fb->height,
9379 					       fb->pixel_format,
9380 					       fb->modifier[0]);
9381 
9382 	plane_config->size = fb->pitches[0] * aligned_height;
9383 
9384 	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9385 		      pipe_name(pipe), fb->width, fb->height,
9386 		      fb->bits_per_pixel, base, fb->pitches[0],
9387 		      plane_config->size);
9388 
9389 	plane_config->fb = intel_fb;
9390 }
9391 
9392 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9393 				     struct intel_crtc_state *pipe_config)
9394 {
9395 	struct drm_device *dev = crtc->base.dev;
9396 	struct drm_i915_private *dev_priv = dev->dev_private;
9397 	enum intel_display_power_domain power_domain;
9398 	uint32_t tmp;
9399 	bool ret;
9400 
9401 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9402 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9403 		return false;
9404 
9405 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9406 	pipe_config->shared_dpll = NULL;
9407 
9408 	ret = false;
9409 	tmp = I915_READ(PIPECONF(crtc->pipe));
9410 	if (!(tmp & PIPECONF_ENABLE))
9411 		goto out;
9412 
9413 	switch (tmp & PIPECONF_BPC_MASK) {
9414 	case PIPECONF_6BPC:
9415 		pipe_config->pipe_bpp = 18;
9416 		break;
9417 	case PIPECONF_8BPC:
9418 		pipe_config->pipe_bpp = 24;
9419 		break;
9420 	case PIPECONF_10BPC:
9421 		pipe_config->pipe_bpp = 30;
9422 		break;
9423 	case PIPECONF_12BPC:
9424 		pipe_config->pipe_bpp = 36;
9425 		break;
9426 	default:
9427 		break;
9428 	}
9429 
9430 	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
9431 		pipe_config->limited_color_range = true;
9432 
9433 	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
9434 		struct intel_shared_dpll *pll;
9435 		enum intel_dpll_id pll_id;
9436 
9437 		pipe_config->has_pch_encoder = true;
9438 
9439 		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
9440 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9441 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
9442 
9443 		ironlake_get_fdi_m_n_config(crtc, pipe_config);
9444 
9445 		if (HAS_PCH_IBX(dev_priv)) {
9446 			/*
9447 			 * The pipe->pch transcoder and pch transcoder->pll
9448 			 * mapping is fixed.
9449 			 */
9450 			pll_id = (enum intel_dpll_id) crtc->pipe;
9451 		} else {
9452 			tmp = I915_READ(PCH_DPLL_SEL);
9453 			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
9454 				pll_id = DPLL_ID_PCH_PLL_B;
9455 			else
9456 				pll_id = DPLL_ID_PCH_PLL_A;
9457 		}
9458 
9459 		pipe_config->shared_dpll =
9460 			intel_get_shared_dpll_by_id(dev_priv, pll_id);
9461 		pll = pipe_config->shared_dpll;
9462 
9463 		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
9464 						 &pipe_config->dpll_hw_state));
9465 
9466 		tmp = pipe_config->dpll_hw_state.dpll;
9467 		pipe_config->pixel_multiplier =
9468 			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
9469 			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
9470 
9471 		ironlake_pch_clock_get(crtc, pipe_config);
9472 	} else {
9473 		pipe_config->pixel_multiplier = 1;
9474 	}
9475 
9476 	intel_get_pipe_timings(crtc, pipe_config);
9477 	intel_get_pipe_src_size(crtc, pipe_config);
9478 
9479 	ironlake_get_pfit_config(crtc, pipe_config);
9480 
9481 	ret = true;
9482 
9483 out:
9484 	intel_display_power_put(dev_priv, power_domain);
9485 
9486 	return ret;
9487 }
9488 
9489 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
9490 {
9491 	struct drm_device *dev = dev_priv->dev;
9492 	struct intel_crtc *crtc;
9493 
9494 	for_each_intel_crtc(dev, crtc)
9495 		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
9496 		     pipe_name(crtc->pipe));
9497 
9498 	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
9499 	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
9500 	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
9501 	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
9502 	I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
9503 	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
9504 	     "CPU PWM1 enabled\n");
9505 	if (IS_HASWELL(dev))
9506 		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
9507 		     "CPU PWM2 enabled\n");
9508 	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
9509 	     "PCH PWM1 enabled\n");
9510 	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
9511 	     "Utility pin enabled\n");
9512 	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
9513 
9514 	/*
9515 	 * In theory we can still leave IRQs enabled, as long as only the HPD
9516 	 * interrupts remain enabled. We used to check for that, but since it's
9517 	 * gen-specific and since we only disable LCPLL after we fully disable
9518 	 * the interrupts, the check below should be enough.
9519 	 */
9520 	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
9521 }
9522 
9523 static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9524 {
9525 	struct drm_device *dev = dev_priv->dev;
9526 
9527 	if (IS_HASWELL(dev))
9528 		return I915_READ(D_COMP_HSW);
9529 	else
9530 		return I915_READ(D_COMP_BDW);
9531 }
9532 
9533 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
9534 {
9535 	struct drm_device *dev = dev_priv->dev;
9536 
9537 	if (IS_HASWELL(dev)) {
9538 		mutex_lock(&dev_priv->rps.hw_lock);
9539 		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
9540 					    val))
9541 			DRM_ERROR("Failed to write to D_COMP\n");
9542 		mutex_unlock(&dev_priv->rps.hw_lock);
9543 	} else {
9544 		I915_WRITE(D_COMP_BDW, val);
9545 		POSTING_READ(D_COMP_BDW);
9546 	}
9547 }
9548 
9549 /*
9550  * This function implements pieces of two sequences from BSpec:
9551  * - Sequence for display software to disable LCPLL
9552  * - Sequence for display software to allow package C8+
9553  * The steps implemented here are just the steps that actually touch the LCPLL
9554  * register. Callers should take care of disabling all the display engine
9555  * functions, doing the mode unset, fixing interrupts, etc.
9556  */
9557 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
9558 			      bool switch_to_fclk, bool allow_power_down)
9559 {
9560 	uint32_t val;
9561 
9562 	assert_can_disable_lcpll(dev_priv);
9563 
9564 	val = I915_READ(LCPLL_CTL);
9565 
9566 	if (switch_to_fclk) {
9567 		val |= LCPLL_CD_SOURCE_FCLK;
9568 		I915_WRITE(LCPLL_CTL, val);
9569 
9570 		if (wait_for_us(I915_READ(LCPLL_CTL) &
9571 				LCPLL_CD_SOURCE_FCLK_DONE, 1))
9572 			DRM_ERROR("Switching to FCLK failed\n");
9573 
9574 		val = I915_READ(LCPLL_CTL);
9575 	}
9576 
9577 	val |= LCPLL_PLL_DISABLE;
9578 	I915_WRITE(LCPLL_CTL, val);
9579 	POSTING_READ(LCPLL_CTL);
9580 
9581 	if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
9582 		DRM_ERROR("LCPLL still locked\n");
9583 
9584 	val = hsw_read_dcomp(dev_priv);
9585 	val |= D_COMP_COMP_DISABLE;
9586 	hsw_write_dcomp(dev_priv, val);
9587 	ndelay(100);
9588 
9589 	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
9590 		     1))
9591 		DRM_ERROR("D_COMP RCOMP still in progress\n");
9592 
9593 	if (allow_power_down) {
9594 		val = I915_READ(LCPLL_CTL);
9595 		val |= LCPLL_POWER_DOWN_ALLOW;
9596 		I915_WRITE(LCPLL_CTL, val);
9597 		POSTING_READ(LCPLL_CTL);
9598 	}
9599 }
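
/*
 * Typical usage (see hsw_enable_pc8() below): hsw_disable_lcpll(dev_priv,
 * true, true) both switches the CD clock to Fclk and allows the PLL to
 * power down; hsw_restore_lcpll() then undoes those steps in reverse order.
 */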
9600 
9601 /*
9602  * Fully restores LCPLL, disallowing power down and switching back to LCPLL
9603  * source.
9604  */
9605 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
9606 {
9607 	uint32_t val;
9608 
9609 	val = I915_READ(LCPLL_CTL);
9610 
9611 	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
9612 		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
9613 		return;
9614 
9615 	/*
9616 	 * Make sure we're not in PC8 state before disabling PC8, otherwise
9617 	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
9618 	 */
9619 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
9620 
9621 	if (val & LCPLL_POWER_DOWN_ALLOW) {
9622 		val &= ~LCPLL_POWER_DOWN_ALLOW;
9623 		I915_WRITE(LCPLL_CTL, val);
9624 		POSTING_READ(LCPLL_CTL);
9625 	}
9626 
9627 	val = hsw_read_dcomp(dev_priv);
9628 	val |= D_COMP_COMP_FORCE;
9629 	val &= ~D_COMP_COMP_DISABLE;
9630 	hsw_write_dcomp(dev_priv, val);
9631 
9632 	val = I915_READ(LCPLL_CTL);
9633 	val &= ~LCPLL_PLL_DISABLE;
9634 	I915_WRITE(LCPLL_CTL, val);
9635 
9636 	if (intel_wait_for_register(dev_priv,
9637 				    LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
9638 				    5))
9639 		DRM_ERROR("LCPLL not locked yet\n");
9640 
9641 	if (val & LCPLL_CD_SOURCE_FCLK) {
9642 		val = I915_READ(LCPLL_CTL);
9643 		val &= ~LCPLL_CD_SOURCE_FCLK;
9644 		I915_WRITE(LCPLL_CTL, val);
9645 
9646 		if (wait_for_us((I915_READ(LCPLL_CTL) &
9647 				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9648 			DRM_ERROR("Switching back to LCPLL failed\n");
9649 	}
9650 
9651 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
9652 	intel_update_cdclk(dev_priv->dev);
9653 }
9654 
9655 /*
9656  * Package states C8 and deeper are really deep PC states that can only be
9657  * reached when all the devices on the system allow it, so even if the graphics
9658  * device allows PC8+, it doesn't mean the system will actually get to these
9659  * states. Our driver only allows PC8+ when going into runtime PM.
9660  *
9661  * The requirements for PC8+ are that all the outputs are disabled, the power
9662  * well is disabled and most interrupts are disabled, and these are also
9663 	 * requirements for runtime PM. When these conditions are met, we manually
9664 	 * handle the rest: disable the interrupts and clocks and switch the LCPLL
9665 	 * refclk to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we
9666 	 * can hard-hang the machine.
9667  *
9668  * When we really reach PC8 or deeper states (not just when we allow it) we lose
9669  * the state of some registers, so when we come back from PC8+ we need to
9670  * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9671  * need to take care of the registers kept by RC6. Notice that this happens even
9672  * if we don't put the device in PCI D3 state (which is what currently happens
9673  * because of the runtime PM support).
9674  *
9675  * For more, read "Display Sequences for Package C8" on the hardware
9676  * documentation.
9677  */
9678 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
9679 {
9680 	struct drm_device *dev = dev_priv->dev;
9681 	uint32_t val;
9682 
9683 	DRM_DEBUG_KMS("Enabling package C8+\n");
9684 
9685 	if (HAS_PCH_LPT_LP(dev)) {
9686 		val = I915_READ(SOUTH_DSPCLK_GATE_D);
9687 		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
9688 		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9689 	}
9690 
9691 	lpt_disable_clkout_dp(dev);
9692 	hsw_disable_lcpll(dev_priv, true, true);
9693 }
9694 
9695 void hsw_disable_pc8(struct drm_i915_private *dev_priv)
9696 {
9697 	struct drm_device *dev = dev_priv->dev;
9698 	uint32_t val;
9699 
9700 	DRM_DEBUG_KMS("Disabling package C8+\n");
9701 
9702 	hsw_restore_lcpll(dev_priv);
9703 	lpt_init_pch_refclk(dev);
9704 
9705 	if (HAS_PCH_LPT_LP(dev)) {
9706 		val = I915_READ(SOUTH_DSPCLK_GATE_D);
9707 		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
9708 		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9709 	}
9710 }
9711 
9712 static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9713 {
9714 	struct drm_device *dev = old_state->dev;
9715 	struct intel_atomic_state *old_intel_state =
9716 		to_intel_atomic_state(old_state);
9717 	unsigned int req_cdclk = old_intel_state->dev_cdclk;
9718 
9719 	bxt_set_cdclk(to_i915(dev), req_cdclk);
9720 }
9721 
9722 /* compute the max rate for new configuration */
9723 static int ilk_max_pixel_rate(struct drm_atomic_state *state)
9724 {
9725 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
9726 	struct drm_i915_private *dev_priv = state->dev->dev_private;
9727 	struct drm_crtc *crtc;
9728 	struct drm_crtc_state *cstate;
9729 	struct intel_crtc_state *crtc_state;
9730 	unsigned max_pixel_rate = 0, i;
9731 	enum i915_pipe pipe;
9732 
9733 	memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
9734 	       sizeof(intel_state->min_pixclk));
9735 
9736 	for_each_crtc_in_state(state, crtc, cstate, i) {
9737 		int pixel_rate;
9738 
9739 		crtc_state = to_intel_crtc_state(cstate);
9740 		if (!crtc_state->base.enable) {
9741 			intel_state->min_pixclk[i] = 0;
9742 			continue;
9743 		}
9744 
9745 		pixel_rate = ilk_pipe_pixel_rate(crtc_state);
9746 
9747 		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
9748 		if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
9749 			pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
9750 
9751 		intel_state->min_pixclk[i] = pixel_rate;
9752 	}
9753 
9754 	for_each_pipe(dev_priv, pipe)
9755 		max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate);
9756 
9757 	return max_pixel_rate;
9758 }
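
/*
 * Example of the IPS adjustment above: a 450000 kHz pipe on BDW with IPS
 * enabled is accounted as DIV_ROUND_UP(450000 * 100, 95) = 473685 kHz, so
 * the cdclk later chosen for it stays above the 95% limit.
 */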
9759 
9760 static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
9761 {
9762 	struct drm_i915_private *dev_priv = dev->dev_private;
9763 	uint32_t val, data;
9764 	int ret;
9765 
9766 	if (WARN((I915_READ(LCPLL_CTL) &
9767 		  (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
9768 		   LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
9769 		   LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
9770 		   LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
9771 		 "trying to change cdclk frequency with cdclk not enabled\n"))
9772 		return;
9773 
9774 	mutex_lock(&dev_priv->rps.hw_lock);
9775 	ret = sandybridge_pcode_write(dev_priv,
9776 				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
9777 	mutex_unlock(&dev_priv->rps.hw_lock);
9778 	if (ret) {
9779 		DRM_ERROR("failed to inform pcode about cdclk change\n");
9780 		return;
9781 	}
9782 
9783 	val = I915_READ(LCPLL_CTL);
9784 	val |= LCPLL_CD_SOURCE_FCLK;
9785 	I915_WRITE(LCPLL_CTL, val);
9786 
9787 	if (wait_for_us(I915_READ(LCPLL_CTL) &
9788 			LCPLL_CD_SOURCE_FCLK_DONE, 1))
9789 		DRM_ERROR("Switching to FCLK failed\n");
9790 
9791 	val = I915_READ(LCPLL_CTL);
9792 	val &= ~LCPLL_CLK_FREQ_MASK;
9793 
9794 	switch (cdclk) {
9795 	case 450000:
9796 		val |= LCPLL_CLK_FREQ_450;
9797 		data = 0;
9798 		break;
9799 	case 540000:
9800 		val |= LCPLL_CLK_FREQ_54O_BDW;
9801 		data = 1;
9802 		break;
9803 	case 337500:
9804 		val |= LCPLL_CLK_FREQ_337_5_BDW;
9805 		data = 2;
9806 		break;
9807 	case 675000:
9808 		val |= LCPLL_CLK_FREQ_675_BDW;
9809 		data = 3;
9810 		break;
9811 	default:
9812 		WARN(1, "invalid cdclk frequency\n");
9813 		return;
9814 	}
9815 
9816 	I915_WRITE(LCPLL_CTL, val);
9817 
9818 	val = I915_READ(LCPLL_CTL);
9819 	val &= ~LCPLL_CD_SOURCE_FCLK;
9820 	I915_WRITE(LCPLL_CTL, val);
9821 
9822 	if (wait_for_us((I915_READ(LCPLL_CTL) &
9823 			LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9824 		DRM_ERROR("Switching back to LCPLL failed\n");
9825 
9826 	mutex_lock(&dev_priv->rps.hw_lock);
9827 	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
9828 	mutex_unlock(&dev_priv->rps.hw_lock);
9829 
9830 	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
9833 
9834 	intel_update_cdclk(dev);
9835 
9836 	WARN(cdclk != dev_priv->cdclk_freq,
9837 	     "cdclk requested %d kHz but got %d kHz\n",
9838 	     cdclk, dev_priv->cdclk_freq);
9839 }
9840 
9841 static int broadwell_calc_cdclk(int max_pixclk)
9842 {
9843 	if (max_pixclk > 540000)
9844 		return 675000;
9845 	else if (max_pixclk > 450000)
9846 		return 540000;
9847 	else if (max_pixclk > 337500)
9848 		return 450000;
9849 	else
9850 		return 337500;
9851 }
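
/*
 * The thresholds mirror the frequencies themselves, so this returns the
 * smallest supported cdclk >= max_pixclk: e.g. 400000 -> 450000,
 * 540000 -> 540000 and 540001 -> 675000 (all in kHz).
 */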
9852 
9853 static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
9854 {
9855 	struct drm_i915_private *dev_priv = to_i915(state->dev);
9856 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
9857 	int max_pixclk = ilk_max_pixel_rate(state);
9858 	int cdclk;
9859 
9860 	/*
9861 	 * FIXME should also account for plane ratio
9862 	 * once 64bpp pixel formats are supported.
9863 	 */
9864 	cdclk = broadwell_calc_cdclk(max_pixclk);
9865 
9866 	if (cdclk > dev_priv->max_cdclk_freq) {
9867 		DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
9868 			      cdclk, dev_priv->max_cdclk_freq);
9869 		return -EINVAL;
9870 	}
9871 
9872 	intel_state->cdclk = intel_state->dev_cdclk = cdclk;
9873 	if (!intel_state->active_crtcs)
9874 		intel_state->dev_cdclk = broadwell_calc_cdclk(0);
9875 
9876 	return 0;
9877 }
9878 
9879 static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9880 {
9881 	struct drm_device *dev = old_state->dev;
9882 	struct intel_atomic_state *old_intel_state =
9883 		to_intel_atomic_state(old_state);
9884 	unsigned req_cdclk = old_intel_state->dev_cdclk;
9885 
9886 	broadwell_set_cdclk(dev, req_cdclk);
9887 }
9888 
9889 static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
9890 {
9891 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
9892 	struct drm_i915_private *dev_priv = to_i915(state->dev);
9893 	const int max_pixclk = ilk_max_pixel_rate(state);
9894 	int vco = intel_state->cdclk_pll_vco;
9895 	int cdclk;
9896 
9897 	/*
9898 	 * FIXME should also account for plane ratio
9899 	 * once 64bpp pixel formats are supported.
9900 	 */
9901 	cdclk = skl_calc_cdclk(max_pixclk, vco);
9902 
9903 	/*
9904 	 * FIXME move the cdclk calculation to
9905 	 * compute_config() so we can fail gracefully.
9906 	 */
9907 	if (cdclk > dev_priv->max_cdclk_freq) {
9908 		DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
9909 			  cdclk, dev_priv->max_cdclk_freq);
9910 		cdclk = dev_priv->max_cdclk_freq;
9911 	}
9912 
9913 	intel_state->cdclk = intel_state->dev_cdclk = cdclk;
9914 	if (!intel_state->active_crtcs)
9915 		intel_state->dev_cdclk = skl_calc_cdclk(0, vco);
9916 
9917 	return 0;
9918 }
9919 
9920 static void skl_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9921 {
9922 	struct drm_i915_private *dev_priv = to_i915(old_state->dev);
9923 	struct intel_atomic_state *intel_state = to_intel_atomic_state(old_state);
9924 	unsigned int req_cdclk = intel_state->dev_cdclk;
9925 	unsigned int req_vco = intel_state->cdclk_pll_vco;
9926 
9927 	skl_set_cdclk(dev_priv, req_cdclk, req_vco);
9928 }
9929 
9930 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9931 				      struct intel_crtc_state *crtc_state)
9932 {
9933 	struct intel_encoder *intel_encoder =
9934 		intel_ddi_get_crtc_new_encoder(crtc_state);
9935 
9936 	if (intel_encoder->type != INTEL_OUTPUT_DSI) {
9937 		if (!intel_ddi_pll_select(crtc, crtc_state))
9938 			return -EINVAL;
9939 	}
9940 
9941 	crtc->lowfreq_avail = false;
9942 
9943 	return 0;
9944 }
9945 
9946 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9947 				enum port port,
9948 				struct intel_crtc_state *pipe_config)
9949 {
9950 	enum intel_dpll_id id;
9951 
9952 	switch (port) {
9953 	case PORT_A:
9954 		pipe_config->ddi_pll_sel = SKL_DPLL0;
9955 		id = DPLL_ID_SKL_DPLL0;
9956 		break;
9957 	case PORT_B:
9958 		pipe_config->ddi_pll_sel = SKL_DPLL1;
9959 		id = DPLL_ID_SKL_DPLL1;
9960 		break;
9961 	case PORT_C:
9962 		pipe_config->ddi_pll_sel = SKL_DPLL2;
9963 		id = DPLL_ID_SKL_DPLL2;
9964 		break;
9965 	default:
9966 		DRM_ERROR("Incorrect port type\n");
9967 		return;
9968 	}
9969 
9970 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9971 }
9972 
9973 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
9974 				enum port port,
9975 				struct intel_crtc_state *pipe_config)
9976 {
9977 	enum intel_dpll_id id;
9978 	u32 temp;
9979 
9980 	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
9981 	pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
9982 
9983 	switch (pipe_config->ddi_pll_sel) {
9984 	case SKL_DPLL0:
9985 		id = DPLL_ID_SKL_DPLL0;
9986 		break;
9987 	case SKL_DPLL1:
9988 		id = DPLL_ID_SKL_DPLL1;
9989 		break;
9990 	case SKL_DPLL2:
9991 		id = DPLL_ID_SKL_DPLL2;
9992 		break;
9993 	case SKL_DPLL3:
9994 		id = DPLL_ID_SKL_DPLL3;
9995 		break;
9996 	default:
9997 		MISSING_CASE(pipe_config->ddi_pll_sel);
9998 		return;
9999 	}
10000 
10001 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10002 }
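
/*
 * DPLL_CTRL2 gives each DDI a 3-bit field: an override bit at port * 3 and
 * the 2-bit clock select at port * 3 + 1, which the shift above extracts.
 * For PORT_B (port 1) the select therefore sits at bits 5:4.
 */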
10003 
10004 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
10005 				enum port port,
10006 				struct intel_crtc_state *pipe_config)
10007 {
10008 	enum intel_dpll_id id;
10009 
10010 	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
10011 
10012 	switch (pipe_config->ddi_pll_sel) {
10013 	case PORT_CLK_SEL_WRPLL1:
10014 		id = DPLL_ID_WRPLL1;
10015 		break;
10016 	case PORT_CLK_SEL_WRPLL2:
10017 		id = DPLL_ID_WRPLL2;
10018 		break;
10019 	case PORT_CLK_SEL_SPLL:
10020 		id = DPLL_ID_SPLL;
10021 		break;
10022 	case PORT_CLK_SEL_LCPLL_810:
10023 		id = DPLL_ID_LCPLL_810;
10024 		break;
10025 	case PORT_CLK_SEL_LCPLL_1350:
10026 		id = DPLL_ID_LCPLL_1350;
10027 		break;
10028 	case PORT_CLK_SEL_LCPLL_2700:
10029 		id = DPLL_ID_LCPLL_2700;
10030 		break;
10031 	default:
10032 		MISSING_CASE(pipe_config->ddi_pll_sel);
10033 		/* fall through */
10034 	case PORT_CLK_SEL_NONE:
10035 		return;
10036 	}
10037 
10038 	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
10039 }
10040 
10041 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
10042 				     struct intel_crtc_state *pipe_config,
10043 				     unsigned long *power_domain_mask)
10044 {
10045 	struct drm_device *dev = crtc->base.dev;
10046 	struct drm_i915_private *dev_priv = dev->dev_private;
10047 	enum intel_display_power_domain power_domain;
10048 	u32 tmp;
10049 
10050 	/*
10051 	 * The pipe->transcoder mapping is fixed with the exception of the eDP
10052 	 * transcoder handled below.
10053 	 */
10054 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
10055 
10056 	/*
10057 	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
10058 	 * consistency and less surprising code; it's in always on power).
10059 	 * consistency and less surprising code; it's in an always-on power well).
10060 	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
10061 	if (tmp & TRANS_DDI_FUNC_ENABLE) {
10062 		enum i915_pipe trans_edp_pipe;
10063 		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
10064 		default:
10065 			WARN(1, "unknown pipe linked to edp transcoder\n");
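			/* fall through */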
10066 		case TRANS_DDI_EDP_INPUT_A_ONOFF:
10067 		case TRANS_DDI_EDP_INPUT_A_ON:
10068 			trans_edp_pipe = PIPE_A;
10069 			break;
10070 		case TRANS_DDI_EDP_INPUT_B_ONOFF:
10071 			trans_edp_pipe = PIPE_B;
10072 			break;
10073 		case TRANS_DDI_EDP_INPUT_C_ONOFF:
10074 			trans_edp_pipe = PIPE_C;
10075 			break;
10076 		}
10077 
10078 		if (trans_edp_pipe == crtc->pipe)
10079 			pipe_config->cpu_transcoder = TRANSCODER_EDP;
10080 	}
10081 
10082 	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
10083 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
10084 		return false;
10085 	*power_domain_mask |= BIT(power_domain);
10086 
10087 	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
10088 
10089 	return tmp & PIPECONF_ENABLE;
10090 }
10091 
10092 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
10093 					 struct intel_crtc_state *pipe_config,
10094 					 unsigned long *power_domain_mask)
10095 {
10096 	struct drm_device *dev = crtc->base.dev;
10097 	struct drm_i915_private *dev_priv = dev->dev_private;
10098 	enum intel_display_power_domain power_domain;
10099 	enum port port;
10100 	enum transcoder cpu_transcoder;
10101 	u32 tmp;
10102 
10103 	pipe_config->has_dsi_encoder = false;
10104 
10105 	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
10106 		if (port == PORT_A)
10107 			cpu_transcoder = TRANSCODER_DSI_A;
10108 		else
10109 			cpu_transcoder = TRANSCODER_DSI_C;
10110 
10111 		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
10112 		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
10113 			continue;
10114 		*power_domain_mask |= BIT(power_domain);
10115 
10116 		/*
10117 		 * The PLL needs to be enabled with a valid divider
10118 		 * configuration, otherwise accessing DSI registers will hang
10119 		 * the machine. See BSpec North Display Engine
10120 		 * registers/MIPI[BXT]. We can break out here early, since we
10121 		 * need the same DSI PLL to be enabled for both DSI ports.
10122 		 */
10123 		if (!intel_dsi_pll_is_enabled(dev_priv))
10124 			break;
10125 
10126 		/* XXX: this works for video mode only */
10127 		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
10128 		if (!(tmp & DPI_ENABLE))
10129 			continue;
10130 
10131 		tmp = I915_READ(MIPI_CTRL(port));
10132 		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
10133 			continue;
10134 
10135 		pipe_config->cpu_transcoder = cpu_transcoder;
10136 		pipe_config->has_dsi_encoder = true;
10137 		break;
10138 	}
10139 
10140 	return pipe_config->has_dsi_encoder;
10141 }
10142 
10143 static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
10144 				       struct intel_crtc_state *pipe_config)
10145 {
10146 	struct drm_device *dev = crtc->base.dev;
10147 	struct drm_i915_private *dev_priv = dev->dev_private;
10148 	struct intel_shared_dpll *pll;
10149 	enum port port;
10150 	uint32_t tmp;
10151 
10152 	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
10153 
10154 	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
10155 
10156 	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
10157 		skylake_get_ddi_pll(dev_priv, port, pipe_config);
10158 	else if (IS_BROXTON(dev))
10159 		bxt_get_ddi_pll(dev_priv, port, pipe_config);
10160 	else
10161 		haswell_get_ddi_pll(dev_priv, port, pipe_config);
10162 
10163 	pll = pipe_config->shared_dpll;
10164 	if (pll) {
10165 		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
10166 						 &pipe_config->dpll_hw_state));
10167 	}
10168 
10169 	/*
10170 	 * Haswell has only FDI/PCH transcoder A; it is connected to DDI E.
10171 	 * So just check whether this pipe is wired to DDI E and whether
10172 	 * the PCH transcoder is on.
10173 	 */
10174 	if (INTEL_INFO(dev)->gen < 9 &&
10175 	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
10176 		pipe_config->has_pch_encoder = true;
10177 
10178 		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
10179 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10180 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
10181 
10182 		ironlake_get_fdi_m_n_config(crtc, pipe_config);
10183 	}
10184 }
10185 
10186 static bool haswell_get_pipe_config(struct intel_crtc *crtc,
10187 				    struct intel_crtc_state *pipe_config)
10188 {
10189 	struct drm_device *dev = crtc->base.dev;
10190 	struct drm_i915_private *dev_priv = dev->dev_private;
10191 	enum intel_display_power_domain power_domain;
10192 	unsigned long power_domain_mask;
10193 	bool active;
10194 
10195 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10196 	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
10197 		return false;
10198 	power_domain_mask = BIT(power_domain);
10199 
10200 	pipe_config->shared_dpll = NULL;
10201 
10202 	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
10203 
10204 	if (IS_BROXTON(dev_priv)) {
10205 		bxt_get_dsi_transcoder_state(crtc, pipe_config,
10206 					     &power_domain_mask);
10207 		WARN_ON(active && pipe_config->has_dsi_encoder);
10208 		if (pipe_config->has_dsi_encoder)
10209 			active = true;
10210 	}
10211 
10212 	if (!active)
10213 		goto out;
10214 
10215 	if (!pipe_config->has_dsi_encoder) {
10216 		haswell_get_ddi_port_state(crtc, pipe_config);
10217 		intel_get_pipe_timings(crtc, pipe_config);
10218 	}
10219 
10220 	intel_get_pipe_src_size(crtc, pipe_config);
10221 
10222 	pipe_config->gamma_mode =
10223 		I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
10224 
10225 	if (INTEL_INFO(dev)->gen >= 9) {
10226 		skl_init_scalers(dev, crtc, pipe_config);
10227 
10230 		pipe_config->scaler_state.scaler_id = -1;
10231 		pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
10232 	}
10233 
10234 	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10235 	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
10236 		power_domain_mask |= BIT(power_domain);
10237 		if (INTEL_INFO(dev)->gen >= 9)
10238 			skylake_get_pfit_config(crtc, pipe_config);
10239 		else
10240 			ironlake_get_pfit_config(crtc, pipe_config);
10241 	}
10242 
10243 	if (IS_HASWELL(dev))
10244 		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
10245 			(I915_READ(IPS_CTL) & IPS_ENABLE);
10246 
10247 	if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
10248 	    !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
10249 		pipe_config->pixel_multiplier =
10250 			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
10251 	} else {
10252 		pipe_config->pixel_multiplier = 1;
10253 	}
10254 
10255 out:
10256 	for_each_power_domain(power_domain, power_domain_mask)
10257 		intel_display_power_put(dev_priv, power_domain);
10258 
10259 	return active;
10260 }
10261 
10262 static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
10263 			       const struct intel_plane_state *plane_state)
10264 {
10265 	struct drm_device *dev = crtc->dev;
10266 	struct drm_i915_private *dev_priv = dev->dev_private;
10267 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10268 	uint32_t cntl = 0, size = 0;
10269 
10270 	if (plane_state && plane_state->visible) {
10271 		unsigned int width = plane_state->base.crtc_w;
10272 		unsigned int height = plane_state->base.crtc_h;
10273 		unsigned int stride = roundup_pow_of_two(width) * 4;
10274 
10275 		switch (stride) {
10276 		default:
10277 			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
10278 				  width, stride);
10279 			stride = 256;
10280 			/* fallthrough */
10281 		case 256:
10282 		case 512:
10283 		case 1024:
10284 		case 2048:
10285 			break;
10286 		}
10287 
10288 		cntl |= CURSOR_ENABLE |
10289 			CURSOR_GAMMA_ENABLE |
10290 			CURSOR_FORMAT_ARGB |
10291 			CURSOR_STRIDE(stride);
10292 
10293 		size = (height << 12) | width;
10294 	}
10295 
10296 	if (intel_crtc->cursor_cntl != 0 &&
10297 	    (intel_crtc->cursor_base != base ||
10298 	     intel_crtc->cursor_size != size ||
10299 	     intel_crtc->cursor_cntl != cntl)) {
10300 		/* On these chipsets we can only modify the base/size/stride
10301 		 * whilst the cursor is disabled.
10302 		 */
10303 		I915_WRITE(CURCNTR(PIPE_A), 0);
10304 		POSTING_READ(CURCNTR(PIPE_A));
10305 		intel_crtc->cursor_cntl = 0;
10306 	}
10307 
10308 	if (intel_crtc->cursor_base != base) {
10309 		I915_WRITE(CURBASE(PIPE_A), base);
10310 		intel_crtc->cursor_base = base;
10311 	}
10312 
10313 	if (intel_crtc->cursor_size != size) {
10314 		I915_WRITE(CURSIZE, size);
10315 		intel_crtc->cursor_size = size;
10316 	}
10317 
10318 	if (intel_crtc->cursor_cntl != cntl) {
10319 		I915_WRITE(CURCNTR(PIPE_A), cntl);
10320 		POSTING_READ(CURCNTR(PIPE_A));
10321 		intel_crtc->cursor_cntl = cntl;
10322 	}
10323 }
10324 
10325 static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
10326 			       const struct intel_plane_state *plane_state)
10327 {
10328 	struct drm_device *dev = crtc->dev;
10329 	struct drm_i915_private *dev_priv = dev->dev_private;
10330 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10331 	int pipe = intel_crtc->pipe;
10332 	uint32_t cntl = 0;
10333 
10334 	if (plane_state && plane_state->visible) {
10335 		cntl = MCURSOR_GAMMA_ENABLE;
		switch (plane_state->base.crtc_w) {
		case 64:
			cntl |= CURSOR_MODE_64_ARGB_AX;
			break;
		case 128:
			cntl |= CURSOR_MODE_128_ARGB_AX;
			break;
		case 256:
			cntl |= CURSOR_MODE_256_ARGB_AX;
			break;
		default:
			MISSING_CASE(plane_state->base.crtc_w);
			return;
		}
10350 		cntl |= pipe << 28; /* Connect to correct pipe */
10351 
10352 		if (HAS_DDI(dev))
10353 			cntl |= CURSOR_PIPE_CSC_ENABLE;
10354 
10355 		if (plane_state->base.rotation == DRM_ROTATE_180)
10356 			cntl |= CURSOR_ROTATE_180;
10357 	}
10358 
10359 	if (intel_crtc->cursor_cntl != cntl) {
10360 		I915_WRITE(CURCNTR(pipe), cntl);
10361 		POSTING_READ(CURCNTR(pipe));
10362 		intel_crtc->cursor_cntl = cntl;
10363 	}
10364 
10365 	/* and commit changes on next vblank */
10366 	I915_WRITE(CURBASE(pipe), base);
10367 	POSTING_READ(CURBASE(pipe));
10368 
10369 	intel_crtc->cursor_base = base;
10370 }
10371 
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
10373 static void intel_crtc_update_cursor(struct drm_crtc *crtc,
10374 				     const struct intel_plane_state *plane_state)
10375 {
10376 	struct drm_device *dev = crtc->dev;
10377 	struct drm_i915_private *dev_priv = dev->dev_private;
10378 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10379 	int pipe = intel_crtc->pipe;
10380 	u32 base = intel_crtc->cursor_addr;
10381 	u32 pos = 0;
10382 
10383 	if (plane_state) {
10384 		int x = plane_state->base.crtc_x;
10385 		int y = plane_state->base.crtc_y;
10386 
10387 		if (x < 0) {
10388 			pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10389 			x = -x;
10390 		}
10391 		pos |= x << CURSOR_X_SHIFT;
10392 
10393 		if (y < 0) {
10394 			pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10395 			y = -y;
10396 		}
10397 		pos |= y << CURSOR_Y_SHIFT;
10398 
10399 		/* ILK+ do this automagically */
10400 		if (HAS_GMCH_DISPLAY(dev) &&
10401 		    plane_state->base.rotation == DRM_ROTATE_180) {
10402 			base += (plane_state->base.crtc_h *
10403 				 plane_state->base.crtc_w - 1) * 4;
10404 		}
10405 	}
10406 
10407 	I915_WRITE(CURPOS(pipe), pos);
10408 
10409 	if (IS_845G(dev) || IS_I865G(dev))
10410 		i845_update_cursor(crtc, base, plane_state);
10411 	else
10412 		i9xx_update_cursor(crtc, base, plane_state);
10413 }
10414 
10415 static bool cursor_size_ok(struct drm_device *dev,
10416 			   uint32_t width, uint32_t height)
10417 {
10418 	if (width == 0 || height == 0)
10419 		return false;
10420 
10421 	/*
10422 	 * 845g/865g are special in that they are only limited by
10423 	 * the width of their cursors, the height is arbitrary up to
10424 	 * the precision of the register. Everything else requires
10425 	 * square cursors, limited to a few power-of-two sizes.
10426 	 */
10427 	if (IS_845G(dev) || IS_I865G(dev)) {
10428 		if ((width & 63) != 0)
10429 			return false;
10430 
10431 		if (width > (IS_845G(dev) ? 64 : 512))
10432 			return false;
10433 
10434 		if (height > 1023)
10435 			return false;
10436 	} else {
		switch (width | height) {
		case 256:
		case 128:
			if (IS_GEN2(dev))
				return false;
			/* fall through */
		case 64:
10443 			break;
10444 		default:
10445 			return false;
10446 		}
10447 	}
10448 
10449 	return true;
10450 }
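
/*
 * Worked examples for the non-845g/865g branch above: a 64x64 cursor
 * gives width | height == 64 and is accepted everywhere; 128x128 and
 * 256x256 are likewise square and accepted on gen3+; a non-square
 * 64x128 cursor gives width | height == 192, which hits the default
 * case and is rejected.
 */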
10451 
10452 /* VESA 640x480x72Hz mode to set on the pipe */
10453 static struct drm_display_mode load_detect_mode = {
10454 	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
10455 		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
10456 };
10457 
10458 struct drm_framebuffer *
10459 __intel_framebuffer_create(struct drm_device *dev,
10460 			   struct drm_mode_fb_cmd2 *mode_cmd,
10461 			   struct drm_i915_gem_object *obj)
10462 {
10463 	struct intel_framebuffer *intel_fb;
10464 	int ret;
10465 
10466 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10467 	if (!intel_fb)
10468 		return ERR_PTR(-ENOMEM);
10469 
10470 	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
10471 	if (ret)
10472 		goto err;
10473 
10474 	return &intel_fb->base;
10475 
10476 err:
10477 	kfree(intel_fb);
10478 	return ERR_PTR(ret);
10479 }
10480 
10481 static struct drm_framebuffer *
10482 intel_framebuffer_create(struct drm_device *dev,
10483 			 struct drm_mode_fb_cmd2 *mode_cmd,
10484 			 struct drm_i915_gem_object *obj)
10485 {
10486 	struct drm_framebuffer *fb;
10487 	int ret;
10488 
10489 	ret = i915_mutex_lock_interruptible(dev);
10490 	if (ret)
10491 		return ERR_PTR(ret);
10492 	fb = __intel_framebuffer_create(dev, mode_cmd, obj);
10493 	mutex_unlock(&dev->struct_mutex);
10494 
10495 	return fb;
10496 }
10497 
10498 static u32
10499 intel_framebuffer_pitch_for_width(int width, int bpp)
10500 {
10501 	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
10502 	return ALIGN(pitch, 64);
10503 }
10504 
10505 static u32
10506 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
10507 {
10508 	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
10509 	return PAGE_ALIGN(pitch * mode->vdisplay);
10510 }
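
/*
 * Example of the two helpers above (a sketch, assuming 4K pages): a
 * 1024x768 mode at 32bpp gives
 * pitch = ALIGN(DIV_ROUND_UP(1024 * 32, 8), 64) = 4096 bytes, and
 * size = PAGE_ALIGN(4096 * 768) = 3145728 bytes, which happens to be
 * page aligned already.
 */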
10511 
10512 static struct drm_framebuffer *
10513 intel_framebuffer_create_for_mode(struct drm_device *dev,
10514 				  struct drm_display_mode *mode,
10515 				  int depth, int bpp)
10516 {
10517 	struct drm_framebuffer *fb;
10518 	struct drm_i915_gem_object *obj;
10519 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
10520 
10521 	obj = i915_gem_object_create(dev,
10522 				    intel_framebuffer_size_for_mode(mode, bpp));
10523 	if (IS_ERR(obj))
10524 		return ERR_CAST(obj);
10525 
10526 	mode_cmd.width = mode->hdisplay;
10527 	mode_cmd.height = mode->vdisplay;
10528 	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
10529 								bpp);
10530 	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
10531 
10532 	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
10533 	if (IS_ERR(fb))
10534 		drm_gem_object_unreference_unlocked(&obj->base);
10535 
10536 	return fb;
10537 }
10538 
10539 static struct drm_framebuffer *
10540 mode_fits_in_fbdev(struct drm_device *dev,
10541 		   struct drm_display_mode *mode)
10542 {
10543 #ifdef CONFIG_DRM_FBDEV_EMULATION
10544 	struct drm_i915_private *dev_priv = dev->dev_private;
10545 	struct drm_i915_gem_object *obj;
10546 	struct drm_framebuffer *fb;
10547 
10548 	if (!dev_priv->fbdev)
10549 		return NULL;
10550 
10551 	if (!dev_priv->fbdev->fb)
10552 		return NULL;
10553 
10554 	obj = dev_priv->fbdev->fb->obj;
10555 	BUG_ON(!obj);
10556 
10557 	fb = &dev_priv->fbdev->fb->base;
10558 	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
10559 							       fb->bits_per_pixel))
10560 		return NULL;
10561 
10562 	if (obj->base.size < mode->vdisplay * fb->pitches[0])
10563 		return NULL;
10564 
10565 	drm_framebuffer_reference(fb);
10566 	return fb;
10567 #else
10568 	return NULL;
10569 #endif
10570 }
10571 
10572 static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
10573 					   struct drm_crtc *crtc,
10574 					   struct drm_display_mode *mode,
10575 					   struct drm_framebuffer *fb,
10576 					   int x, int y)
10577 {
10578 	struct drm_plane_state *plane_state;
10579 	int hdisplay, vdisplay;
10580 	int ret;
10581 
10582 	plane_state = drm_atomic_get_plane_state(state, crtc->primary);
10583 	if (IS_ERR(plane_state))
10584 		return PTR_ERR(plane_state);
10585 
10586 	if (mode)
10587 		drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
10588 	else
10589 		hdisplay = vdisplay = 0;
10590 
10591 	ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
10592 	if (ret)
10593 		return ret;
10594 	drm_atomic_set_fb_for_plane(plane_state, fb);
10595 	plane_state->crtc_x = 0;
10596 	plane_state->crtc_y = 0;
10597 	plane_state->crtc_w = hdisplay;
10598 	plane_state->crtc_h = vdisplay;
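	/* The source viewport below is specified in 16.16 fixed point */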
10599 	plane_state->src_x = x << 16;
10600 	plane_state->src_y = y << 16;
10601 	plane_state->src_w = hdisplay << 16;
10602 	plane_state->src_h = vdisplay << 16;
10603 
10604 	return 0;
10605 }
10606 
10607 bool intel_get_load_detect_pipe(struct drm_connector *connector,
10608 				struct drm_display_mode *mode,
10609 				struct intel_load_detect_pipe *old,
10610 				struct drm_modeset_acquire_ctx *ctx)
10611 {
10612 	struct intel_crtc *intel_crtc;
10613 	struct intel_encoder *intel_encoder =
10614 		intel_attached_encoder(connector);
10615 	struct drm_crtc *possible_crtc;
10616 	struct drm_encoder *encoder = &intel_encoder->base;
10617 	struct drm_crtc *crtc = NULL;
10618 	struct drm_device *dev = encoder->dev;
10619 	struct drm_framebuffer *fb;
10620 	struct drm_mode_config *config = &dev->mode_config;
10621 	struct drm_atomic_state *state = NULL, *restore_state = NULL;
10622 	struct drm_connector_state *connector_state;
10623 	struct intel_crtc_state *crtc_state;
10624 	int ret, i = -1;
10625 
10626 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10627 		      connector->base.id, connector->name,
10628 		      encoder->base.id, encoder->name);
10629 
10630 	old->restore_state = NULL;
10631 
10632 retry:
10633 	ret = drm_modeset_lock(&config->connection_mutex, ctx);
10634 	if (ret)
10635 		goto fail;
10636 
10637 	/*
10638 	 * Algorithm gets a little messy:
10639 	 *
10640 	 *   - if the connector already has an assigned crtc, use it (but make
10641 	 *     sure it's on first)
10642 	 *
10643 	 *   - try to find the first unused crtc that can drive this connector,
10644 	 *     and use that if we find one
10645 	 */
10646 
10647 	/* See if we already have a CRTC for this connector */
10648 	if (connector->state->crtc) {
10649 		crtc = connector->state->crtc;
10650 
10651 		ret = drm_modeset_lock(&crtc->mutex, ctx);
10652 		if (ret)
10653 			goto fail;
10654 
10655 		/* Make sure the crtc and connector are running */
10656 		goto found;
10657 	}
10658 
10659 	/* Find an unused one (if possible) */
10660 	for_each_crtc(dev, possible_crtc) {
10661 		i++;
10662 		if (!(encoder->possible_crtcs & (1 << i)))
10663 			continue;
10664 
10665 		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
10666 		if (ret)
10667 			goto fail;
10668 
10669 		if (possible_crtc->state->enable) {
10670 			drm_modeset_unlock(&possible_crtc->mutex);
10671 			continue;
10672 		}
10673 
10674 		crtc = possible_crtc;
10675 		break;
10676 	}
10677 
10678 	/*
10679 	 * If we didn't find an unused CRTC, don't use any.
10680 	 */
10681 	if (!crtc) {
10682 		DRM_DEBUG_KMS("no pipe available for load-detect\n");
10683 		goto fail;
10684 	}
10685 
10686 found:
10687 	intel_crtc = to_intel_crtc(crtc);
10688 
10689 	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
10690 	if (ret)
10691 		goto fail;
10692 
10693 	state = drm_atomic_state_alloc(dev);
10694 	restore_state = drm_atomic_state_alloc(dev);
10695 	if (!state || !restore_state) {
10696 		ret = -ENOMEM;
10697 		goto fail;
10698 	}
10699 
10700 	state->acquire_ctx = ctx;
10701 	restore_state->acquire_ctx = ctx;
10702 
10703 	connector_state = drm_atomic_get_connector_state(state, connector);
10704 	if (IS_ERR(connector_state)) {
10705 		ret = PTR_ERR(connector_state);
10706 		goto fail;
10707 	}
10708 
10709 	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
10710 	if (ret)
10711 		goto fail;
10712 
10713 	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
10714 	if (IS_ERR(crtc_state)) {
10715 		ret = PTR_ERR(crtc_state);
10716 		goto fail;
10717 	}
10718 
10719 	crtc_state->base.active = crtc_state->base.enable = true;
10720 
10721 	if (!mode)
10722 		mode = &load_detect_mode;
10723 
10724 	/* We need a framebuffer large enough to accommodate all accesses
10725 	 * that the plane may generate whilst we perform load detection.
	 * We cannot rely on the fbcon either being present (we get called
10727 	 * during its initialisation to detect all boot displays, or it may
10728 	 * not even exist) or that it is large enough to satisfy the
10729 	 * requested mode.
10730 	 */
10731 	fb = mode_fits_in_fbdev(dev, mode);
	if (fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
	} else {
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	}
10737 	if (IS_ERR(fb)) {
10738 		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
10739 		goto fail;
10740 	}
10741 
10742 	ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
10743 	if (ret)
10744 		goto fail;
10745 
10746 	drm_framebuffer_unreference(fb);
10747 
10748 	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
10749 	if (ret)
10750 		goto fail;
10751 
10752 	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
10753 	if (!ret)
10754 		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
10755 	if (!ret)
10756 		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
10757 	if (ret) {
10758 		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
10759 		goto fail;
10760 	}
10761 
10762 	ret = drm_atomic_commit(state);
10763 	if (ret) {
10764 		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
10765 		goto fail;
10766 	}
10767 
10768 	old->restore_state = restore_state;
10769 
10770 	/* let the connector get through one full cycle before testing */
10771 	intel_wait_for_vblank(dev, intel_crtc->pipe);
10772 	return true;
10773 
10774 fail:
10775 	drm_atomic_state_free(state);
10776 	drm_atomic_state_free(restore_state);
10777 	restore_state = state = NULL;
10778 
10779 	if (ret == -EDEADLK) {
10780 		drm_modeset_backoff(ctx);
10781 		goto retry;
10782 	}
10783 
10784 	return false;
10785 }
10786 
10787 void intel_release_load_detect_pipe(struct drm_connector *connector,
10788 				    struct intel_load_detect_pipe *old,
10789 				    struct drm_modeset_acquire_ctx *ctx)
10790 {
10791 	struct intel_encoder *intel_encoder =
10792 		intel_attached_encoder(connector);
10793 	struct drm_encoder *encoder = &intel_encoder->base;
10794 	struct drm_atomic_state *state = old->restore_state;
10795 	int ret;
10796 
10797 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10798 		      connector->base.id, connector->name,
10799 		      encoder->base.id, encoder->name);
10800 
10801 	if (!state)
10802 		return;
10803 
10804 	ret = drm_atomic_commit(state);
10805 	if (ret) {
10806 		DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
10807 		drm_atomic_state_free(state);
10808 	}
10809 }
10810 
10811 static int i9xx_pll_refclk(struct drm_device *dev,
10812 			   const struct intel_crtc_state *pipe_config)
10813 {
10814 	struct drm_i915_private *dev_priv = dev->dev_private;
10815 	u32 dpll = pipe_config->dpll_hw_state.dpll;
10816 
10817 	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
10818 		return dev_priv->vbt.lvds_ssc_freq;
10819 	else if (HAS_PCH_SPLIT(dev))
10820 		return 120000;
10821 	else if (!IS_GEN2(dev))
10822 		return 96000;
10823 	else
10824 		return 48000;
10825 }
10826 
10827 /* Returns the clock of the currently programmed mode of the given pipe. */
10828 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
10829 				struct intel_crtc_state *pipe_config)
10830 {
10831 	struct drm_device *dev = crtc->base.dev;
10832 	struct drm_i915_private *dev_priv = dev->dev_private;
10833 	int pipe = pipe_config->cpu_transcoder;
10834 	u32 dpll = pipe_config->dpll_hw_state.dpll;
10835 	u32 fp;
10836 	struct dpll clock;
10837 	int port_clock;
10838 	int refclk = i9xx_pll_refclk(dev, pipe_config);
10839 
10840 	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
10841 		fp = pipe_config->dpll_hw_state.fp0;
10842 	else
10843 		fp = pipe_config->dpll_hw_state.fp1;
10844 
10845 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
10846 	if (IS_PINEVIEW(dev)) {
10847 		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
10848 		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
10849 	} else {
10850 		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
10851 		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
10852 	}
10853 
10854 	if (!IS_GEN2(dev)) {
10855 		if (IS_PINEVIEW(dev))
10856 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
10857 				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
10858 		else
10859 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
10860 			       DPLL_FPA01_P1_POST_DIV_SHIFT);
10861 
10862 		switch (dpll & DPLL_MODE_MASK) {
10863 		case DPLLB_MODE_DAC_SERIAL:
10864 			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
10865 				5 : 10;
10866 			break;
10867 		case DPLLB_MODE_LVDS:
10868 			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
10869 				7 : 14;
10870 			break;
10871 		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed mode\n",
				      (int)(dpll & DPLL_MODE_MASK));
10874 			return;
10875 		}
10876 
10877 		if (IS_PINEVIEW(dev))
10878 			port_clock = pnv_calc_dpll_params(refclk, &clock);
10879 		else
10880 			port_clock = i9xx_calc_dpll_params(refclk, &clock);
10881 	} else {
10882 		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
10883 		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
10884 
10885 		if (is_lvds) {
10886 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
10887 				       DPLL_FPA01_P1_POST_DIV_SHIFT);
10888 
10889 			if (lvds & LVDS_CLKB_POWER_UP)
10890 				clock.p2 = 7;
10891 			else
10892 				clock.p2 = 14;
10893 		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO) {
				clock.p1 = 2;
			} else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
10900 			if (dpll & PLL_P2_DIVIDE_BY_4)
10901 				clock.p2 = 4;
10902 			else
10903 				clock.p2 = 2;
10904 		}
10905 
10906 		port_clock = i9xx_calc_dpll_params(refclk, &clock);
10907 	}
10908 
10909 	/*
10910 	 * This value includes pixel_multiplier. We will use
10911 	 * port_clock to compute adjusted_mode.crtc_clock in the
10912 	 * encoder's get_config() function.
10913 	 */
10914 	pipe_config->port_clock = port_clock;
10915 }
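
/*
 * A sketch of the math done by the calc_dpll_params() helpers on the
 * divisor values read back above (ignoring the fixed encoding offsets
 * applied to the raw register fields): vco = refclk * m / n and
 * port_clock = vco / (p1 * p2). E.g. a 96000 kHz refclk with m = 100,
 * n = 6, p1 = 2 and p2 = 10 would yield 96000 * 100 / 6 / 20 =
 * 80000 kHz.
 */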
10916 
10917 int intel_dotclock_calculate(int link_freq,
10918 			     const struct intel_link_m_n *m_n)
10919 {
	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precision if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and the dot clock, using the link M/N values, is simpler:
	 * dot_clock = (link_m * link_clock) / link_n
	 */
10929 
10930 	if (!m_n->link_n)
10931 		return 0;
10932 
10933 	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
10934 }
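
/*
 * The result is in the same unit as link_freq. For example, a DP link
 * at link_freq = 270000 kHz with link_m/link_n programmed to a 1:2
 * ratio yields a 135000 kHz dot clock.
 */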
10935 
10936 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
10937 				   struct intel_crtc_state *pipe_config)
10938 {
10939 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10940 
10941 	/* read out port_clock from the DPLL */
10942 	i9xx_crtc_clock_get(crtc, pipe_config);
10943 
10944 	/*
10945 	 * In case there is an active pipe without active ports,
10946 	 * we may need some idea for the dotclock anyway.
10947 	 * Calculate one based on the FDI configuration.
10948 	 */
10949 	pipe_config->base.adjusted_mode.crtc_clock =
10950 		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
10951 					 &pipe_config->fdi_m_n);
10952 }
10953 
10954 /** Returns the currently programmed mode of the given pipe. */
10955 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
10956 					     struct drm_crtc *crtc)
10957 {
10958 	struct drm_i915_private *dev_priv = dev->dev_private;
10959 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10960 	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
10961 	struct drm_display_mode *mode;
10962 	struct intel_crtc_state *pipe_config;
10963 	int htot = I915_READ(HTOTAL(cpu_transcoder));
10964 	int hsync = I915_READ(HSYNC(cpu_transcoder));
10965 	int vtot = I915_READ(VTOTAL(cpu_transcoder));
10966 	int vsync = I915_READ(VSYNC(cpu_transcoder));
10967 	enum i915_pipe pipe = intel_crtc->pipe;
10968 
10969 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
10970 	if (!mode)
10971 		return NULL;
10972 
10973 	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
10974 	if (!pipe_config) {
10975 		kfree(mode);
10976 		return NULL;
10977 	}
10978 
10979 	/*
10980 	 * Construct a pipe_config sufficient for getting the clock info
10981 	 * back out of crtc_clock_get.
10982 	 *
10983 	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
10984 	 * to use a real value here instead.
10985 	 */
10986 	pipe_config->cpu_transcoder = (enum transcoder) pipe;
10987 	pipe_config->pixel_multiplier = 1;
10988 	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
10989 	pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
10990 	pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
10991 	i9xx_crtc_clock_get(intel_crtc, pipe_config);
10992 
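	/*
	 * The timing registers hold each value minus one; e.g. for the
	 * 640x480 load_detect_mode above, HTOTAL would read back as
	 * 0x033f027f: (0x27f + 1) = 640 active pixels and
	 * (0x33f + 1) = 832 total.
	 */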
10993 	mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
10994 	mode->hdisplay = (htot & 0xffff) + 1;
10995 	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
10996 	mode->hsync_start = (hsync & 0xffff) + 1;
10997 	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
10998 	mode->vdisplay = (vtot & 0xffff) + 1;
10999 	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
11000 	mode->vsync_start = (vsync & 0xffff) + 1;
11001 	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
11002 
11003 	drm_mode_set_name(mode);
11004 
11005 	kfree(pipe_config);
11006 
11007 	return mode;
11008 }
11009 
11010 void intel_mark_busy(struct drm_i915_private *dev_priv)
11011 {
11012 	if (dev_priv->mm.busy)
11013 		return;
11014 
11015 	intel_runtime_pm_get(dev_priv);
11016 	i915_update_gfx_val(dev_priv);
11017 	if (INTEL_GEN(dev_priv) >= 6)
11018 		gen6_rps_busy(dev_priv);
11019 	dev_priv->mm.busy = true;
11020 }
11021 
11022 void intel_mark_idle(struct drm_i915_private *dev_priv)
11023 {
11024 	if (!dev_priv->mm.busy)
11025 		return;
11026 
11027 	dev_priv->mm.busy = false;
11028 
11029 	if (INTEL_GEN(dev_priv) >= 6)
11030 		gen6_rps_idle(dev_priv);
11031 
11032 	intel_runtime_pm_put(dev_priv);
11033 }
11034 
11035 static void intel_crtc_destroy(struct drm_crtc *crtc)
11036 {
11037 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11038 	struct drm_device *dev = crtc->dev;
11039 	struct intel_flip_work *work;
11040 
11041 	spin_lock_irq(&dev->event_lock);
11042 	work = intel_crtc->flip_work;
11043 	intel_crtc->flip_work = NULL;
11044 	spin_unlock_irq(&dev->event_lock);
11045 
11046 	if (work) {
11047 		cancel_work_sync(&work->mmio_work);
11048 		cancel_work_sync(&work->unpin_work);
11049 		kfree(work);
11050 	}
11051 
11052 	drm_crtc_cleanup(crtc);
11053 
11054 	kfree(intel_crtc);
11055 }
11056 
11057 static void intel_unpin_work_fn(struct work_struct *__work)
11058 {
11059 	struct intel_flip_work *work =
11060 		container_of(__work, struct intel_flip_work, unpin_work);
11061 	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
11062 	struct drm_device *dev = crtc->base.dev;
11063 	struct drm_plane *primary = crtc->base.primary;
11064 
11065 	if (is_mmio_work(work))
11066 		flush_work(&work->mmio_work);
11067 
11068 	mutex_lock(&dev->struct_mutex);
11069 	intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
11070 	drm_gem_object_unreference(&work->pending_flip_obj->base);
11071 
11072 	if (work->flip_queued_req)
11073 		i915_gem_request_assign(&work->flip_queued_req, NULL);
11074 	mutex_unlock(&dev->struct_mutex);
11075 
11076 	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
11077 	intel_fbc_post_update(crtc);
11078 	drm_framebuffer_unreference(work->old_fb);
11079 
11080 	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
11081 	atomic_dec(&crtc->unpin_work_count);
11082 
11083 	kfree(work);
11084 }
11085 
11086 /* Is 'a' after or equal to 'b'? */
11087 static bool g4x_flip_count_after_eq(u32 a, u32 b)
11088 {
11089 	return !((a - b) & 0x80000000);
11090 }
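
/*
 * The subtraction makes the comparison wrap-safe: e.g. a = 2,
 * b = 0xffffffff gives a - b = 3 (top bit clear, so 'a' is after 'b'
 * across the wraparound), while a = 0, b = 1 gives a - b = 0xffffffff
 * (top bit set, so 'a' is before 'b').
 */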
11091 
11092 static bool __pageflip_finished_cs(struct intel_crtc *crtc,
11093 				   struct intel_flip_work *work)
11094 {
11095 	struct drm_device *dev = crtc->base.dev;
11096 	struct drm_i915_private *dev_priv = dev->dev_private;
11097 	unsigned reset_counter;
11098 
11099 	reset_counter = i915_reset_counter(&dev_priv->gpu_error);
11100 	if (crtc->reset_counter != reset_counter)
11101 		return true;
11102 
	/*
	 * The relevant registers don't exist on pre-ctg.
	 * As the flip done interrupt doesn't trigger for mmio
	 * flips on gmch platforms, a flip count check isn't
	 * really needed there. But since ctg has the registers,
	 * include it in the check anyway.
	 */
11110 	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
11111 		return true;
11112 
11113 	/*
11114 	 * BDW signals flip done immediately if the plane
11115 	 * is disabled, even if the plane enable is already
11116 	 * armed to occur at the next vblank :(
11117 	 */
11118 
11119 	/*
11120 	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
11121 	 * used the same base address. In that case the mmio flip might
11122 	 * have completed, but the CS hasn't even executed the flip yet.
11123 	 *
11124 	 * A flip count check isn't enough as the CS might have updated
11125 	 * the base address just after start of vblank, but before we
11126 	 * managed to process the interrupt. This means we'd complete the
11127 	 * CS flip too soon.
11128 	 *
11129 	 * Combining both checks should get us a good enough result. It may
11130 	 * still happen that the CS flip has been executed, but has not
11131 	 * yet actually completed. But in case the base address is the same
11132 	 * anyway, we don't really care.
11133 	 */
11134 	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
11135 		crtc->flip_work->gtt_offset &&
11136 		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
11137 				    crtc->flip_work->flip_count);
11138 }
11139 
static bool __pageflip_finished_mmio(struct intel_crtc *crtc,
				     struct intel_flip_work *work)
11143 {
11144 	/*
11145 	 * MMIO work completes when vblank is different from
11146 	 * flip_queued_vblank.
11147 	 *
11148 	 * Reset counter value doesn't matter, this is handled by
11149 	 * i915_wait_request finishing early, so no need to handle
11150 	 * reset here.
11151 	 */
11152 	return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
11153 }
11156 static bool pageflip_finished(struct intel_crtc *crtc,
11157 			      struct intel_flip_work *work)
11158 {
11159 	if (!atomic_read(&work->pending))
11160 		return false;
11161 
11162 	smp_rmb();
11163 
11164 	if (is_mmio_work(work))
11165 		return __pageflip_finished_mmio(crtc, work);
11166 	else
11167 		return __pageflip_finished_cs(crtc, work);
11168 }
11169 
11170 void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
11171 {
11172 	struct drm_device *dev = dev_priv->dev;
11173 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11174 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11175 	struct intel_flip_work *work;
11176 	unsigned long flags;
11177 
11178 	/* Ignore early vblank irqs */
11179 	if (!crtc)
11180 		return;
11181 
11182 	/*
11183 	 * This is called both by irq handlers and the reset code (to complete
11184 	 * lost pageflips) so needs the full irqsave spinlocks.
11185 	 */
11186 	spin_lock_irqsave(&dev->event_lock, flags);
11187 	work = intel_crtc->flip_work;
11188 
11189 	if (work != NULL &&
11190 	    !is_mmio_work(work) &&
11191 	    pageflip_finished(intel_crtc, work))
11192 		page_flip_completed(intel_crtc);
11193 
11194 	spin_unlock_irqrestore(&dev->event_lock, flags);
11195 }
11196 
11197 void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
11198 {
11199 	struct drm_device *dev = dev_priv->dev;
11200 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11201 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11202 	struct intel_flip_work *work;
11203 	unsigned long flags;
11204 
11205 	/* Ignore early vblank irqs */
11206 	if (!crtc)
11207 		return;
11208 
11209 	/*
11210 	 * This is called both by irq handlers and the reset code (to complete
11211 	 * lost pageflips) so needs the full irqsave spinlocks.
11212 	 */
11213 	spin_lock_irqsave(&dev->event_lock, flags);
11214 	work = intel_crtc->flip_work;
11215 
11216 	if (work != NULL &&
11217 	    is_mmio_work(work) &&
11218 	    pageflip_finished(intel_crtc, work))
11219 		page_flip_completed(intel_crtc);
11220 
11221 	spin_unlock_irqrestore(&dev->event_lock, flags);
11222 }
11223 
11224 static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
11225 					       struct intel_flip_work *work)
11226 {
11227 	work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);
11228 
	/* Ensure that the work item is consistent when activating it;
	 * the barrier here pairs with the smp_rmb() in pageflip_finished().
	 */
	smp_mb__before_atomic();
	atomic_set(&work->pending, 1);
11232 }
11233 
11234 static int intel_gen2_queue_flip(struct drm_device *dev,
11235 				 struct drm_crtc *crtc,
11236 				 struct drm_framebuffer *fb,
11237 				 struct drm_i915_gem_object *obj,
11238 				 struct drm_i915_gem_request *req,
11239 				 uint32_t flags)
11240 {
11241 	struct intel_engine_cs *engine = req->engine;
11242 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11243 	u32 flip_mask;
11244 	int ret;
11245 
11246 	ret = intel_ring_begin(req, 6);
11247 	if (ret)
11248 		return ret;
11249 
11250 	/* Can't queue multiple flips, so wait for the previous
11251 	 * one to finish before executing the next.
11252 	 */
11253 	if (intel_crtc->plane)
11254 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
11255 	else
11256 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
11257 	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
11258 	intel_ring_emit(engine, MI_NOOP);
11259 	intel_ring_emit(engine, MI_DISPLAY_FLIP |
11260 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11261 	intel_ring_emit(engine, fb->pitches[0]);
11262 	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
11263 	intel_ring_emit(engine, 0); /* aux display base address, unused */
11264 
11265 	return 0;
11266 }
11267 
11268 static int intel_gen3_queue_flip(struct drm_device *dev,
11269 				 struct drm_crtc *crtc,
11270 				 struct drm_framebuffer *fb,
11271 				 struct drm_i915_gem_object *obj,
11272 				 struct drm_i915_gem_request *req,
11273 				 uint32_t flags)
11274 {
11275 	struct intel_engine_cs *engine = req->engine;
11276 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11277 	u32 flip_mask;
11278 	int ret;
11279 
11280 	ret = intel_ring_begin(req, 6);
11281 	if (ret)
11282 		return ret;
11283 
11284 	if (intel_crtc->plane)
11285 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
11286 	else
11287 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
11288 	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
11289 	intel_ring_emit(engine, MI_NOOP);
11290 	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
11291 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11292 	intel_ring_emit(engine, fb->pitches[0]);
11293 	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
11294 	intel_ring_emit(engine, MI_NOOP);
11295 
11296 	return 0;
11297 }
11298 
11299 static int intel_gen4_queue_flip(struct drm_device *dev,
11300 				 struct drm_crtc *crtc,
11301 				 struct drm_framebuffer *fb,
11302 				 struct drm_i915_gem_object *obj,
11303 				 struct drm_i915_gem_request *req,
11304 				 uint32_t flags)
11305 {
11306 	struct intel_engine_cs *engine = req->engine;
11307 	struct drm_i915_private *dev_priv = dev->dev_private;
11308 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11309 	uint32_t pf, pipesrc;
11310 	int ret;
11311 
11312 	ret = intel_ring_begin(req, 4);
11313 	if (ret)
11314 		return ret;
11315 
11316 	/* i965+ uses the linear or tiled offsets from the
11317 	 * Display Registers (which do not change across a page-flip)
11318 	 * so we need only reprogram the base address.
11319 	 */
11320 	intel_ring_emit(engine, MI_DISPLAY_FLIP |
11321 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11322 	intel_ring_emit(engine, fb->pitches[0]);
11323 	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset |
11324 			obj->tiling_mode);
11325 
11326 	/* XXX Enabling the panel-fitter across page-flip is so far
11327 	 * untested on non-native modes, so ignore it for now.
11328 	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
11329 	 */
11330 	pf = 0;
11331 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11332 	intel_ring_emit(engine, pf | pipesrc);
11333 
11334 	return 0;
11335 }
11336 
11337 static int intel_gen6_queue_flip(struct drm_device *dev,
11338 				 struct drm_crtc *crtc,
11339 				 struct drm_framebuffer *fb,
11340 				 struct drm_i915_gem_object *obj,
11341 				 struct drm_i915_gem_request *req,
11342 				 uint32_t flags)
11343 {
11344 	struct intel_engine_cs *engine = req->engine;
11345 	struct drm_i915_private *dev_priv = dev->dev_private;
11346 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11347 	uint32_t pf, pipesrc;
11348 	int ret;
11349 
11350 	ret = intel_ring_begin(req, 4);
11351 	if (ret)
11352 		return ret;
11353 
11354 	intel_ring_emit(engine, MI_DISPLAY_FLIP |
11355 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11356 	intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
11357 	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
11358 
11359 	/* Contrary to the suggestions in the documentation,
11360 	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse, causes a normal
11362 	 * modeset to fail.
11363 	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
11364 	 */
11365 	pf = 0;
11366 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11367 	intel_ring_emit(engine, pf | pipesrc);
11368 
11369 	return 0;
11370 }
11371 
11372 static int intel_gen7_queue_flip(struct drm_device *dev,
11373 				 struct drm_crtc *crtc,
11374 				 struct drm_framebuffer *fb,
11375 				 struct drm_i915_gem_object *obj,
11376 				 struct drm_i915_gem_request *req,
11377 				 uint32_t flags)
11378 {
11379 	struct intel_engine_cs *engine = req->engine;
11380 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11381 	uint32_t plane_bit = 0;
11382 	int len, ret;
11383 
11384 	switch (intel_crtc->plane) {
11385 	case PLANE_A:
11386 		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
11387 		break;
11388 	case PLANE_B:
11389 		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
11390 		break;
11391 	case PLANE_C:
11392 		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
11393 		break;
11394 	default:
11395 		WARN_ONCE(1, "unknown plane in flip command\n");
11396 		return -ENODEV;
11397 	}
11398 
11399 	len = 4;
11400 	if (engine->id == RCS) {
11401 		len += 6;
11402 		/*
		 * On Gen 8, SRM takes an extra dword to accommodate
		 * 48-bit addresses, and we need a NOOP for the batch size to
11405 		 * stay even.
11406 		 */
11407 		if (IS_GEN8(dev))
11408 			len += 2;
11409 	}
11410 
11411 	/*
11412 	 * BSpec MI_DISPLAY_FLIP for IVB:
11413 	 * "The full packet must be contained within the same cache line."
11414 	 *
11415 	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
11416 	 * cacheline, if we ever start emitting more commands before
11417 	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
11418 	 * then do the cacheline alignment, and finally emit the
11419 	 * MI_DISPLAY_FLIP.
11420 	 */
11421 	ret = intel_ring_cacheline_align(req);
11422 	if (ret)
11423 		return ret;
11424 
11425 	ret = intel_ring_begin(req, len);
11426 	if (ret)
11427 		return ret;
11428 
11429 	/* Unmask the flip-done completion message. Note that the bspec says that
11430 	 * we should do this for both the BCS and RCS, and that we must not unmask
11431 	 * more than one flip event at any time (or ensure that one flip message
11432 	 * can be sent by waiting for flip-done prior to queueing new flips).
11433 	 * Experimentation says that BCS works despite DERRMR masking all
11434 	 * flip-done completion events and that unmasking all planes at once
11435 	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
11436 	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
11437 	 */
11438 	if (engine->id == RCS) {
11439 		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
11440 		intel_ring_emit_reg(engine, DERRMR);
11441 		intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
11442 					  DERRMR_PIPEB_PRI_FLIP_DONE |
11443 					  DERRMR_PIPEC_PRI_FLIP_DONE));
11444 		if (IS_GEN8(dev))
11445 			intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 |
11446 					      MI_SRM_LRM_GLOBAL_GTT);
11447 		else
11448 			intel_ring_emit(engine, MI_STORE_REGISTER_MEM |
11449 					      MI_SRM_LRM_GLOBAL_GTT);
11450 		intel_ring_emit_reg(engine, DERRMR);
11451 		intel_ring_emit(engine, engine->scratch.gtt_offset + 256);
11452 		if (IS_GEN8(dev)) {
11453 			intel_ring_emit(engine, 0);
11454 			intel_ring_emit(engine, MI_NOOP);
11455 		}
11456 	}
11457 
11458 	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
	intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
	intel_ring_emit(engine, MI_NOOP);
11462 
11463 	return 0;
11464 }
11465 
11466 static bool use_mmio_flip(struct intel_engine_cs *engine,
11467 			  struct drm_i915_gem_object *obj)
11468 {
11469 	struct reservation_object *resv;
11470 
	/*
	 * MMIO flips are not used on older platforms because the lack
	 * of a flip done interrupt forces us to use CS flips there.
	 * Older platforms derive flip done using some clever tricks
	 * involving the flip_pending status bits and vblank irqs, and
	 * using MMIO flips would disrupt that mechanism.
	 */
11478 
11479 	if (engine == NULL)
11480 		return true;
11481 
11482 	if (INTEL_GEN(engine->i915) < 5)
11483 		return false;
11484 
11485 	if (i915.use_mmio_flip < 0)
11486 		return false;
11487 	else if (i915.use_mmio_flip > 0)
11488 		return true;
11489 	else if (i915.enable_execlists)
11490 		return true;
11491 
11492 	resv = i915_gem_object_get_dmabuf_resv(obj);
11493 	if (resv && !reservation_object_test_signaled_rcu(resv, false))
11494 		return true;
11495 
11496 	return engine != i915_gem_request_get_engine(obj->last_write_req);
11497 }
11498 
11499 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
11500 			     unsigned int rotation,
11501 			     struct intel_flip_work *work)
11502 {
11503 	struct drm_device *dev = intel_crtc->base.dev;
11504 	struct drm_i915_private *dev_priv = dev->dev_private;
11505 	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
11506 	const enum i915_pipe pipe = intel_crtc->pipe;
11507 	u32 ctl, stride, tile_height;
11508 
11509 	ctl = I915_READ(PLANE_CTL(pipe, 0));
11510 	ctl &= ~PLANE_CTL_TILED_MASK;
11511 	switch (fb->modifier[0]) {
11512 	case DRM_FORMAT_MOD_NONE:
11513 		break;
11514 	case I915_FORMAT_MOD_X_TILED:
11515 		ctl |= PLANE_CTL_TILED_X;
11516 		break;
11517 	case I915_FORMAT_MOD_Y_TILED:
11518 		ctl |= PLANE_CTL_TILED_Y;
11519 		break;
11520 	case I915_FORMAT_MOD_Yf_TILED:
11521 		ctl |= PLANE_CTL_TILED_YF;
11522 		break;
11523 	default:
11524 		MISSING_CASE(fb->modifier[0]);
11525 	}
11526 
11527 	/*
	 * The stride is either expressed as a multiple of 64-byte chunks for
11529 	 * linear buffers or in number of tiles for tiled buffers.
11530 	 */
11531 	if (intel_rotation_90_or_270(rotation)) {
11532 		/* stride = Surface height in tiles */
11533 		tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0);
11534 		stride = DIV_ROUND_UP(fb->height, tile_height);
11535 	} else {
11536 		stride = fb->pitches[0] /
11537 			intel_fb_stride_alignment(dev_priv, fb->modifier[0],
11538 						  fb->pixel_format);
11539 	}
11540 
11541 	/*
11542 	 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
11543 	 * PLANE_SURF updates, the update is then guaranteed to be atomic.
11544 	 */
11545 	I915_WRITE(PLANE_CTL(pipe, 0), ctl);
11546 	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
11547 
11548 	I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
11549 	POSTING_READ(PLANE_SURF(pipe, 0));
11550 }
11551 
11552 static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
11553 			     struct intel_flip_work *work)
11554 {
11555 	struct drm_device *dev = intel_crtc->base.dev;
11556 	struct drm_i915_private *dev_priv = dev->dev_private;
11557 	struct intel_framebuffer *intel_fb =
11558 		to_intel_framebuffer(intel_crtc->base.primary->fb);
11559 	struct drm_i915_gem_object *obj = intel_fb->obj;
11560 	i915_reg_t reg = DSPCNTR(intel_crtc->plane);
11561 	u32 dspcntr;
11562 
11563 	dspcntr = I915_READ(reg);
11564 
11565 	if (obj->tiling_mode != I915_TILING_NONE)
11566 		dspcntr |= DISPPLANE_TILED;
11567 	else
11568 		dspcntr &= ~DISPPLANE_TILED;
11569 
11570 	I915_WRITE(reg, dspcntr);
11571 
11572 	I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
11573 	POSTING_READ(DSPSURF(intel_crtc->plane));
11574 }
11575 
11576 static void intel_mmio_flip_work_func(struct work_struct *w)
11577 {
11578 	struct intel_flip_work *work =
11579 		container_of(w, struct intel_flip_work, mmio_work);
11580 	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
11581 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11582 	struct intel_framebuffer *intel_fb =
11583 		to_intel_framebuffer(crtc->base.primary->fb);
11584 	struct drm_i915_gem_object *obj = intel_fb->obj;
11585 	struct reservation_object *resv;
11586 
11587 	if (work->flip_queued_req)
11588 		WARN_ON(__i915_wait_request(work->flip_queued_req,
11589 					    false, NULL,
11590 					    &dev_priv->rps.mmioflips));
11591 
11592 	/* For framebuffer backed by dmabuf, wait for fence */
11593 	resv = i915_gem_object_get_dmabuf_resv(obj);
11594 	if (resv)
11595 		WARN_ON(reservation_object_wait_timeout_rcu(resv, false, false,
11596 							    MAX_SCHEDULE_TIMEOUT) < 0);
11597 
11598 	intel_pipe_update_start(crtc);
11599 
11600 	if (INTEL_GEN(dev_priv) >= 9)
11601 		skl_do_mmio_flip(crtc, work->rotation, work);
11602 	else
		/* use_mmio_flip() restricts MMIO flips to ilk+ */
11604 		ilk_do_mmio_flip(crtc, work);
11605 
11606 	intel_pipe_update_end(crtc, work);
11607 }
11608 
11609 static int intel_default_queue_flip(struct drm_device *dev,
11610 				    struct drm_crtc *crtc,
11611 				    struct drm_framebuffer *fb,
11612 				    struct drm_i915_gem_object *obj,
11613 				    struct drm_i915_gem_request *req,
11614 				    uint32_t flags)
11615 {
11616 	return -ENODEV;
11617 }
11618 
11619 static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
11620 				      struct intel_crtc *intel_crtc,
11621 				      struct intel_flip_work *work)
11622 {
11623 	u32 addr, vblank;
11624 
11625 	if (!atomic_read(&work->pending))
11626 		return false;
11627 
11628 	smp_rmb();
11629 
11630 	vblank = intel_crtc_get_vblank_counter(intel_crtc);
11631 	if (work->flip_ready_vblank == 0) {
11632 		if (work->flip_queued_req &&
11633 		    !i915_gem_request_completed(work->flip_queued_req, true))
11634 			return false;
11635 
11636 		work->flip_ready_vblank = vblank;
11637 	}
11638 
11639 	if (vblank - work->flip_ready_vblank < 3)
11640 		return false;
11641 
11642 	/* Potential stall - if we see that the flip has happened,
11643 	 * assume a missed interrupt. */
11644 	if (INTEL_GEN(dev_priv) >= 4)
11645 		addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
11646 	else
11647 		addr = I915_READ(DSPADDR(intel_crtc->plane));
11648 
11649 	/* There is a potential issue here with a false positive after a flip
11650 	 * to the same address. We could address this by checking for a
11651 	 * non-incrementing frame counter.
11652 	 */
11653 	return addr == work->gtt_offset;
11654 }
11655 
11656 void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
11657 {
11658 	struct drm_device *dev = dev_priv->dev;
11659 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11660 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11661 	struct intel_flip_work *work;
11662 
11663 //	WARN_ON(!in_interrupt());
11664 
11665 	if (crtc == NULL)
11666 		return;
11667 
11668 	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
11669 	work = intel_crtc->flip_work;
11670 
11671 	if (work != NULL && !is_mmio_work(work) &&
11672 	    __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) {
		WARN_ONCE(1,
			  "Kicking stuck page flip: queued at %d, now %d\n",
			  work->flip_queued_vblank,
			  intel_crtc_get_vblank_counter(intel_crtc));
11676 		page_flip_completed(intel_crtc);
11677 		work = NULL;
11678 	}
11679 
11680 	if (work != NULL && !is_mmio_work(work) &&
11681 	    intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1)
11682 		intel_queue_rps_boost_for_request(work->flip_queued_req);
11683 	lockmgr(&dev->event_lock, LK_RELEASE);
11684 }
11685 
11686 static int intel_crtc_page_flip(struct drm_crtc *crtc,
11687 				struct drm_framebuffer *fb,
11688 				struct drm_pending_vblank_event *event,
11689 				uint32_t page_flip_flags)
11690 {
11691 	struct drm_device *dev = crtc->dev;
11692 	struct drm_i915_private *dev_priv = dev->dev_private;
11693 	struct drm_framebuffer *old_fb = crtc->primary->fb;
11694 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11695 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11696 	struct drm_plane *primary = crtc->primary;
11697 	enum i915_pipe pipe = intel_crtc->pipe;
11698 	struct intel_flip_work *work;
11699 	struct intel_engine_cs *engine;
11700 	bool mmio_flip;
11701 	struct drm_i915_gem_request *request = NULL;
11702 	int ret;
11703 
11704 	/*
11705 	 * drm_mode_page_flip_ioctl() should already catch this, but double
11706 	 * check to be safe.  In the future we may enable pageflipping from
11707 	 * a disabled primary plane.
11708 	 */
11709 	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
11710 		return -EBUSY;
11711 
11712 	/* Can't change pixel format via MI display flips. */
11713 	if (fb->pixel_format != crtc->primary->fb->pixel_format)
11714 		return -EINVAL;
11715 
11716 	/*
11717 	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
	 * Note that pitch changes could also affect these registers.
11719 	 */
11720 	if (INTEL_INFO(dev)->gen > 3 &&
11721 	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
11722 	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
11723 		return -EINVAL;
11724 
11725 	if (i915_terminally_wedged(&dev_priv->gpu_error))
11726 		goto out_hang;
11727 
11728 	work = kzalloc(sizeof(*work), GFP_KERNEL);
11729 	if (work == NULL)
11730 		return -ENOMEM;
11731 
11732 	work->event = event;
11733 	work->crtc = crtc;
11734 	work->old_fb = old_fb;
11735 	INIT_WORK(&work->unpin_work, intel_unpin_work_fn);
11736 
11737 	ret = drm_crtc_vblank_get(crtc);
11738 	if (ret)
11739 		goto free_work;
11740 
11741 	/* We borrow the event spin lock for protecting flip_work */
11742 	spin_lock_irq(&dev->event_lock);
11743 	if (intel_crtc->flip_work) {
11744 		/* Before declaring the flip queue wedged, check if
11745 		 * the hardware completed the operation behind our backs.
11746 		 */
11747 		if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
11748 			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
11749 			page_flip_completed(intel_crtc);
11750 		} else {
11751 			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
11752 			spin_unlock_irq(&dev->event_lock);
11753 
11754 			drm_crtc_vblank_put(crtc);
11755 			kfree(work);
11756 			return -EBUSY;
11757 		}
11758 	}
11759 	intel_crtc->flip_work = work;
11760 	spin_unlock_irq(&dev->event_lock);
11761 
11762 	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
11763 		flush_workqueue(dev_priv->wq);
11764 
11765 	/* Reference the objects for the scheduled work. */
11766 	drm_framebuffer_reference(work->old_fb);
11767 	drm_gem_object_reference(&obj->base);
11768 
11769 	crtc->primary->fb = fb;
11770 	update_state_fb(crtc->primary);
11771 
11772 	intel_fbc_pre_update(intel_crtc, intel_crtc->config,
11773 			     to_intel_plane_state(primary->state));
11774 
11775 	work->pending_flip_obj = obj;
11776 
11777 	ret = i915_mutex_lock_interruptible(dev);
11778 	if (ret)
11779 		goto cleanup;
11780 
11781 	intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
11782 	if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) {
11783 		ret = -EIO;
11784 		goto cleanup;
11785 	}
11786 
11787 	atomic_inc(&intel_crtc->unpin_work_count);
11788 
11789 	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
11790 		work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
11791 
11792 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
11793 		engine = &dev_priv->engine[BCS];
11794 		if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
11795 			/* vlv: DISPLAY_FLIP fails to change tiling */
11796 			engine = NULL;
11797 	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
11798 		engine = &dev_priv->engine[BCS];
11799 	} else if (INTEL_INFO(dev)->gen >= 7) {
11800 		engine = i915_gem_request_get_engine(obj->last_write_req);
11801 		if (engine == NULL || engine->id != RCS)
11802 			engine = &dev_priv->engine[BCS];
11803 	} else {
11804 		engine = &dev_priv->engine[RCS];
11805 	}
11806 
11807 	mmio_flip = use_mmio_flip(engine, obj);
11808 
11809 	/* When using CS flips, we want to emit semaphores between rings.
11810 	 * However, when using mmio flips we will create a task to do the
11811 	 * synchronisation, so all we want here is to pin the framebuffer
11812 	 * into the display plane and skip any waits.
11813 	 */
11814 	if (!mmio_flip) {
11815 		ret = i915_gem_object_sync(obj, engine, &request);
11816 		if (!ret && !request) {
11817 			request = i915_gem_request_alloc(engine, NULL);
11818 			ret = PTR_ERR_OR_ZERO(request);
11819 		}
11820 
11821 		if (ret)
11822 			goto cleanup_pending;
11823 	}
11824 
11825 	ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
11826 	if (ret)
11827 		goto cleanup_pending;
11828 
11829 	work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
11830 						  obj, 0);
11831 	work->gtt_offset += intel_crtc->dspaddr_offset;
11832 	work->rotation = crtc->primary->state->rotation;
11833 
11834 	if (mmio_flip) {
11835 		INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
11836 
11837 		i915_gem_request_assign(&work->flip_queued_req,
11838 					obj->last_write_req);
11839 
11840 		schedule_work(&work->mmio_work);
11841 	} else {
11842 		i915_gem_request_assign(&work->flip_queued_req, request);
11843 		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
11844 						   page_flip_flags);
11845 		if (ret)
11846 			goto cleanup_unpin;
11847 
11848 		intel_mark_page_flip_active(intel_crtc, work);
11849 
11850 		i915_add_request_no_flush(request);
11851 	}
11852 
11853 	i915_gem_track_fb(intel_fb_obj(old_fb), obj,
11854 			  to_intel_plane(primary)->frontbuffer_bit);
11855 	mutex_unlock(&dev->struct_mutex);
11856 
11857 	intel_frontbuffer_flip_prepare(dev,
11858 				       to_intel_plane(primary)->frontbuffer_bit);
11859 
11860 	trace_i915_flip_request(intel_crtc->plane, obj);
11861 
11862 	return 0;
11863 
11864 cleanup_unpin:
11865 	intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
11866 cleanup_pending:
11867 	if (!IS_ERR_OR_NULL(request))
11868 		i915_add_request_no_flush(request);
11869 	atomic_dec(&intel_crtc->unpin_work_count);
11870 	mutex_unlock(&dev->struct_mutex);
11871 cleanup:
11872 	crtc->primary->fb = old_fb;
11873 	update_state_fb(crtc->primary);
11874 
11875 	drm_gem_object_unreference_unlocked(&obj->base);
11876 	drm_framebuffer_unreference(work->old_fb);
11877 
11878 	spin_lock_irq(&dev->event_lock);
11879 	intel_crtc->flip_work = NULL;
11880 	spin_unlock_irq(&dev->event_lock);
11881 
11882 	drm_crtc_vblank_put(crtc);
11883 free_work:
11884 	kfree(work);
11885 
11886 	if (ret == -EIO) {
11887 		struct drm_atomic_state *state;
11888 		struct drm_plane_state *plane_state;
11889 
11890 out_hang:
11891 		state = drm_atomic_state_alloc(dev);
11892 		if (!state)
11893 			return -ENOMEM;
11894 		state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
11895 
11896 retry:
11897 		plane_state = drm_atomic_get_plane_state(state, primary);
11898 		ret = PTR_ERR_OR_ZERO(plane_state);
11899 		if (!ret) {
11900 			drm_atomic_set_fb_for_plane(plane_state, fb);
11901 
11902 			ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
11903 			if (!ret)
11904 				ret = drm_atomic_commit(state);
11905 		}
11906 
11907 		if (ret == -EDEADLK) {
11908 			drm_modeset_backoff(state->acquire_ctx);
11909 			drm_atomic_state_clear(state);
11910 			goto retry;
11911 		}
11912 
11913 		if (ret)
11914 			drm_atomic_state_free(state);
11915 
11916 		if (ret == 0 && event) {
11917 			spin_lock_irq(&dev->event_lock);
11918 			drm_crtc_send_vblank_event(crtc, event);
11919 			spin_unlock_irq(&dev->event_lock);
11920 		}
11921 	}
11922 	return ret;
11923 }
11926 /**
11927  * intel_wm_need_update - Check whether watermarks need updating
11928  * @plane: drm plane
11929  * @state: new plane state
11930  *
11931  * Check current plane state versus the new one to determine whether
11932  * watermarks need to be recalculated.
11933  *
 * Returns true if the watermarks need to be recalculated, false otherwise.
11935  */
11936 static bool intel_wm_need_update(struct drm_plane *plane,
11937 				 struct drm_plane_state *state)
11938 {
11939 	struct intel_plane_state *new = to_intel_plane_state(state);
11940 	struct intel_plane_state *cur = to_intel_plane_state(plane->state);
11941 
	/* Update watermarks on visibility, tiling, rotation or size changes. */
11943 	if (new->visible != cur->visible)
11944 		return true;
11945 
11946 	if (!cur->base.fb || !new->base.fb)
11947 		return false;
11948 
11949 	if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
11950 	    cur->base.rotation != new->base.rotation ||
11951 	    drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
11952 	    drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
11953 	    drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
11954 	    drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
11955 		return true;
11956 
11957 	return false;
11958 }
11959 
11960 static bool needs_scaling(struct intel_plane_state *state)
11961 {
11962 	int src_w = drm_rect_width(&state->src) >> 16;
11963 	int src_h = drm_rect_height(&state->src) >> 16;
11964 	int dst_w = drm_rect_width(&state->dst);
11965 	int dst_h = drm_rect_height(&state->dst);
11966 
11967 	return (src_w != dst_w || src_h != dst_h);
11968 }
11969 
11970 int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11971 				    struct drm_plane_state *plane_state)
11972 {
11973 	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
11974 	struct drm_crtc *crtc = crtc_state->crtc;
11975 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11976 	struct drm_plane *plane = plane_state->plane;
11977 	struct drm_device *dev = crtc->dev;
11978 	struct drm_i915_private *dev_priv = to_i915(dev);
11979 	struct intel_plane_state *old_plane_state =
11980 		to_intel_plane_state(plane->state);
11981 	bool mode_changed = needs_modeset(crtc_state);
11982 	bool was_crtc_enabled = crtc->state->active;
11983 	bool is_crtc_enabled = crtc_state->active;
11984 	bool turn_off, turn_on, visible, was_visible;
11985 	struct drm_framebuffer *fb = plane_state->fb;
11986 	int ret;
11987 
	if (INTEL_INFO(dev)->gen >= 9 &&
	    plane->type != DRM_PLANE_TYPE_CURSOR) {
11990 		ret = skl_update_scaler_plane(
11991 			to_intel_crtc_state(crtc_state),
11992 			to_intel_plane_state(plane_state));
11993 		if (ret)
11994 			return ret;
11995 	}
11996 
11997 	was_visible = old_plane_state->visible;
11998 	visible = to_intel_plane_state(plane_state)->visible;
11999 
12000 	if (!was_crtc_enabled && WARN_ON(was_visible))
12001 		was_visible = false;
12002 
12003 	/*
12004 	 * Visibility is calculated as if the crtc was on, but
12005 	 * after scaler setup everything depends on it being off
12006 	 * when the crtc isn't active.
12007 	 *
12008 	 * FIXME this is wrong for watermarks. Watermarks should also
12009 	 * be computed as if the pipe would be active. Perhaps move
12010 	 * per-plane wm computation to the .check_plane() hook, and
12011 	 * only combine the results from all planes in the current place?
12012 	 */
12013 	if (!is_crtc_enabled)
12014 		to_intel_plane_state(plane_state)->visible = visible = false;
12015 
12016 	if (!was_visible && !visible)
12017 		return 0;
12018 
12019 	if (fb != old_plane_state->base.fb)
12020 		pipe_config->fb_changed = true;
12021 
12022 	turn_off = was_visible && (!visible || mode_changed);
12023 	turn_on = visible && (!was_visible || mode_changed);
12024 
12025 	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
12026 			 intel_crtc->base.base.id,
12027 			 intel_crtc->base.name,
12028 			 plane->base.id, plane->name,
12029 			 fb ? fb->base.id : -1);
12030 
12031 	DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
12032 			 plane->base.id, plane->name,
12033 			 was_visible, visible,
12034 			 turn_off, turn_on, mode_changed);
12035 
12036 	if (turn_on) {
12037 		pipe_config->update_wm_pre = true;
12038 
12039 		/* must disable cxsr around plane enable/disable */
12040 		if (plane->type != DRM_PLANE_TYPE_CURSOR)
12041 			pipe_config->disable_cxsr = true;
12042 	} else if (turn_off) {
12043 		pipe_config->update_wm_post = true;
12044 
12045 		/* must disable cxsr around plane enable/disable */
12046 		if (plane->type != DRM_PLANE_TYPE_CURSOR)
12047 			pipe_config->disable_cxsr = true;
12048 	} else if (intel_wm_need_update(plane, plane_state)) {
12049 		/* FIXME bollocks */
12050 		pipe_config->update_wm_pre = true;
12051 		pipe_config->update_wm_post = true;
12052 	}
12053 
12054 	/* Pre-gen9 platforms need two-step watermark updates */
12055 	if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) &&
12056 	    INTEL_INFO(dev)->gen < 9 && dev_priv->display.optimize_watermarks)
12057 		to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true;
12058 
12059 	if (visible || was_visible)
12060 		pipe_config->fb_bits |= to_intel_plane(plane)->frontbuffer_bit;
12061 
12062 	/*
12063 	 * WaCxSRDisabledForSpriteScaling:ivb
12064 	 *
12065 	 * cstate->update_wm was already set above, so this flag will
12066 	 * take effect when we commit and program watermarks.
12067 	 */
12068 	if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev) &&
12069 	    needs_scaling(to_intel_plane_state(plane_state)) &&
12070 	    !needs_scaling(old_plane_state))
12071 		pipe_config->disable_lp_wm = true;
12072 
12073 	return 0;
12074 }
12075 
12076 static bool encoders_cloneable(const struct intel_encoder *a,
12077 			       const struct intel_encoder *b)
12078 {
12079 	/* masks could be asymmetric, so check both ways */
12080 	return a == b || (a->cloneable & (1 << b->type) &&
12081 			  b->cloneable & (1 << a->type));
12082 }
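
/*
 * Hypothetical example of the mask check above, for documentation
 * only: each encoder's cloneable mask must name the other's type
 * (or the two encoders must be one and the same).
 */
#if 0
static void example_cloneable(void)
{
	struct intel_encoder a = { .type = INTEL_OUTPUT_HDMI,
				   .cloneable = 1 << INTEL_OUTPUT_ANALOG };
	struct intel_encoder b = { .type = INTEL_OUTPUT_ANALOG,
				   .cloneable = 1 << INTEL_OUTPUT_HDMI };

	/* true: a's mask names b's type and vice versa */
	WARN_ON(!encoders_cloneable(&a, &b));
}
#endif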
12083 
12084 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
12085 					 struct intel_crtc *crtc,
12086 					 struct intel_encoder *encoder)
12087 {
12088 	struct intel_encoder *source_encoder;
12089 	struct drm_connector *connector;
12090 	struct drm_connector_state *connector_state;
12091 	int i;
12092 
12093 	for_each_connector_in_state(state, connector, connector_state, i) {
12094 		if (connector_state->crtc != &crtc->base)
12095 			continue;
12096 
12097 		source_encoder =
12098 			to_intel_encoder(connector_state->best_encoder);
12099 		if (!encoders_cloneable(encoder, source_encoder))
12100 			return false;
12101 	}
12102 
12103 	return true;
12104 }
12105 
12106 static bool check_encoder_cloning(struct drm_atomic_state *state,
12107 				  struct intel_crtc *crtc)
12108 {
12109 	struct intel_encoder *encoder;
12110 	struct drm_connector *connector;
12111 	struct drm_connector_state *connector_state;
12112 	int i;
12113 
12114 	for_each_connector_in_state(state, connector, connector_state, i) {
12115 		if (connector_state->crtc != &crtc->base)
12116 			continue;
12117 
12118 		encoder = to_intel_encoder(connector_state->best_encoder);
12119 		if (!check_single_encoder_cloning(state, crtc, encoder))
12120 			return false;
12121 	}
12122 
12123 	return true;
12124 }
12125 
12126 static int intel_crtc_atomic_check(struct drm_crtc *crtc,
12127 				   struct drm_crtc_state *crtc_state)
12128 {
12129 	struct drm_device *dev = crtc->dev;
12130 	struct drm_i915_private *dev_priv = dev->dev_private;
12131 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12132 	struct intel_crtc_state *pipe_config =
12133 		to_intel_crtc_state(crtc_state);
12134 	struct drm_atomic_state *state = crtc_state->state;
12135 	int ret;
12136 	bool mode_changed = needs_modeset(crtc_state);
12137 
12138 	if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
12139 		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
12140 		return -EINVAL;
12141 	}
12142 
12143 	if (mode_changed && !crtc_state->active)
12144 		pipe_config->update_wm_post = true;
12145 
12146 	if (mode_changed && crtc_state->enable &&
12147 	    dev_priv->display.crtc_compute_clock &&
12148 	    !WARN_ON(pipe_config->shared_dpll)) {
12149 		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
12150 							   pipe_config);
12151 		if (ret)
12152 			return ret;
12153 	}
12154 
12155 	if (crtc_state->color_mgmt_changed) {
12156 		ret = intel_color_check(crtc, crtc_state);
12157 		if (ret)
12158 			return ret;
12159 	}
12160 
12161 	ret = 0;
12162 	if (dev_priv->display.compute_pipe_wm) {
12163 		ret = dev_priv->display.compute_pipe_wm(pipe_config);
12164 		if (ret) {
12165 			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
12166 			return ret;
12167 		}
12168 	}
12169 
12170 	if (dev_priv->display.compute_intermediate_wm &&
12171 	    !to_intel_atomic_state(state)->skip_intermediate_wm) {
12172 		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
12173 			return 0;
12174 
12175 		/*
12176 		 * Calculate 'intermediate' watermarks that satisfy both the
12177 		 * old state and the new state.  We can program these
12178 		 * immediately.
12179 		 */
12180 		ret = dev_priv->display.compute_intermediate_wm(crtc->dev,
12181 								intel_crtc,
12182 								pipe_config);
12183 		if (ret) {
12184 			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
12185 			return ret;
12186 		}
12187 	} else if (dev_priv->display.compute_intermediate_wm) {
12188 		if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
12189 			pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
12190 	}
12191 
12192 	if (INTEL_INFO(dev)->gen >= 9) {
12193 		if (mode_changed)
12194 			ret = skl_update_scaler_crtc(pipe_config);
12195 
12196 		if (!ret)
12197 			ret = intel_atomic_setup_scalers(dev, intel_crtc,
12198 							 pipe_config);
12199 	}
12200 
12201 	return ret;
12202 }
12203 
12204 static const struct drm_crtc_helper_funcs intel_helper_funcs = {
12205 	.mode_set_base_atomic = intel_pipe_set_base_atomic,
12206 	.atomic_begin = intel_begin_crtc_commit,
12207 	.atomic_flush = intel_finish_crtc_commit,
12208 	.atomic_check = intel_crtc_atomic_check,
12209 };
12210 
12211 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
12212 {
12213 	struct intel_connector *connector;
12214 
12215 	for_each_intel_connector(dev, connector) {
12216 		if (connector->base.state->crtc)
12217 			drm_connector_unreference(&connector->base);
12218 
12219 		if (connector->base.encoder) {
12220 			connector->base.state->best_encoder =
12221 				connector->base.encoder;
12222 			connector->base.state->crtc =
12223 				connector->base.encoder->crtc;
12224 
12225 			drm_connector_reference(&connector->base);
12226 		} else {
12227 			connector->base.state->best_encoder = NULL;
12228 			connector->base.state->crtc = NULL;
12229 		}
12230 	}
12231 }
12232 
12233 static void
12234 connected_sink_compute_bpp(struct intel_connector *connector,
12235 			   struct intel_crtc_state *pipe_config)
12236 {
12237 	int bpp = pipe_config->pipe_bpp;
12238 
12239 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constraints\n",
12240 		connector->base.base.id,
12241 		connector->base.name);
12242 
12243 	/* Don't use an invalid EDID bpc value */
12244 	if (connector->base.display_info.bpc &&
12245 	    connector->base.display_info.bpc * 3 < bpp) {
12246 		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
12247 			      bpp, connector->base.display_info.bpc*3);
12248 		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
12249 	}
12250 
12251 	/* Clamp bpp to 8 on screens without EDID 1.4 */
12252 	if (connector->base.display_info.bpc == 0 && bpp > 24) {
12253 		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
12254 			      bpp);
12255 		pipe_config->pipe_bpp = 24;
12256 	}
12257 }
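
/*
 * Worked example of the clamping above: a sink whose EDID reports
 * 8 bpc supports at most 8 * 3 = 24 bpp across the three colour
 * channels, so a pipe_bpp of 30 is clamped to 24 here, and a sink
 * reporting bpc == 0 (no usable EDID 1.4 data) is likewise limited
 * to the 24 bpp default.
 */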
12258 
12259 static int
12260 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
12261 			  struct intel_crtc_state *pipe_config)
12262 {
12263 	struct drm_device *dev = crtc->base.dev;
12264 	struct drm_atomic_state *state;
12265 	struct drm_connector *connector;
12266 	struct drm_connector_state *connector_state;
12267 	int bpp, i;
12268 
12269 	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
12270 		bpp = 10*3;
12271 	else if (INTEL_INFO(dev)->gen >= 5)
12272 		bpp = 12*3;
12273 	else
12274 		bpp = 8*3;
12275 
12276 
12277 	pipe_config->pipe_bpp = bpp;
12278 
12279 	state = pipe_config->base.state;
12280 
12281 	/* Clamp display bpp to EDID value */
12282 	for_each_connector_in_state(state, connector, connector_state, i) {
12283 		if (connector_state->crtc != &crtc->base)
12284 			continue;
12285 
12286 		connected_sink_compute_bpp(to_intel_connector(connector),
12287 					   pipe_config);
12288 	}
12289 
12290 	return bpp;
12291 }
12292 
12293 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
12294 {
12295 	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
12296 			"type: 0x%x flags: 0x%x\n",
12297 		mode->crtc_clock,
12298 		mode->crtc_hdisplay, mode->crtc_hsync_start,
12299 		mode->crtc_hsync_end, mode->crtc_htotal,
12300 		mode->crtc_vdisplay, mode->crtc_vsync_start,
12301 		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
12302 }
12303 
12304 static void intel_dump_pipe_config(struct intel_crtc *crtc,
12305 				   struct intel_crtc_state *pipe_config,
12306 				   const char *context)
12307 {
12308 	struct drm_device *dev = crtc->base.dev;
12309 	struct drm_plane *plane;
12310 	struct intel_plane *intel_plane;
12311 	struct intel_plane_state *state;
12312 	struct drm_framebuffer *fb;
12313 
12314 	DRM_DEBUG_KMS("[CRTC:%d:%s]%s config %p for pipe %c\n",
12315 		      crtc->base.base.id, crtc->base.name,
12316 		      context, pipe_config, pipe_name(crtc->pipe));
12317 
12318 	DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
12319 	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
12320 		      pipe_config->pipe_bpp, pipe_config->dither);
12321 	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
12322 		      pipe_config->has_pch_encoder,
12323 		      pipe_config->fdi_lanes,
12324 		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
12325 		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
12326 		      pipe_config->fdi_m_n.tu);
12327 	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
12328 		      pipe_config->has_dp_encoder,
12329 		      pipe_config->lane_count,
12330 		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
12331 		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
12332 		      pipe_config->dp_m_n.tu);
12333 
12334 	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
12335 		      pipe_config->has_dp_encoder,
12336 		      pipe_config->lane_count,
12337 		      pipe_config->dp_m2_n2.gmch_m,
12338 		      pipe_config->dp_m2_n2.gmch_n,
12339 		      pipe_config->dp_m2_n2.link_m,
12340 		      pipe_config->dp_m2_n2.link_n,
12341 		      pipe_config->dp_m2_n2.tu);
12342 
12343 	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
12344 		      pipe_config->has_audio,
12345 		      pipe_config->has_infoframe);
12346 
12347 	DRM_DEBUG_KMS("requested mode:\n");
12348 	drm_mode_debug_printmodeline(&pipe_config->base.mode);
12349 	DRM_DEBUG_KMS("adjusted mode:\n");
12350 	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
12351 	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
12352 	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
12353 	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
12354 		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
12355 	DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
12356 		      crtc->num_scalers,
12357 		      pipe_config->scaler_state.scaler_users,
12358 		      pipe_config->scaler_state.scaler_id);
12359 	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
12360 		      pipe_config->gmch_pfit.control,
12361 		      pipe_config->gmch_pfit.pgm_ratios,
12362 		      pipe_config->gmch_pfit.lvds_border_bits);
12363 	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
12364 		      pipe_config->pch_pfit.pos,
12365 		      pipe_config->pch_pfit.size,
12366 		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
12367 	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
12368 	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
12369 
12370 	if (IS_BROXTON(dev)) {
12371 		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x, "
12372 			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
12373 			      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
12374 			      pipe_config->ddi_pll_sel,
12375 			      pipe_config->dpll_hw_state.ebb0,
12376 			      pipe_config->dpll_hw_state.ebb4,
12377 			      pipe_config->dpll_hw_state.pll0,
12378 			      pipe_config->dpll_hw_state.pll1,
12379 			      pipe_config->dpll_hw_state.pll2,
12380 			      pipe_config->dpll_hw_state.pll3,
12381 			      pipe_config->dpll_hw_state.pll6,
12382 			      pipe_config->dpll_hw_state.pll8,
12383 			      pipe_config->dpll_hw_state.pll9,
12384 			      pipe_config->dpll_hw_state.pll10,
12385 			      pipe_config->dpll_hw_state.pcsdw12);
12386 	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
12387 		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
12388 			      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
12389 			      pipe_config->ddi_pll_sel,
12390 			      pipe_config->dpll_hw_state.ctrl1,
12391 			      pipe_config->dpll_hw_state.cfgcr1,
12392 			      pipe_config->dpll_hw_state.cfgcr2);
12393 	} else if (HAS_DDI(dev)) {
12394 		DRM_DEBUG_KMS("ddi_pll_sel: 0x%x; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
12395 			      pipe_config->ddi_pll_sel,
12396 			      pipe_config->dpll_hw_state.wrpll,
12397 			      pipe_config->dpll_hw_state.spll);
12398 	} else {
12399 		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
12400 			      "fp0: 0x%x, fp1: 0x%x\n",
12401 			      pipe_config->dpll_hw_state.dpll,
12402 			      pipe_config->dpll_hw_state.dpll_md,
12403 			      pipe_config->dpll_hw_state.fp0,
12404 			      pipe_config->dpll_hw_state.fp1);
12405 	}
12406 
12407 	DRM_DEBUG_KMS("planes on this crtc\n");
12408 	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
12409 		intel_plane = to_intel_plane(plane);
12410 		if (intel_plane->pipe != crtc->pipe)
12411 			continue;
12412 
12413 		state = to_intel_plane_state(plane->state);
12414 		fb = state->base.fb;
12415 		if (!fb) {
12416 			DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
12417 				      plane->base.id, plane->name, state->scaler_id);
12418 			continue;
12419 		}
12420 
12421 		DRM_DEBUG_KMS("[PLANE:%d:%s] enabled\n",
12422 			      plane->base.id, plane->name);
12423 		DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s\n",
12424 			      fb->base.id, fb->width, fb->height,
12425 			      drm_get_format_name(fb->pixel_format));
12426 		DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
12427 			      state->scaler_id,
12428 			      state->src.x1 >> 16, state->src.y1 >> 16,
12429 			      drm_rect_width(&state->src) >> 16,
12430 			      drm_rect_height(&state->src) >> 16,
12431 			      state->dst.x1, state->dst.y1,
12432 			      drm_rect_width(&state->dst),
12433 			      drm_rect_height(&state->dst));
12434 	}
12435 }
12436 
12437 static bool check_digital_port_conflicts(struct drm_atomic_state *state)
12438 {
12439 	struct drm_device *dev = state->dev;
12440 	struct drm_connector *connector;
12441 	unsigned int used_ports = 0;
12442 
12443 	/*
12444 	 * Walk the connector list instead of the encoder
12445 	 * list to detect the problem on ddi platforms
12446 	 * where there's just one encoder per digital port.
12447 	 */
12448 	drm_for_each_connector(connector, dev) {
12449 		struct drm_connector_state *connector_state;
12450 		struct intel_encoder *encoder;
12451 
12452 		connector_state = drm_atomic_get_existing_connector_state(state, connector);
12453 		if (!connector_state)
12454 			connector_state = connector->state;
12455 
12456 		if (!connector_state->best_encoder)
12457 			continue;
12458 
12459 		encoder = to_intel_encoder(connector_state->best_encoder);
12460 
12461 		WARN_ON(!connector_state->crtc);
12462 
12463 		switch (encoder->type) {
12464 			unsigned int port_mask;
12465 		case INTEL_OUTPUT_UNKNOWN:
12466 			if (WARN_ON(!HAS_DDI(dev)))
12467 				break;
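			/* fall through: on DDI, UNKNOWN sits on a digital port too */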
12468 		case INTEL_OUTPUT_DISPLAYPORT:
12469 		case INTEL_OUTPUT_HDMI:
12470 		case INTEL_OUTPUT_EDP:
12471 			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
12472 
12473 			/* the same port mustn't appear more than once */
12474 			if (used_ports & port_mask)
12475 				return false;
12476 
12477 			used_ports |= port_mask;
12478 		default:
12479 			break;
12480 		}
12481 	}
12482 
12483 	return true;
12484 }
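
/*
 * Example of what the check above rejects: two connectors whose
 * best_encoders both resolve to, say, digital port B compute the same
 * port_mask == 1 << PORT_B; the second one trips the used_ports test
 * and the configuration is refused as conflicting.
 */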
12485 
12486 static void
12487 clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
12488 {
12489 	struct drm_crtc_state tmp_state;
12490 	struct intel_crtc_scaler_state scaler_state;
12491 	struct intel_dpll_hw_state dpll_hw_state;
12492 	struct intel_shared_dpll *shared_dpll;
12493 	uint32_t ddi_pll_sel;
12494 	bool force_thru;
12495 
12496 	/* FIXME: before the switch to atomic started, a new pipe_config was
12497 	 * kzalloc'd. Code that depends on any field being zero should be
12498 	 * fixed, so that the crtc_state can be safely duplicated. For now,
12499 	 * only fields that are known not to cause problems are preserved. */
12500 
12501 	tmp_state = crtc_state->base;
12502 	scaler_state = crtc_state->scaler_state;
12503 	shared_dpll = crtc_state->shared_dpll;
12504 	dpll_hw_state = crtc_state->dpll_hw_state;
12505 	ddi_pll_sel = crtc_state->ddi_pll_sel;
12506 	force_thru = crtc_state->pch_pfit.force_thru;
12507 
12508 	memset(crtc_state, 0, sizeof *crtc_state);
12509 
12510 	crtc_state->base = tmp_state;
12511 	crtc_state->scaler_state = scaler_state;
12512 	crtc_state->shared_dpll = shared_dpll;
12513 	crtc_state->dpll_hw_state = dpll_hw_state;
12514 	crtc_state->ddi_pll_sel = ddi_pll_sel;
12515 	crtc_state->pch_pfit.force_thru = force_thru;
12516 }
12517 
12518 static int
12519 intel_modeset_pipe_config(struct drm_crtc *crtc,
12520 			  struct intel_crtc_state *pipe_config)
12521 {
12522 	struct drm_atomic_state *state = pipe_config->base.state;
12523 	struct intel_encoder *encoder;
12524 	struct drm_connector *connector;
12525 	struct drm_connector_state *connector_state;
12526 	int base_bpp, ret = -EINVAL;
12527 	int i;
12528 	bool retry = true;
12529 
12530 	clear_intel_crtc_state(pipe_config);
12531 
12532 	pipe_config->cpu_transcoder =
12533 		(enum transcoder) to_intel_crtc(crtc)->pipe;
12534 
12535 	/*
12536 	 * Sanitize sync polarity flags based on the requested ones. If neither
12537 	 * positive nor negative polarity is requested, treat this as meaning
12538 	 * negative polarity.
12539 	 */
12540 	if (!(pipe_config->base.adjusted_mode.flags &
12541 	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
12542 		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
12543 
12544 	if (!(pipe_config->base.adjusted_mode.flags &
12545 	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
12546 		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
12547 
12548 	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
12549 					     pipe_config);
12550 	if (base_bpp < 0)
12551 		goto fail;
12552 
12553 	/*
12554 	 * Determine the real pipe dimensions. Note that stereo modes can
12555 	 * increase the actual pipe size due to the frame doubling and
12556 	 * insertion of additional space for blanks between the frames. This
12557 	 * is stored in the crtc timings. We use the requested mode to do this
12558 	 * computation to clearly distinguish it from the adjusted mode, which
12559 	 * can be changed by the connectors in the below retry loop.
12560 	 */
12561 	drm_crtc_get_hv_timing(&pipe_config->base.mode,
12562 			       &pipe_config->pipe_src_w,
12563 			       &pipe_config->pipe_src_h);
12564 
12565 encoder_retry:
12566 	/* Ensure the port clock defaults are reset when retrying. */
12567 	pipe_config->port_clock = 0;
12568 	pipe_config->pixel_multiplier = 1;
12569 
12570 	/* Fill in default crtc timings, allow encoders to overwrite them. */
12571 	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
12572 			      CRTC_STEREO_DOUBLE);
12573 
12574 	/* Pass our mode to the connectors and the CRTC to give them a chance to
12575 	 * adjust it according to limitations or connector properties, and also
12576 	 * a chance to reject the mode entirely.
12577 	 */
12578 	for_each_connector_in_state(state, connector, connector_state, i) {
12579 		if (connector_state->crtc != crtc)
12580 			continue;
12581 
12582 		encoder = to_intel_encoder(connector_state->best_encoder);
12583 
12584 		if (!encoder->compute_config(encoder, pipe_config)) {
12585 			DRM_DEBUG_KMS("Encoder config failure\n");
12586 			goto fail;
12587 		}
12588 	}
12589 
12590 	/* Set default port clock if not overwritten by the encoder. Needs to be
12591 	 * done afterwards in case the encoder adjusts the mode. */
12592 	if (!pipe_config->port_clock)
12593 		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
12594 			* pipe_config->pixel_multiplier;
12595 
12596 	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
12597 	if (ret < 0) {
12598 		DRM_DEBUG_KMS("CRTC fixup failed\n");
12599 		goto fail;
12600 	}
12601 
12602 	if (ret == RETRY) {
12603 		if (WARN(!retry, "loop in pipe configuration computation\n")) {
12604 			ret = -EINVAL;
12605 			goto fail;
12606 		}
12607 
12608 		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
12609 		retry = false;
12610 		goto encoder_retry;
12611 	}
12612 
12613 	/* Dithering seems not to pass bits through correctly when it should, so
12614 	 * only enable it on 6bpc panels. */
12615 	pipe_config->dither = pipe_config->pipe_bpp == 6*3;
12616 	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
12617 		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
12618 
12619 fail:
12620 	return ret;
12621 }
12622 
12623 static void
12624 intel_modeset_update_crtc_state(struct drm_atomic_state *state)
12625 {
12626 	struct drm_crtc *crtc;
12627 	struct drm_crtc_state *crtc_state;
12628 	int i;
12629 
12630 	/* Double check state. */
12631 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
12632 		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
12633 
12634 		/* Update hwmode for vblank functions */
12635 		if (crtc->state->active)
12636 			crtc->hwmode = crtc->state->adjusted_mode;
12637 		else
12638 			crtc->hwmode.crtc_clock = 0;
12639 
12640 		/*
12641 		 * Update legacy state to satisfy fbc code. This can
12642 		 * be removed when fbc uses the atomic state.
12643 		 */
12644 		if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
12645 			struct drm_plane_state *plane_state = crtc->primary->state;
12646 
12647 			crtc->primary->fb = plane_state->fb;
12648 			crtc->x = plane_state->src_x >> 16;
12649 			crtc->y = plane_state->src_y >> 16;
12650 		}
12651 	}
12652 }
12653 
12654 static bool intel_fuzzy_clock_check(int clock1, int clock2)
12655 {
12656 	int diff;
12657 
12658 	if (clock1 == clock2)
12659 		return true;
12660 
12661 	if (!clock1 || !clock2)
12662 		return false;
12663 
12664 	diff = abs(clock1 - clock2);
12665 
12666 	if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
12667 		return true;
12668 
12669 	return false;
12670 }
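
/*
 * Worked example of the fuzzy tolerance above: the check passes while
 * the difference stays under 5% of the two clocks' sum. For 100000 kHz
 * vs 104000 kHz, (4000 + 204000) * 100 / 204000 == 101 < 105, so the
 * clocks match; for 100000 kHz vs 112000 kHz, (12000 + 212000) * 100
 * / 212000 == 105, which is not < 105, so they do not.
 */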
12671 
12672 #define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
12673 	list_for_each_entry((intel_crtc), \
12674 			    &(dev)->mode_config.crtc_list, \
12675 			    base.head) \
12676 		for_each_if (mask & (1 <<(intel_crtc)->pipe))
12677 
12678 static bool
12679 intel_compare_m_n(unsigned int m, unsigned int n,
12680 		  unsigned int m2, unsigned int n2,
12681 		  bool exact)
12682 {
12683 	if (m == m2 && n == n2)
12684 		return true;
12685 
12686 	if (exact || !m || !n || !m2 || !n2)
12687 		return false;
12688 
12689 	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12690 
12691 	if (n > n2) {
12692 		while (n > n2) {
12693 			m2 <<= 1;
12694 			n2 <<= 1;
12695 		}
12696 	} else if (n < n2) {
12697 		while (n < n2) {
12698 			m <<= 1;
12699 			n <<= 1;
12700 		}
12701 	}
12702 
12703 	if (n != n2)
12704 		return false;
12705 
12706 	return intel_fuzzy_clock_check(m, m2);
12707 }
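
/*
 * Worked example for the inexact path above: comparing m/n = 10/20
 * against m2/n2 = 22/40, the pair with the smaller denominator is
 * doubled until both share n == 40 (10/20 becomes 20/40), and the
 * final call fuzzily compares m == 20 against m2 == 22 just like two
 * clocks. Denominators that never meet under doubling fail early.
 */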
12708 
12709 static bool
12710 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12711 		       struct intel_link_m_n *m2_n2,
12712 		       bool adjust)
12713 {
12714 	if (m_n->tu == m2_n2->tu &&
12715 	    intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12716 			      m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
12717 	    intel_compare_m_n(m_n->link_m, m_n->link_n,
12718 			      m2_n2->link_m, m2_n2->link_n, !adjust)) {
12719 		if (adjust)
12720 			*m2_n2 = *m_n;
12721 
12722 		return true;
12723 	}
12724 
12725 	return false;
12726 }
12727 
12728 static bool
12729 intel_pipe_config_compare(struct drm_device *dev,
12730 			  struct intel_crtc_state *current_config,
12731 			  struct intel_crtc_state *pipe_config,
12732 			  bool adjust)
12733 {
12734 	bool ret = true;
12735 
12736 #define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
12737 	do { \
12738 		if (!adjust) \
12739 			DRM_ERROR(fmt, ##__VA_ARGS__); \
12740 		else \
12741 			DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
12742 	} while (0)
12743 
12744 #define PIPE_CONF_CHECK_X(name)	\
12745 	if (current_config->name != pipe_config->name) { \
12746 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12747 			  "(expected 0x%08x, found 0x%08x)\n", \
12748 			  current_config->name, \
12749 			  pipe_config->name); \
12750 		ret = false; \
12751 	}
12752 
12753 #define PIPE_CONF_CHECK_I(name)	\
12754 	if (current_config->name != pipe_config->name) { \
12755 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12756 			  "(expected %i, found %i)\n", \
12757 			  current_config->name, \
12758 			  pipe_config->name); \
12759 		ret = false; \
12760 	}
12761 
12762 #define PIPE_CONF_CHECK_P(name)	\
12763 	if (current_config->name != pipe_config->name) { \
12764 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12765 			  "(expected %p, found %p)\n", \
12766 			  current_config->name, \
12767 			  pipe_config->name); \
12768 		ret = false; \
12769 	}
12770 
12771 #define PIPE_CONF_CHECK_M_N(name) \
12772 	if (!intel_compare_link_m_n(&current_config->name, \
12773 				    &pipe_config->name,\
12774 				    adjust)) { \
12775 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12776 			  "(expected tu %i gmch %i/%i link %i/%i, " \
12777 			  "found tu %i, gmch %i/%i link %i/%i)\n", \
12778 			  current_config->name.tu, \
12779 			  current_config->name.gmch_m, \
12780 			  current_config->name.gmch_n, \
12781 			  current_config->name.link_m, \
12782 			  current_config->name.link_n, \
12783 			  pipe_config->name.tu, \
12784 			  pipe_config->name.gmch_m, \
12785 			  pipe_config->name.gmch_n, \
12786 			  pipe_config->name.link_m, \
12787 			  pipe_config->name.link_n); \
12788 		ret = false; \
12789 	}
12790 
12791 /* This is required for BDW+ where there is only one set of registers for
12792  * switching between the high and low refresh rates (RR).
12793  * This macro can be used whenever a comparison has to be made between one
12794  * hw state and multiple sw state variables.
12795  */
12796 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
12797 	if (!intel_compare_link_m_n(&current_config->name, \
12798 				    &pipe_config->name, adjust) && \
12799 	    !intel_compare_link_m_n(&current_config->alt_name, \
12800 				    &pipe_config->name, adjust)) { \
12801 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12802 			  "(expected tu %i gmch %i/%i link %i/%i, " \
12803 			  "or tu %i gmch %i/%i link %i/%i, " \
12804 			  "found tu %i, gmch %i/%i link %i/%i)\n", \
12805 			  current_config->name.tu, \
12806 			  current_config->name.gmch_m, \
12807 			  current_config->name.gmch_n, \
12808 			  current_config->name.link_m, \
12809 			  current_config->name.link_n, \
12810 			  current_config->alt_name.tu, \
12811 			  current_config->alt_name.gmch_m, \
12812 			  current_config->alt_name.gmch_n, \
12813 			  current_config->alt_name.link_m, \
12814 			  current_config->alt_name.link_n, \
12815 			  pipe_config->name.tu, \
12816 			  pipe_config->name.gmch_m, \
12817 			  pipe_config->name.gmch_n, \
12818 			  pipe_config->name.link_m, \
12819 			  pipe_config->name.link_n); \
12820 		ret = false; \
12821 	}
12822 
12823 #define PIPE_CONF_CHECK_FLAGS(name, mask)	\
12824 	if ((current_config->name ^ pipe_config->name) & (mask)) { \
12825 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
12826 			  "(expected %i, found %i)\n", \
12827 			  current_config->name & (mask), \
12828 			  pipe_config->name & (mask)); \
12829 		ret = false; \
12830 	}
12831 
12832 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
12833 	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
12834 		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12835 			  "(expected %i, found %i)\n", \
12836 			  current_config->name, \
12837 			  pipe_config->name); \
12838 		ret = false; \
12839 	}
12840 
12841 #define PIPE_CONF_QUIRK(quirk)	\
12842 	((current_config->quirks | pipe_config->quirks) & (quirk))
12843 
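	/*
	 * For reference, hand-expanded (not compiled): a check such as
	 * PIPE_CONF_CHECK_I(lane_count) becomes roughly
	 *
	 *	if (current_config->lane_count != pipe_config->lane_count) {
	 *		INTEL_ERR_OR_DBG_KMS("mismatch in lane_count "
	 *				     "(expected %i, found %i)\n", ...);
	 *		ret = false;
	 *	}
	 *
	 * so a mismatch is logged (as an error, or as a debug message when
	 * 'adjust' is set) and the comparison carries on instead of
	 * returning at the first difference.
	 */
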
12844 	PIPE_CONF_CHECK_I(cpu_transcoder);
12845 
12846 	PIPE_CONF_CHECK_I(has_pch_encoder);
12847 	PIPE_CONF_CHECK_I(fdi_lanes);
12848 	PIPE_CONF_CHECK_M_N(fdi_m_n);
12849 
12850 	PIPE_CONF_CHECK_I(has_dp_encoder);
12851 	PIPE_CONF_CHECK_I(lane_count);
12852 	PIPE_CONF_CHECK_X(lane_lat_optim_mask);
12853 
12854 	if (INTEL_INFO(dev)->gen < 8) {
12855 		PIPE_CONF_CHECK_M_N(dp_m_n);
12856 
12857 		if (current_config->has_drrs)
12858 			PIPE_CONF_CHECK_M_N(dp_m2_n2);
12859 	} else
12860 		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
12861 
12862 	PIPE_CONF_CHECK_I(has_dsi_encoder);
12863 
12864 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12865 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12866 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12867 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12868 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12869 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
12870 
12871 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12872 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12873 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12874 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12875 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12876 	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
12877 
12878 	PIPE_CONF_CHECK_I(pixel_multiplier);
12879 	PIPE_CONF_CHECK_I(has_hdmi_sink);
12880 	if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
12881 	    IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
12882 		PIPE_CONF_CHECK_I(limited_color_range);
12883 	PIPE_CONF_CHECK_I(has_infoframe);
12884 
12885 	PIPE_CONF_CHECK_I(has_audio);
12886 
12887 	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12888 			      DRM_MODE_FLAG_INTERLACE);
12889 
12890 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
12891 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12892 				      DRM_MODE_FLAG_PHSYNC);
12893 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12894 				      DRM_MODE_FLAG_NHSYNC);
12895 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12896 				      DRM_MODE_FLAG_PVSYNC);
12897 		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12898 				      DRM_MODE_FLAG_NVSYNC);
12899 	}
12900 
12901 	PIPE_CONF_CHECK_X(gmch_pfit.control);
12902 	/* pfit ratios are autocomputed by the hw on gen4+ */
12903 	if (INTEL_INFO(dev)->gen < 4)
12904 		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
12905 	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
12906 
12907 	if (!adjust) {
12908 		PIPE_CONF_CHECK_I(pipe_src_w);
12909 		PIPE_CONF_CHECK_I(pipe_src_h);
12910 
12911 		PIPE_CONF_CHECK_I(pch_pfit.enabled);
12912 		if (current_config->pch_pfit.enabled) {
12913 			PIPE_CONF_CHECK_X(pch_pfit.pos);
12914 			PIPE_CONF_CHECK_X(pch_pfit.size);
12915 		}
12916 
12917 		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
12918 	}
12919 
12920 	/* BDW+ don't expose a synchronous way to read the state */
12921 	if (IS_HASWELL(dev))
12922 		PIPE_CONF_CHECK_I(ips_enabled);
12923 
12924 	PIPE_CONF_CHECK_I(double_wide);
12925 
12926 	PIPE_CONF_CHECK_X(ddi_pll_sel);
12927 
12928 	PIPE_CONF_CHECK_P(shared_dpll);
12929 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
12930 	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
12931 	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12932 	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
12933 	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
12934 	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
12935 	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12936 	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12937 	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
12938 
12939 	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
12940 	PIPE_CONF_CHECK_X(dsi_pll.div);
12941 
12942 	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
12943 		PIPE_CONF_CHECK_I(pipe_bpp);
12944 
12945 	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
12946 	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
12947 
12948 #undef PIPE_CONF_CHECK_X
12949 #undef PIPE_CONF_CHECK_I
12950 #undef PIPE_CONF_CHECK_P
12951 #undef PIPE_CONF_CHECK_FLAGS
12952 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
12953 #undef PIPE_CONF_QUIRK
12954 #undef INTEL_ERR_OR_DBG_KMS
12955 
12956 	return ret;
12957 }
12958 
12959 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
12960 					   const struct intel_crtc_state *pipe_config)
12961 {
12962 	if (pipe_config->has_pch_encoder) {
12963 		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
12964 							    &pipe_config->fdi_m_n);
12965 		int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
12966 
12967 		/*
12968 		 * FDI already provided one idea for the dotclock.
12969 		 * Yell if the encoder disagrees.
12970 		 */
12971 		WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
12972 		     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
12973 		     fdi_dotclock, dotclock);
12974 	}
12975 }
12976 
12977 static void verify_wm_state(struct drm_crtc *crtc,
12978 			    struct drm_crtc_state *new_state)
12979 {
12980 	struct drm_device *dev = crtc->dev;
12981 	struct drm_i915_private *dev_priv = dev->dev_private;
12982 	struct skl_ddb_allocation hw_ddb, *sw_ddb;
12983 	struct skl_ddb_entry *hw_entry, *sw_entry;
12984 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12985 	const enum i915_pipe pipe = intel_crtc->pipe;
12986 	int plane;
12987 
12988 	if (INTEL_INFO(dev)->gen < 9 || !new_state->active)
12989 		return;
12990 
12991 	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
12992 	sw_ddb = &dev_priv->wm.skl_hw.ddb;
12993 
12994 	/* planes */
12995 	for_each_plane(dev_priv, pipe, plane) {
12996 		hw_entry = &hw_ddb.plane[pipe][plane];
12997 		sw_entry = &sw_ddb->plane[pipe][plane];
12998 
12999 		if (skl_ddb_entry_equal(hw_entry, sw_entry))
13000 			continue;
13001 
13002 		DRM_ERROR("mismatch in DDB state pipe %c plane %d "
13003 			  "(expected (%u,%u), found (%u,%u))\n",
13004 			  pipe_name(pipe), plane + 1,
13005 			  sw_entry->start, sw_entry->end,
13006 			  hw_entry->start, hw_entry->end);
13007 	}
13008 
13009 	/* cursor */
13010 	hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
13011 	sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
13012 
13013 	if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
13014 		DRM_ERROR("mismatch in DDB state pipe %c cursor "
13015 			  "(expected (%u,%u), found (%u,%u))\n",
13016 			  pipe_name(pipe),
13017 			  sw_entry->start, sw_entry->end,
13018 			  hw_entry->start, hw_entry->end);
13019 	}
13020 }
13021 
13022 static void
13023 verify_connector_state(struct drm_device *dev, struct drm_crtc *crtc)
13024 {
13025 	struct drm_connector *connector;
13026 
13027 	drm_for_each_connector(connector, dev) {
13028 		struct drm_encoder *encoder = connector->encoder;
13029 		struct drm_connector_state *state = connector->state;
13030 
13031 		if (state->crtc != crtc)
13032 			continue;
13033 
13034 		intel_connector_verify_state(to_intel_connector(connector));
13035 
13036 		I915_STATE_WARN(state->best_encoder != encoder,
13037 		     "connector's atomic encoder doesn't match legacy encoder\n");
13038 	}
13039 }
13040 
13041 static void
13042 verify_encoder_state(struct drm_device *dev)
13043 {
13044 	struct intel_encoder *encoder;
13045 	struct intel_connector *connector;
13046 
13047 	for_each_intel_encoder(dev, encoder) {
13048 		bool enabled = false;
13049 		enum i915_pipe pipe;
13050 
13051 		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
13052 			      encoder->base.base.id,
13053 			      encoder->base.name);
13054 
13055 		for_each_intel_connector(dev, connector) {
13056 			if (connector->base.state->best_encoder != &encoder->base)
13057 				continue;
13058 			enabled = true;
13059 
13060 			I915_STATE_WARN(connector->base.state->crtc !=
13061 					encoder->base.crtc,
13062 			     "connector's crtc doesn't match encoder crtc\n");
13063 		}
13064 
13065 		I915_STATE_WARN(!!encoder->base.crtc != enabled,
13066 		     "encoder's enabled state mismatch "
13067 		     "(expected %i, found %i)\n",
13068 		     !!encoder->base.crtc, enabled);
13069 
13070 		if (!encoder->base.crtc) {
13071 			bool active;
13072 
13073 			active = encoder->get_hw_state(encoder, &pipe);
13074 			I915_STATE_WARN(active,
13075 			     "encoder detached but still enabled on pipe %c.\n",
13076 			     pipe_name(pipe));
13077 		}
13078 	}
13079 }
13080 
13081 static void
13082 verify_crtc_state(struct drm_crtc *crtc,
13083 		  struct drm_crtc_state *old_crtc_state,
13084 		  struct drm_crtc_state *new_crtc_state)
13085 {
13086 	struct drm_device *dev = crtc->dev;
13087 	struct drm_i915_private *dev_priv = dev->dev_private;
13088 	struct intel_encoder *encoder;
13089 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13090 	struct intel_crtc_state *pipe_config, *sw_config;
13091 	struct drm_atomic_state *old_state;
13092 	bool active;
13093 
13094 	old_state = old_crtc_state->state;
13095 	__drm_atomic_helper_crtc_destroy_state(old_crtc_state);
13096 	pipe_config = to_intel_crtc_state(old_crtc_state);
13097 	memset(pipe_config, 0, sizeof(*pipe_config));
13098 	pipe_config->base.crtc = crtc;
13099 	pipe_config->base.state = old_state;
13100 
13101 	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
13102 
13103 	active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
13104 
13105 	/* hw state is inconsistent with the pipe quirk */
13106 	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
13107 	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
13108 		active = new_crtc_state->active;
13109 
13110 	I915_STATE_WARN(new_crtc_state->active != active,
13111 	     "crtc active state doesn't match with hw state "
13112 	     "(expected %i, found %i)\n", new_crtc_state->active, active);
13113 
13114 	I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
13115 	     "transitional active state does not match atomic hw state "
13116 	     "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);
13117 
13118 	for_each_encoder_on_crtc(dev, crtc, encoder) {
13119 		enum i915_pipe pipe;
13120 
13121 		active = encoder->get_hw_state(encoder, &pipe);
13122 		I915_STATE_WARN(active != new_crtc_state->active,
13123 			"[ENCODER:%i] active %i with crtc active %i\n",
13124 			encoder->base.base.id, active, new_crtc_state->active);
13125 
13126 		I915_STATE_WARN(active && intel_crtc->pipe != pipe,
13127 				"Encoder connected to wrong pipe %c\n",
13128 				pipe_name(pipe));
13129 
13130 		if (active)
13131 			encoder->get_config(encoder, pipe_config);
13132 	}
13133 
13134 	if (!new_crtc_state->active)
13135 		return;
13136 
13137 	intel_pipe_config_sanity_check(dev_priv, pipe_config);
13138 
13139 	sw_config = to_intel_crtc_state(crtc->state);
13140 	if (!intel_pipe_config_compare(dev, sw_config,
13141 				       pipe_config, false)) {
13142 		I915_STATE_WARN(1, "pipe state doesn't match!\n");
13143 		intel_dump_pipe_config(intel_crtc, pipe_config,
13144 				       "[hw state]");
13145 		intel_dump_pipe_config(intel_crtc, sw_config,
13146 				       "[sw state]");
13147 	}
13148 }
13149 
13150 static void
13151 verify_single_dpll_state(struct drm_i915_private *dev_priv,
13152 			 struct intel_shared_dpll *pll,
13153 			 struct drm_crtc *crtc,
13154 			 struct drm_crtc_state *new_state)
13155 {
13156 	struct intel_dpll_hw_state dpll_hw_state;
13157 	unsigned crtc_mask;
13158 	bool active;
13159 
13160 	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
13161 
13162 	DRM_DEBUG_KMS("%s\n", pll->name);
13163 
13164 	active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state);
13165 
13166 	if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) {
13167 		I915_STATE_WARN(!pll->on && pll->active_mask,
13168 		     "pll in active use but not on in sw tracking\n");
13169 		I915_STATE_WARN(pll->on && !pll->active_mask,
13170 		     "pll is on but not used by any active crtc\n");
13171 		I915_STATE_WARN(pll->on != active,
13172 		     "pll on state mismatch (expected %i, found %i)\n",
13173 		     pll->on, active);
13174 	}
13175 
13176 	if (!crtc) {
13177 		I915_STATE_WARN(pll->active_mask & ~pll->config.crtc_mask,
13178 				"more active pll users than references: %x vs %x\n",
13179 				pll->active_mask, pll->config.crtc_mask);
13180 
13181 		return;
13182 	}
13183 
13184 	crtc_mask = 1 << drm_crtc_index(crtc);
13185 
13186 	if (new_state->active)
13187 		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
13188 				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
13189 				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
13190 	else
13191 		I915_STATE_WARN(pll->active_mask & crtc_mask,
13192 				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
13193 				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
13194 
13195 	I915_STATE_WARN(!(pll->config.crtc_mask & crtc_mask),
13196 			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
13197 			crtc_mask, pll->config.crtc_mask);
13198 
13199 	I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state,
13200 					  &dpll_hw_state,
13201 					  sizeof(dpll_hw_state)),
13202 			"pll hw state mismatch\n");
13203 }
13204 
13205 static void
13206 verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
13207 			 struct drm_crtc_state *old_crtc_state,
13208 			 struct drm_crtc_state *new_crtc_state)
13209 {
13210 	struct drm_i915_private *dev_priv = dev->dev_private;
13211 	struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
13212 	struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
13213 
13214 	if (new_state->shared_dpll)
13215 		verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
13216 
13217 	if (old_state->shared_dpll &&
13218 	    old_state->shared_dpll != new_state->shared_dpll) {
13219 		unsigned crtc_mask = 1 << drm_crtc_index(crtc);
13220 		struct intel_shared_dpll *pll = old_state->shared_dpll;
13221 
13222 		I915_STATE_WARN(pll->active_mask & crtc_mask,
13223 				"pll active mismatch (didn't expect pipe %c in active mask)\n",
13224 				pipe_name(drm_crtc_index(crtc)));
13225 		I915_STATE_WARN(pll->config.crtc_mask & crtc_mask,
13226 				"pll enabled crtcs mismatch (found %x in enabled mask)\n",
13227 				pipe_name(drm_crtc_index(crtc)));
13228 	}
13229 }
13230 
13231 static void
13232 intel_modeset_verify_crtc(struct drm_crtc *crtc,
13233 			 struct drm_crtc_state *old_state,
13234 			 struct drm_crtc_state *new_state)
13235 {
13236 	if (!needs_modeset(new_state) &&
13237 	    !to_intel_crtc_state(new_state)->update_pipe)
13238 		return;
13239 
13240 	verify_wm_state(crtc, new_state);
13241 	verify_connector_state(crtc->dev, crtc);
13242 	verify_crtc_state(crtc, old_state, new_state);
13243 	verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
13244 }
13245 
13246 static void
13247 verify_disabled_dpll_state(struct drm_device *dev)
13248 {
13249 	struct drm_i915_private *dev_priv = dev->dev_private;
13250 	int i;
13251 
13252 	for (i = 0; i < dev_priv->num_shared_dpll; i++)
13253 		verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
13254 }
13255 
13256 static void
13257 intel_modeset_verify_disabled(struct drm_device *dev)
13258 {
13259 	verify_encoder_state(dev);
13260 	verify_connector_state(dev, NULL);
13261 	verify_disabled_dpll_state(dev);
13262 }
13263 
13264 static void update_scanline_offset(struct intel_crtc *crtc)
13265 {
13266 	struct drm_device *dev = crtc->base.dev;
13267 
13268 	/*
13269 	 * The scanline counter increments at the leading edge of hsync.
13270 	 *
13271 	 * On most platforms it starts counting from vtotal-1 on the
13272 	 * first active line. That means the scanline counter value is
13273 	 * always one less than what we would expect. I.e. just after
13274 	 * start of vblank, which also occurs at start of hsync (on the
13275 	 * last active line), the scanline counter will read vblank_start-1.
13276 	 *
13277 	 * On gen2 the scanline counter starts counting from 1 instead
13278 	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
13279 	 * to keep the value positive), instead of adding one.
13280 	 *
13281 	 * On HSW+ the behaviour of the scanline counter depends on the output
13282 	 * type. For DP ports it behaves like most other platforms, but on HDMI
13283 	 * there's an extra 1 line difference. So we need to add two instead of
13284 	 * one to the value.
13285 	 */
13286 	if (IS_GEN2(dev)) {
13287 		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
13288 		int vtotal;
13289 
13290 		vtotal = adjusted_mode->crtc_vtotal;
13291 		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
13292 			vtotal /= 2;
13293 
13294 		crtc->scanline_offset = vtotal - 1;
13295 	} else if (HAS_DDI(dev) &&
13296 		   intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
13297 		crtc->scanline_offset = 2;
13298 	} else
13299 		crtc->scanline_offset = 1;
13300 }
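
/*
 * Worked example (hypothetical 1024x768 timings, vtotal == 806): just
 * after the start of vblank most pipes read vblank_start - 1, one line
 * behind, hence the offset of 1. A gen2 pipe instead reads one line
 * ahead, so rather than subtracting 1 the code adds vtotal - 1 == 805,
 * which is the same thing modulo vtotal while keeping the value
 * positive. HSW+ HDMI outputs are one further line off and get 2.
 */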
13301 
13302 static void intel_modeset_clear_plls(struct drm_atomic_state *state)
13303 {
13304 	struct drm_device *dev = state->dev;
13305 	struct drm_i915_private *dev_priv = to_i915(dev);
13306 	struct intel_shared_dpll_config *shared_dpll = NULL;
13307 	struct drm_crtc *crtc;
13308 	struct drm_crtc_state *crtc_state;
13309 	int i;
13310 
13311 	if (!dev_priv->display.crtc_compute_clock)
13312 		return;
13313 
13314 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13315 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13316 		struct intel_shared_dpll *old_dpll =
13317 			to_intel_crtc_state(crtc->state)->shared_dpll;
13318 
13319 		if (!needs_modeset(crtc_state))
13320 			continue;
13321 
13322 		to_intel_crtc_state(crtc_state)->shared_dpll = NULL;
13323 
13324 		if (!old_dpll)
13325 			continue;
13326 
13327 		if (!shared_dpll)
13328 			shared_dpll = intel_atomic_get_shared_dpll_state(state);
13329 
13330 		intel_shared_dpll_config_put(shared_dpll, old_dpll, intel_crtc);
13331 	}
13332 }
13333 
13334 /*
13335  * This implements the workaround described in the "notes" section of the mode
13336  * set sequence documentation. When going from no pipes or single pipe to
13337  * multiple pipes, and planes are enabled after the pipe, we need to wait at
13338  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
13339  */
13340 static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
13341 {
13342 	struct drm_crtc_state *crtc_state;
13343 	struct intel_crtc *intel_crtc;
13344 	struct drm_crtc *crtc;
13345 	struct intel_crtc_state *first_crtc_state = NULL;
13346 	struct intel_crtc_state *other_crtc_state = NULL;
13347 	enum i915_pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
13348 	int i;
13349 
13350 	/* look at all crtcs that are going to be enabled during the modeset */
13351 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13352 		intel_crtc = to_intel_crtc(crtc);
13353 
13354 		if (!crtc_state->active || !needs_modeset(crtc_state))
13355 			continue;
13356 
13357 		if (first_crtc_state) {
13358 			other_crtc_state = to_intel_crtc_state(crtc_state);
13359 			break;
13360 		} else {
13361 			first_crtc_state = to_intel_crtc_state(crtc_state);
13362 			first_pipe = intel_crtc->pipe;
13363 		}
13364 	}
13365 
13366 	/* No workaround needed? */
13367 	if (!first_crtc_state)
13368 		return 0;
13369 
13370 	/* w/a possibly needed, check how many crtc's are already enabled. */
13371 	for_each_intel_crtc(state->dev, intel_crtc) {
13372 		struct intel_crtc_state *pipe_config;
13373 
13374 		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
13375 		if (IS_ERR(pipe_config))
13376 			return PTR_ERR(pipe_config);
13377 
13378 		pipe_config->hsw_workaround_pipe = INVALID_PIPE;
13379 
13380 		if (!pipe_config->base.active ||
13381 		    needs_modeset(&pipe_config->base))
13382 			continue;
13383 
13384 		/* 2 or more enabled crtcs means no need for w/a */
13385 		if (enabled_pipe != INVALID_PIPE)
13386 			return 0;
13387 
13388 		enabled_pipe = intel_crtc->pipe;
13389 	}
13390 
13391 	if (enabled_pipe != INVALID_PIPE)
13392 		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
13393 	else if (other_crtc_state)
13394 		other_crtc_state->hsw_workaround_pipe = first_pipe;
13395 
13396 	return 0;
13397 }
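
/*
 * Concrete cases of the above (pipe names hypothetical): enabling
 * pipe B while only pipe A is already running makes B wait on A
 * (B's hsw_workaround_pipe = A); enabling pipes A and B together from
 * nothing makes the second wait on the first. With two or more pipes
 * already running, no waiting is needed and nothing is set.
 */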
13398 
13399 static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13400 {
13401 	struct drm_crtc *crtc;
13402 	struct drm_crtc_state *crtc_state;
13403 	int ret = 0;
13404 
13405 	/* add all active pipes to the state */
13406 	for_each_crtc(state->dev, crtc) {
13407 		crtc_state = drm_atomic_get_crtc_state(state, crtc);
13408 		if (IS_ERR(crtc_state))
13409 			return PTR_ERR(crtc_state);
13410 
13411 		if (!crtc_state->active || needs_modeset(crtc_state))
13412 			continue;
13413 
13414 		crtc_state->mode_changed = true;
13415 
13416 		ret = drm_atomic_add_affected_connectors(state, crtc);
13417 		if (ret)
13418 			break;
13419 
13420 		ret = drm_atomic_add_affected_planes(state, crtc);
13421 		if (ret)
13422 			break;
13423 	}
13424 
13425 	return ret;
13426 }
13427 
13428 static int intel_modeset_checks(struct drm_atomic_state *state)
13429 {
13430 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13431 	struct drm_i915_private *dev_priv = state->dev->dev_private;
13432 	struct drm_crtc *crtc;
13433 	struct drm_crtc_state *crtc_state;
13434 	int ret = 0, i;
13435 
13436 	if (!check_digital_port_conflicts(state)) {
13437 		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
13438 		return -EINVAL;
13439 	}
13440 
13441 	intel_state->modeset = true;
13442 	intel_state->active_crtcs = dev_priv->active_crtcs;
13443 
13444 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13445 		if (crtc_state->active)
13446 			intel_state->active_crtcs |= 1 << i;
13447 		else
13448 			intel_state->active_crtcs &= ~(1 << i);
13449 
13450 		if (crtc_state->active != crtc->state->active)
13451 			intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
13452 	}
13453 
13454 	/*
13455 	 * See if the config requires any additional preparation, e.g.
13456 	 * to adjust global state with pipes off.  We need to do this
13457 	 * here so we can get the modeset_pipe updated config for the new
13458 	 * mode set on this crtc.  For other crtcs we need to use the
13459 	 * adjusted_mode bits in the crtc directly.
13460 	 */
13461 	if (dev_priv->display.modeset_calc_cdclk) {
13462 		if (!intel_state->cdclk_pll_vco)
13463 			intel_state->cdclk_pll_vco = dev_priv->cdclk_pll.vco;
13464 		if (!intel_state->cdclk_pll_vco)
13465 			intel_state->cdclk_pll_vco = dev_priv->skl_preferred_vco_freq;
13466 
13467 		ret = dev_priv->display.modeset_calc_cdclk(state);
13468 		if (ret < 0)
13469 			return ret;
13470 
13471 		if (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
13472 		    intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco)
13473 			ret = intel_modeset_all_pipes(state);
13474 
13475 		if (ret < 0)
13476 			return ret;
13477 
13478 		DRM_DEBUG_KMS("New atomic cdclk calculated to be %u, actual %u\n",
13479 			      intel_state->cdclk, intel_state->dev_cdclk);
13480 	} else
13481 		to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;
13482 
13483 	intel_modeset_clear_plls(state);
13484 
13485 	if (IS_HASWELL(dev_priv))
13486 		return haswell_mode_set_planes_workaround(state);
13487 
13488 	return 0;
13489 }
13490 
13491 /*
13492  * Handle calculation of various watermark data at the end of the atomic check
13493  * phase.  The code here should be run after the per-crtc and per-plane 'check'
13494  * handlers to ensure that all derived state has been updated.
13495  */
13496 static int calc_watermark_data(struct drm_atomic_state *state)
13497 {
13498 	struct drm_device *dev = state->dev;
13499 	struct drm_i915_private *dev_priv = to_i915(dev);
13500 
13501 	/* Is there platform-specific watermark information to calculate? */
13502 	if (dev_priv->display.compute_global_watermarks)
13503 		return dev_priv->display.compute_global_watermarks(state);
13504 
13505 	return 0;
13506 }
13507 
13508 /**
13509  * intel_atomic_check - validate state object
13510  * @dev: drm device
13511  * @state: state to validate
 *
 * Returns: 0 on success, negative error code on failure.
13512  */
13513 static int intel_atomic_check(struct drm_device *dev,
13514 			      struct drm_atomic_state *state)
13515 {
13516 	struct drm_i915_private *dev_priv = to_i915(dev);
13517 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13518 	struct drm_crtc *crtc;
13519 	struct drm_crtc_state *crtc_state;
13520 	int ret, i;
13521 	bool any_ms = false;
13522 
13523 	ret = drm_atomic_helper_check_modeset(dev, state);
13524 	if (ret)
13525 		return ret;
13526 
13527 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13528 		struct intel_crtc_state *pipe_config =
13529 			to_intel_crtc_state(crtc_state);
13530 
13531 		/* Catch I915_MODE_FLAG_INHERITED */
13532 		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
13533 			crtc_state->mode_changed = true;
13534 
13535 		if (!needs_modeset(crtc_state))
13536 			continue;
13537 
13538 		if (!crtc_state->enable) {
13539 			any_ms = true;
13540 			continue;
13541 		}
13542 
13543 		/* FIXME: For only active_changed we shouldn't need to do any
13544 		 * state recomputation at all. */
13545 
13546 		ret = drm_atomic_add_affected_connectors(state, crtc);
13547 		if (ret)
13548 			return ret;
13549 
13550 		ret = intel_modeset_pipe_config(crtc, pipe_config);
13551 		if (ret) {
13552 			intel_dump_pipe_config(to_intel_crtc(crtc),
13553 					       pipe_config, "[failed]");
13554 			return ret;
13555 		}
13556 
13557 		if (i915.fastboot &&
13558 		    intel_pipe_config_compare(dev,
13559 					to_intel_crtc_state(crtc->state),
13560 					pipe_config, true)) {
13561 			crtc_state->mode_changed = false;
13562 			to_intel_crtc_state(crtc_state)->update_pipe = true;
13563 		}
13564 
13565 		if (needs_modeset(crtc_state))
13566 			any_ms = true;
13567 
13568 		ret = drm_atomic_add_affected_planes(state, crtc);
13569 		if (ret)
13570 			return ret;
13571 
13572 		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
13573 				       needs_modeset(crtc_state) ?
13574 				       "[modeset]" : "[fastset]");
13575 	}
13576 
13577 	if (any_ms) {
13578 		ret = intel_modeset_checks(state);
13579 
13580 		if (ret)
13581 			return ret;
13582 	} else
13583 		intel_state->cdclk = dev_priv->cdclk_freq;
13584 
13585 	ret = drm_atomic_helper_check_planes(dev, state);
13586 	if (ret)
13587 		return ret;
13588 
13589 	intel_fbc_choose_crtc(dev_priv, state);
13590 	return calc_watermark_data(state);
13591 }
13592 
13593 static int intel_atomic_prepare_commit(struct drm_device *dev,
13594 				       struct drm_atomic_state *state,
13595 				       bool nonblock)
13596 {
13597 	struct drm_i915_private *dev_priv = dev->dev_private;
13598 	struct drm_plane_state *plane_state;
13599 	struct drm_crtc_state *crtc_state;
13600 	struct drm_plane *plane;
13601 	struct drm_crtc *crtc;
13602 	int i, ret;
13603 
13604 	for_each_crtc_in_state(state, crtc, crtc_state, i) {
13605 		if (state->legacy_cursor_update)
13606 			continue;
13607 
13608 		ret = intel_crtc_wait_for_pending_flips(crtc);
13609 		if (ret)
13610 			return ret;
13611 
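		/* With two or more flips still pending on this CRTC, flush
		 * the unpin workqueue first, presumably to keep the queue
		 * from growing unboundedly behind them. */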
13612 		if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
13613 			flush_workqueue(dev_priv->wq);
13614 	}
13615 
13616 	ret = mutex_lock_interruptible(&dev->struct_mutex);
13617 	if (ret)
13618 		return ret;
13619 
13620 	ret = drm_atomic_helper_prepare_planes(dev, state);
13621 	mutex_unlock(&dev->struct_mutex);
13622 
13623 	if (!ret && !nonblock) {
13624 		for_each_plane_in_state(state, plane, plane_state, i) {
13625 			struct intel_plane_state *intel_plane_state =
13626 				to_intel_plane_state(plane_state);
13627 
13628 			if (!intel_plane_state->wait_req)
13629 				continue;
13630 
13631 			ret = __i915_wait_request(intel_plane_state->wait_req,
13632 						  true, NULL, NULL);
13633 			if (ret) {
13634 				/* Any hang should be swallowed by the wait */
13635 				WARN_ON(ret == -EIO);
13636 				mutex_lock(&dev->struct_mutex);
13637 				drm_atomic_helper_cleanup_planes(dev, state);
13638 				mutex_unlock(&dev->struct_mutex);
13639 				break;
13640 			}
13641 		}
13642 	}
13643 
13644 	return ret;
13645 }
13646 
13647 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
13648 {
13649 	struct drm_device *dev = crtc->base.dev;
13650 
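	/*
	 * A max_vblank_count of zero means the hardware has no usable
	 * vblank counter, so fall back to the timestamp-based software
	 * counter.
	 */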
13651 	if (!dev->max_vblank_count)
13652 		return drm_accurate_vblank_count(&crtc->base);
13653 
13654 	return dev->driver->get_vblank_counter(dev, crtc->pipe);
13655 }
13656 
13657 static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
13658 					  struct drm_i915_private *dev_priv,
13659 					  unsigned crtc_mask)
13660 {
13661 	unsigned last_vblank_count[I915_MAX_PIPES];
13662 	enum i915_pipe pipe;
13663 	int ret;
13664 
13665 	if (!crtc_mask)
13666 		return;
13667 
13668 	for_each_pipe(dev_priv, pipe) {
13669 		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
13670 
13671 		if (!((1 << pipe) & crtc_mask))
13672 			continue;
13673 
13674 		ret = drm_crtc_vblank_get(crtc);
13675 		if (WARN_ON(ret != 0)) {
13676 			crtc_mask &= ~(1 << pipe);
13677 			continue;
13678 		}
13679 
13680 		last_vblank_count[pipe] = drm_crtc_vblank_count(crtc);
13681 	}
13682 
13683 	for_each_pipe(dev_priv, pipe) {
13684 		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
13685 		long lret;
13686 
13687 		if (!((1 << pipe) & crtc_mask))
13688 			continue;
13689 
13690 		lret = wait_event_timeout(dev->vblank[pipe].queue,
13691 				last_vblank_count[pipe] !=
13692 					drm_crtc_vblank_count(crtc),
13693 				msecs_to_jiffies(50));
13694 
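		/*
		 * 50 ms comfortably covers one refresh interval (a 24 Hz
		 * frame is ~42 ms), so timing out here suggests the vblank
		 * interrupt never fired.
		 */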
13695 		WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));
13696 
13697 		drm_crtc_vblank_put(crtc);
13698 	}
13699 }
13700 
13701 static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
13702 {
13703 	/* fb updated, need to unpin old fb */
13704 	if (crtc_state->fb_changed)
13705 		return true;
13706 
13707 	/* wm changes, need vblank before final wm's */
13708 	if (crtc_state->update_wm_post)
13709 		return true;
13710 
13711 	/*
13712 	 * cxsr is re-enabled after vblank.
13713 	 * This is already handled by crtc_state->update_wm_post,
13714 	 * but added for clarity.
13715 	 */
13716 	if (crtc_state->disable_cxsr)
13717 		return true;
13718 
13719 	return false;
13720 }
13721 
13722 static void intel_atomic_commit_tail(struct drm_atomic_state *state)
13723 {
13724 	struct drm_device *dev = state->dev;
13725 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13726 	struct drm_i915_private *dev_priv = dev->dev_private;
13727 	struct drm_crtc_state *old_crtc_state;
13728 	struct drm_crtc *crtc;
13729 	struct intel_crtc_state *intel_cstate;
13730 	struct drm_plane *plane;
13731 	struct drm_plane_state *plane_state;
13732 	bool hw_check = intel_state->modeset;
13733 	unsigned long put_domains[I915_MAX_PIPES] = {};
13734 	unsigned crtc_vblank_mask = 0;
13735 	int i, ret;
13736 
13737 	for_each_plane_in_state(state, plane, plane_state, i) {
13738 		struct intel_plane_state *intel_plane_state =
13739 			to_intel_plane_state(plane_state);
13740 
13741 		if (!intel_plane_state->wait_req)
13742 			continue;
13743 
13744 		ret = __i915_wait_request(intel_plane_state->wait_req,
13745 					  true, NULL, NULL);
13746 		/* EIO should be eaten, we can't get interrupted in the
13747 		 * worker, and blocking commits have already waited. */
13748 		WARN_ON(ret);
13749 	}
13750 
13751 	drm_atomic_helper_wait_for_dependencies(state);
13752 
13753 	if (intel_state->modeset) {
13754 		memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
13755 		       sizeof(intel_state->min_pixclk));
13756 		dev_priv->active_crtcs = intel_state->active_crtcs;
13757 		dev_priv->atomic_cdclk_freq = intel_state->cdclk;
13758 
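		/*
		 * Hold a modeset-wide power reference so the hardware stays
		 * powered for the register access below; dropped again after
		 * drm_atomic_helper_commit_hw_done().
		 */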
13759 		intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
13760 	}
13761 
13762 	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
13763 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13764 
13765 		if (needs_modeset(crtc->state) ||
13766 		    to_intel_crtc_state(crtc->state)->update_pipe) {
13767 			hw_check = true;
13768 
13769 			put_domains[to_intel_crtc(crtc)->pipe] =
13770 				modeset_get_crtc_power_domains(crtc,
13771 					to_intel_crtc_state(crtc->state));
13772 		}
13773 
13774 		if (!needs_modeset(crtc->state))
13775 			continue;
13776 
13777 		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
13778 
13779 		if (old_crtc_state->active) {
13780 			intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
13781 			dev_priv->display.crtc_disable(crtc);
13782 			intel_crtc->active = false;
13783 			intel_fbc_disable(intel_crtc);
13784 			intel_disable_shared_dpll(intel_crtc);
13785 
13786 			/*
13787 			 * Underruns don't always raise
13788 			 * interrupts, so check manually.
13789 			 */
13790 			intel_check_cpu_fifo_underruns(dev_priv);
13791 			intel_check_pch_fifo_underruns(dev_priv);
13792 
13793 			if (!crtc->state->active)
13794 				intel_update_watermarks(crtc);
13795 		}
13796 	}
13797 
13798 	/* Only after disabling all output pipelines that will be changed can we
13799 	 * update the output configuration. */
13800 	intel_modeset_update_crtc_state(state);
13801 
13802 	if (intel_state->modeset) {
13803 		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
13804 
13805 		if (dev_priv->display.modeset_commit_cdclk &&
13806 		    (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
13807 		     intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
13808 			dev_priv->display.modeset_commit_cdclk(state);
13809 
13810 		intel_modeset_verify_disabled(dev);
13811 	}
13812 
13813 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
13814 	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
13815 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13816 		bool modeset = needs_modeset(crtc->state);
13817 		struct intel_crtc_state *pipe_config =
13818 			to_intel_crtc_state(crtc->state);
13819 
13820 		if (modeset && crtc->state->active) {
13821 			update_scanline_offset(to_intel_crtc(crtc));
13822 			dev_priv->display.crtc_enable(crtc);
13823 		}
13824 
13825 		/* Complete events for now-disabled pipes here. */
13826 		if (modeset && !crtc->state->active && crtc->state->event) {
13827 			spin_lock_irq(&dev->event_lock);
13828 			drm_crtc_send_vblank_event(crtc, crtc->state->event);
13829 			spin_unlock_irq(&dev->event_lock);
13830 
13831 			crtc->state->event = NULL;
13832 		}
13833 
13834 		if (!modeset)
13835 			intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
13836 
13837 		if (crtc->state->active &&
13838 		    drm_atomic_get_existing_plane_state(state, crtc->primary))
13839 			intel_fbc_enable(intel_crtc, pipe_config, to_intel_plane_state(crtc->primary->state));
13840 
13841 		if (crtc->state->active)
13842 			drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
13843 
13844 		if (pipe_config->base.active && needs_vblank_wait(pipe_config))
13845 			crtc_vblank_mask |= 1 << i;
13846 	}
13847 
13848 	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
13849 	 * already, but still need the state for the delayed optimization. To
13850 	 * fix this:
13851 	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
13852 	 * - schedule that vblank worker _before_ calling hw_done
13853 	 * - at the start of commit_tail, cancel it _synchronously_
13854 	 * - switch over to the vblank wait helper in the core after that since
13855 	 *   we don't need our special handling any more.
13856 	 */
13857 	if (!state->legacy_cursor_update)
13858 		intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
13859 
13860 	/*
13861 	 * Now that the vblank has passed, we can go ahead and program the
13862 	 * optimal watermarks on platforms that need two-step watermark
13863 	 * programming.
13864 	 *
13865 	 * TODO: Move this (and other cleanup) to an async worker eventually.
13866 	 */
13867 	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
13868 		intel_cstate = to_intel_crtc_state(crtc->state);
13869 
13870 		if (dev_priv->display.optimize_watermarks)
13871 			dev_priv->display.optimize_watermarks(intel_cstate);
13872 	}
13873 
13874 	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
13875 		intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
13876 
13877 		if (put_domains[i])
13878 			modeset_put_power_domains(dev_priv, put_domains[i]);
13879 
13880 		intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
13881 	}
13882 
13883 	drm_atomic_helper_commit_hw_done(state);
13884 
13885 	if (intel_state->modeset)
13886 		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
13887 
13888 	mutex_lock(&dev->struct_mutex);
13889 	drm_atomic_helper_cleanup_planes(dev, state);
13890 	mutex_unlock(&dev->struct_mutex);
13891 
13892 	drm_atomic_helper_commit_cleanup_done(state);
13893 
13894 	drm_atomic_state_free(state);
13895 
13896 	/* As one of the primary mmio accessors, KMS has a high likelihood
13897 	 * of triggering bugs in unclaimed access. After we finish
13898 	 * modesetting, see if an error has been flagged, and if so
13899 	 * enable debugging for the next modeset - and hope we catch
13900 	 * the culprit.
13901 	 *
13902 	 * XXX note that we assume display power is on at this point.
13903 	 * This might hold true now but we need to add a pm helper to check
13904 	 * unclaimed only when the hardware is on, as atomic commits
13905 	 * can happen also when the device is completely off.
13906 	 */
13907 	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
13908 }
13909 
13910 static void intel_atomic_commit_work(struct work_struct *work)
13911 {
13912 	struct drm_atomic_state *state = container_of(work,
13913 						      struct drm_atomic_state,
13914 						      commit_work);
13915 	intel_atomic_commit_tail(state);
13916 }
13917 
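/*
 * Move the frontbuffer tracking bits from each plane's old fb object to
 * its new one, so flush/invalidate events keep targeting the right GEM
 * objects.
 */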
13918 static void intel_atomic_track_fbs(struct drm_atomic_state *state)
13919 {
13920 	struct drm_plane_state *old_plane_state;
13921 	struct drm_plane *plane;
13922 	struct drm_i915_gem_object *obj, *old_obj;
13923 	struct intel_plane *intel_plane;
13924 	int i;
13925 
13926 	mutex_lock(&state->dev->struct_mutex);
13927 	for_each_plane_in_state(state, plane, old_plane_state, i) {
13928 		obj = intel_fb_obj(plane->state->fb);
13929 		old_obj = intel_fb_obj(old_plane_state->fb);
13930 		intel_plane = to_intel_plane(plane);
13931 
13932 		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13933 	}
13934 	mutex_unlock(&state->dev->struct_mutex);
13935 }
13936 
13937 /**
13938  * intel_atomic_commit - commit validated state object
13939  * @dev: DRM device
13940  * @state: the top-level driver state object
13941  * @nonblock: nonblocking commit
13942  *
13943  * This function commits a top-level state object that has been validated
13944  * with drm_atomic_helper_check().
13945  *
13946  * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
13947  * nonblocking commits are only safe for pure plane updates. Everything else
13948  * should work though.
13949  *
13950  * RETURNS
13951  * Zero for success or -errno.
13952  */
13953 static int intel_atomic_commit(struct drm_device *dev,
13954 			       struct drm_atomic_state *state,
13955 			       bool nonblock)
13956 {
13957 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13958 	struct drm_i915_private *dev_priv = dev->dev_private;
13959 	int ret = 0;
13960 
13961 	if (intel_state->modeset && nonblock) {
13962 		DRM_DEBUG_KMS("nonblocking commit for modeset not yet implemented.\n");
13963 		return -EINVAL;
13964 	}
13965 
13966 	ret = drm_atomic_helper_setup_commit(state, nonblock);
13967 	if (ret)
13968 		return ret;
13969 
13970 	INIT_WORK(&state->commit_work, intel_atomic_commit_work);
13971 
13972 	ret = intel_atomic_prepare_commit(dev, state, nonblock);
13973 	if (ret) {
13974 		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
13975 		return ret;
13976 	}
13977 
13978 	drm_atomic_helper_swap_state(state, true);
13979 	dev_priv->wm.distrust_bios_wm = false;
13980 	dev_priv->wm.skl_results = intel_state->wm_results;
13981 	intel_shared_dpll_commit(state);
13982 	intel_atomic_track_fbs(state);
13983 
13984 	if (nonblock)
13985 		queue_work(system_unbound_wq, &state->commit_work);
13986 	else
13987 		intel_atomic_commit_tail(state);
13988 
13989 	return 0;
13990 }
13991 
13992 void intel_crtc_restore_mode(struct drm_crtc *crtc)
13993 {
13994 	struct drm_device *dev = crtc->dev;
13995 	struct drm_atomic_state *state;
13996 	struct drm_crtc_state *crtc_state;
13997 	int ret;
13998 
13999 	state = drm_atomic_state_alloc(dev);
14000 	if (!state) {
14001 		DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory\n",
14002 			      crtc->base.id, crtc->name);
14003 		return;
14004 	}
14005 
14006 	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
14007 
14008 retry:
14009 	crtc_state = drm_atomic_get_crtc_state(state, crtc);
14010 	ret = PTR_ERR_OR_ZERO(crtc_state);
14011 	if (!ret) {
14012 		if (!crtc_state->active)
14013 			goto out;
14014 
14015 		crtc_state->mode_changed = true;
14016 		ret = drm_atomic_commit(state);
14017 	}
14018 
14019 	if (ret == -EDEADLK) {
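	/*
	 * -EDEADLK signals a lock inversion against a concurrent modeset:
	 * drop every acquired modeset lock via drm_modeset_backoff() and
	 * retry the whole sequence.
	 */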
14020 		drm_atomic_state_clear(state);
14021 		drm_modeset_backoff(state->acquire_ctx);
14022 		goto retry;
14023 	}
14024 
14025 	if (ret)
14026 out:
14027 		drm_atomic_state_free(state);
14028 }
14029 
14030 #undef for_each_intel_crtc_masked
14031 
14032 static const struct drm_crtc_funcs intel_crtc_funcs = {
14033 	.gamma_set = drm_atomic_helper_legacy_gamma_set,
14034 	.set_config = drm_atomic_helper_set_config,
14035 	.set_property = drm_atomic_helper_crtc_set_property,
14036 	.destroy = intel_crtc_destroy,
14037 	.page_flip = intel_crtc_page_flip,
14038 	.atomic_duplicate_state = intel_crtc_duplicate_state,
14039 	.atomic_destroy_state = intel_crtc_destroy_state,
14040 };
14041 
14042 /**
14043  * intel_prepare_plane_fb - Prepare fb for usage on plane
14044  * @plane: drm plane to prepare for
14045  * @new_state: the new plane state, including the framebuffer to present
14046  *
14047  * Prepares a framebuffer for usage on a display plane.  Generally this
14048  * involves pinning the underlying object and updating the frontbuffer tracking
14049  * bits.  Some older platforms need special physical address handling for
14050  * cursor planes.
14051  *
14052  * Must be called with struct_mutex held.
14053  *
14054  * Returns 0 on success, negative error code on failure.
14055  */
14056 int
14057 intel_prepare_plane_fb(struct drm_plane *plane,
14058 		       struct drm_plane_state *new_state)
14059 {
14060 	struct drm_device *dev = plane->dev;
14061 	struct drm_framebuffer *fb = new_state->fb;
14062 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14063 	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
14064 	struct reservation_object *resv;
14065 	int ret = 0;
14066 
14067 	if (!obj && !old_obj)
14068 		return 0;
14069 
14070 	if (old_obj) {
14071 		struct drm_crtc_state *crtc_state =
14072 			drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
14073 
14074 		/* Big Hammer, we also need to ensure that any pending
14075 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
14076 		 * current scanout is retired before unpinning the old
14077 		 * framebuffer. Note that we rely on userspace rendering
14078 		 * into the buffer attached to the pipe they are waiting
14079 		 * on. If not, userspace generates a GPU hang with IPEHR
14080 		 * pointing at the MI_WAIT_FOR_EVENT.
14081 		 *
14082 		 * This should only fail upon a hung GPU, in which case we
14083 		 * can safely continue.
14084 		 */
14085 		if (needs_modeset(crtc_state))
14086 			ret = i915_gem_object_wait_rendering(old_obj, true);
14087 		if (ret) {
14088 			/* GPU hangs should have been swallowed by the wait */
14089 			WARN_ON(ret == -EIO);
14090 			return ret;
14091 		}
14092 	}
14093 
14094 	if (!obj)
14095 		return 0;
14096 
14097 	/* For framebuffer backed by dmabuf, wait for fence */
14098 	resv = i915_gem_object_get_dmabuf_resv(obj);
14099 	if (resv) {
14100 		long lret;
14101 
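		/* Wait on the exclusive (write) fence only (wait_all =
		 * false), interruptibly, with no timeout. */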
14102 		lret = reservation_object_wait_timeout_rcu(resv, false, true,
14103 							   MAX_SCHEDULE_TIMEOUT);
14104 		if (lret == -ERESTARTSYS)
14105 			return lret;
14106 
14107 		WARN(lret < 0, "dmabuf fence wait returned %li\n", lret);
14108 	}
14109 
14110 	if (plane->type == DRM_PLANE_TYPE_CURSOR &&
14111 	    INTEL_INFO(dev)->cursor_needs_physical) {
14112 		int align = IS_I830(dev) ? 16 * 1024 : 256;
14113 		ret = i915_gem_object_attach_phys(obj, align);
14114 		if (ret)
14115 			DRM_DEBUG_KMS("failed to attach phys object\n");
14116 	} else {
14117 		ret = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
14118 	}
14119 
14120 	if (ret == 0) {
14121 		struct intel_plane_state *plane_state =
14122 			to_intel_plane_state(new_state);
14123 
14124 		i915_gem_request_assign(&plane_state->wait_req,
14125 					obj->last_write_req);
14126 	}
14127 
14128 	return ret;
14129 }
14130 
14131 /**
14132  * intel_cleanup_plane_fb - Cleans up an fb after plane use
14133  * @plane: drm plane to clean up for
14134  * @old_state: the old plane state, holding the framebuffer being removed
14135  *
14136  * Cleans up a framebuffer that has just been removed from a plane.
14137  *
14138  * Must be called with struct_mutex held.
14139  */
14140 void
14141 intel_cleanup_plane_fb(struct drm_plane *plane,
14142 		       struct drm_plane_state *old_state)
14143 {
14144 	struct drm_device *dev = plane->dev;
14145 	struct intel_plane_state *old_intel_state;
14146 	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
14147 	struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
14148 
14149 	old_intel_state = to_intel_plane_state(old_state);
14150 
14151 	if (!obj && !old_obj)
14152 		return;
14153 
14154 	if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
14155 	    !INTEL_INFO(dev)->cursor_needs_physical))
14156 		intel_unpin_fb_obj(old_state->fb, old_state->rotation);
14157 
14158 	i915_gem_request_assign(&old_intel_state->wait_req, NULL);
14159 }
14160 
14161 int
14162 skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
14163 {
14164 	int max_scale;
14165 	struct drm_device *dev;
14166 	struct drm_i915_private *dev_priv;
14167 	int crtc_clock, cdclk;
14168 
14169 	if (!intel_crtc || !crtc_state->base.enable)
14170 		return DRM_PLANE_HELPER_NO_SCALING;
14171 
14172 	dev = intel_crtc->base.dev;
14173 	dev_priv = dev->dev_private;
14174 	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
14175 	cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
14176 
14177 	if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
14178 		return DRM_PLANE_HELPER_NO_SCALING;
14179 
14180 	/*
14181 	 * The SKL max scale factor (in 16.16 fixed point) is the lower of:
14182 	 *  - just under 3.0 (the -1 keeps the value below exactly 3), or
14183 	 *  - cdclk / crtc_clock.
14184 	 */
14186 	max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));
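	/*
	 * Worked example with hypothetical numbers: cdclk = 540000 kHz and
	 * crtc_clock = 300000 kHz gives (1 << 8) * ((540000 << 8) / 300000)
	 * = 117760 = 1.797 in 16.16 fixed point, i.e. at most ~1.8x
	 * downscaling before the pipe runs out of clock budget.
	 */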
14187 
14188 	return max_scale;
14189 }
14190 
14191 static int
14192 intel_check_primary_plane(struct drm_plane *plane,
14193 			  struct intel_crtc_state *crtc_state,
14194 			  struct intel_plane_state *state)
14195 {
14196 	struct drm_crtc *crtc = state->base.crtc;
14197 	struct drm_framebuffer *fb = state->base.fb;
14198 	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
14199 	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
14200 	bool can_position = false;
14201 
14202 	if (INTEL_INFO(plane->dev)->gen >= 9) {
14203 		/* use scaler when colorkey is not required */
14204 		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
14205 			min_scale = 1;
14206 			max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
14207 		}
14208 		can_position = true;
14209 	}
14210 
14211 	return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
14212 					     &state->dst, &state->clip,
14213 					     state->base.rotation,
14214 					     min_scale, max_scale,
14215 					     can_position, true,
14216 					     &state->visible);
14217 }
14218 
14219 static void intel_begin_crtc_commit(struct drm_crtc *crtc,
14220 				    struct drm_crtc_state *old_crtc_state)
14221 {
14222 	struct drm_device *dev = crtc->dev;
14223 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14224 	struct intel_crtc_state *old_intel_state =
14225 		to_intel_crtc_state(old_crtc_state);
14226 	bool modeset = needs_modeset(crtc->state);
14227 
14228 	/* Perform vblank evasion around commit operation */
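	/*
	 * intel_pipe_update_start() spins until the scanline is just ahead
	 * of vblank, so everything programmed between here and
	 * intel_pipe_update_end() should land within a single frame.
	 */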
14229 	intel_pipe_update_start(intel_crtc);
14230 
14231 	if (modeset)
14232 		return;
14233 
14234 	if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
14235 		intel_color_set_csc(crtc->state);
14236 		intel_color_load_luts(crtc->state);
14237 	}
14238 
14239 	if (to_intel_crtc_state(crtc->state)->update_pipe)
14240 		intel_update_pipe_config(intel_crtc, old_intel_state);
14241 	else if (INTEL_INFO(dev)->gen >= 9)
14242 		skl_detach_scalers(intel_crtc);
14243 }
14244 
14245 static void intel_finish_crtc_commit(struct drm_crtc *crtc,
14246 				     struct drm_crtc_state *old_crtc_state)
14247 {
14248 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14249 
14250 	intel_pipe_update_end(intel_crtc, NULL);
14251 }
14252 
14253 /**
14254  * intel_plane_destroy - destroy a plane
14255  * @plane: plane to destroy
14256  *
14257  * Common destruction function for all types of planes (primary, cursor,
14258  * sprite).
14259  */
14260 void intel_plane_destroy(struct drm_plane *plane)
14261 {
14262 	if (!plane)
14263 		return;
14264 
14265 	drm_plane_cleanup(plane);
14266 	kfree(to_intel_plane(plane));
14267 }
14268 
14269 const struct drm_plane_funcs intel_plane_funcs = {
14270 	.update_plane = drm_atomic_helper_update_plane,
14271 	.disable_plane = drm_atomic_helper_disable_plane,
14272 	.destroy = intel_plane_destroy,
14273 	.set_property = drm_atomic_helper_plane_set_property,
14274 	.atomic_get_property = intel_plane_atomic_get_property,
14275 	.atomic_set_property = intel_plane_atomic_set_property,
14276 	.atomic_duplicate_state = intel_plane_duplicate_state,
14277 	.atomic_destroy_state = intel_plane_destroy_state,
14278 
14279 };
14280 
14281 static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
14282 						    int pipe)
14283 {
14284 	struct intel_plane *primary = NULL;
14285 	struct intel_plane_state *state = NULL;
14286 	const uint32_t *intel_primary_formats;
14287 	unsigned int num_formats;
14288 	int ret;
14289 
14290 	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
14291 	if (!primary)
14292 		goto fail;
14293 
14294 	state = intel_create_plane_state(&primary->base);
14295 	if (!state)
14296 		goto fail;
14297 	primary->base.state = &state->base;
14298 
14299 	primary->can_scale = false;
14300 	primary->max_downscale = 1;
14301 	if (INTEL_INFO(dev)->gen >= 9) {
14302 		primary->can_scale = true;
14303 		state->scaler_id = -1;
14304 	}
14305 	primary->pipe = pipe;
14306 	primary->plane = pipe;
14307 	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
14308 	primary->check_plane = intel_check_primary_plane;
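	/* On gen2/3 only plane A can do FBC, so the planes are swapped:
	 * see the matching logic and comment in intel_crtc_init(). */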
14309 	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
14310 		primary->plane = !pipe;
14311 
14312 	if (INTEL_INFO(dev)->gen >= 9) {
14313 		intel_primary_formats = skl_primary_formats;
14314 		num_formats = ARRAY_SIZE(skl_primary_formats);
14315 
14316 		primary->update_plane = skylake_update_primary_plane;
14317 		primary->disable_plane = skylake_disable_primary_plane;
14318 	} else if (HAS_PCH_SPLIT(dev)) {
14319 		intel_primary_formats = i965_primary_formats;
14320 		num_formats = ARRAY_SIZE(i965_primary_formats);
14321 
14322 		primary->update_plane = ironlake_update_primary_plane;
14323 		primary->disable_plane = i9xx_disable_primary_plane;
14324 	} else if (INTEL_INFO(dev)->gen >= 4) {
14325 		intel_primary_formats = i965_primary_formats;
14326 		num_formats = ARRAY_SIZE(i965_primary_formats);
14327 
14328 		primary->update_plane = i9xx_update_primary_plane;
14329 		primary->disable_plane = i9xx_disable_primary_plane;
14330 	} else {
14331 		intel_primary_formats = i8xx_primary_formats;
14332 		num_formats = ARRAY_SIZE(i8xx_primary_formats);
14333 
14334 		primary->update_plane = i9xx_update_primary_plane;
14335 		primary->disable_plane = i9xx_disable_primary_plane;
14336 	}
14337 
14338 	if (INTEL_INFO(dev)->gen >= 9)
14339 		ret = drm_universal_plane_init(dev, &primary->base, 0,
14340 					       &intel_plane_funcs,
14341 					       intel_primary_formats, num_formats,
14342 					       DRM_PLANE_TYPE_PRIMARY,
14343 					       "plane 1%c", pipe_name(pipe));
14344 	else if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
14345 		ret = drm_universal_plane_init(dev, &primary->base, 0,
14346 					       &intel_plane_funcs,
14347 					       intel_primary_formats, num_formats,
14348 					       DRM_PLANE_TYPE_PRIMARY,
14349 					       "primary %c", pipe_name(pipe));
14350 	else
14351 		ret = drm_universal_plane_init(dev, &primary->base, 0,
14352 					       &intel_plane_funcs,
14353 					       intel_primary_formats, num_formats,
14354 					       DRM_PLANE_TYPE_PRIMARY,
14355 					       "plane %c", plane_name(primary->plane));
14356 	if (ret)
14357 		goto fail;
14358 
14359 	if (INTEL_INFO(dev)->gen >= 4)
14360 		intel_create_rotation_property(dev, primary);
14361 
14362 	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
14363 
14364 	return &primary->base;
14365 
14366 fail:
14367 	kfree(state);
14368 	kfree(primary);
14369 
14370 	return NULL;
14371 }
14372 
14373 void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
14374 {
14375 	if (!dev->mode_config.rotation_property) {
14376 		unsigned long flags = DRM_ROTATE_0 |
14377 			DRM_ROTATE_180;
14378 
14379 		if (INTEL_INFO(dev)->gen >= 9)
14380 			flags |= DRM_ROTATE_90 | DRM_ROTATE_270;
14381 
14382 		dev->mode_config.rotation_property =
14383 			drm_mode_create_rotation_property(dev, flags);
14384 	}
14385 	if (dev->mode_config.rotation_property)
14386 		drm_object_attach_property(&plane->base.base,
14387 				dev->mode_config.rotation_property,
14388 				plane->base.state->rotation);
14389 }
14390 
14391 static int
14392 intel_check_cursor_plane(struct drm_plane *plane,
14393 			 struct intel_crtc_state *crtc_state,
14394 			 struct intel_plane_state *state)
14395 {
14396 	struct drm_crtc *crtc = crtc_state->base.crtc;
14397 	struct drm_framebuffer *fb = state->base.fb;
14398 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14399 	enum i915_pipe pipe = to_intel_plane(plane)->pipe;
14400 	unsigned stride;
14401 	int ret;
14402 
14403 	ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
14404 					    &state->dst, &state->clip,
14405 					    state->base.rotation,
14406 					    DRM_PLANE_HELPER_NO_SCALING,
14407 					    DRM_PLANE_HELPER_NO_SCALING,
14408 					    true, true, &state->visible);
14409 	if (ret)
14410 		return ret;
14411 
14412 	/* If we want to turn off the cursor, ignore width and height. */
14413 	if (!obj)
14414 		return 0;
14415 
14416 	/* Check for which cursor types we support */
14417 	if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
14418 		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
14419 			  state->base.crtc_w, state->base.crtc_h);
14420 		return -EINVAL;
14421 	}
14422 
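	/* The cursor is always ARGB8888 (4 bytes per pixel) and is scanned
	 * out with a power-of-two pitch, so this is the minimum size the
	 * backing object must provide. */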
14423 	stride = roundup_pow_of_two(state->base.crtc_w) * 4;
14424 	if (obj->base.size < stride * state->base.crtc_h) {
14425 		DRM_DEBUG_KMS("buffer is too small\n");
14426 		return -ENOMEM;
14427 	}
14428 
14429 	if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
14430 		DRM_DEBUG_KMS("cursor cannot be tiled\n");
14431 		return -EINVAL;
14432 	}
14433 
14434 	/*
14435 	 * There's something wrong with the cursor on CHV pipe C.
14436 	 * If it straddles the left edge of the screen then
14437 	 * moving it away from the edge or disabling it often
14438 	 * results in a pipe underrun, and often that can lead to
14439 	 * dead pipe (constant underrun reported, and it scans
14440 	 * out just a solid color). To recover from that, the
14441 	 * display power well must be turned off and on again.
14442 	 * Refuse to put the cursor into that compromised position.
14443 	 */
14444 	if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
14445 	    state->visible && state->base.crtc_x < 0) {
14446 		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
14447 		return -EINVAL;
14448 	}
14449 
14450 	return 0;
14451 }
14452 
14453 static void
14454 intel_disable_cursor_plane(struct drm_plane *plane,
14455 			   struct drm_crtc *crtc)
14456 {
14457 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14458 
14459 	intel_crtc->cursor_addr = 0;
14460 	intel_crtc_update_cursor(crtc, NULL);
14461 }
14462 
14463 static void
14464 intel_update_cursor_plane(struct drm_plane *plane,
14465 			  const struct intel_crtc_state *crtc_state,
14466 			  const struct intel_plane_state *state)
14467 {
14468 	struct drm_crtc *crtc = crtc_state->base.crtc;
14469 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14470 	struct drm_device *dev = plane->dev;
14471 	struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
14472 	uint32_t addr;
14473 
14474 	if (!obj)
14475 		addr = 0;
14476 	else if (!INTEL_INFO(dev)->cursor_needs_physical)
14477 		addr = i915_gem_obj_ggtt_offset(obj);
14478 	else
14479 		addr = obj->phys_handle->busaddr;
14480 
14481 	intel_crtc->cursor_addr = addr;
14482 	intel_crtc_update_cursor(crtc, state);
14483 }
14484 
14485 static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
14486 						   int pipe)
14487 {
14488 	struct intel_plane *cursor = NULL;
14489 	struct intel_plane_state *state = NULL;
14490 	int ret;
14491 
14492 	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
14493 	if (!cursor)
14494 		goto fail;
14495 
14496 	state = intel_create_plane_state(&cursor->base);
14497 	if (!state)
14498 		goto fail;
14499 	cursor->base.state = &state->base;
14500 
14501 	cursor->can_scale = false;
14502 	cursor->max_downscale = 1;
14503 	cursor->pipe = pipe;
14504 	cursor->plane = pipe;
14505 	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
14506 	cursor->check_plane = intel_check_cursor_plane;
14507 	cursor->update_plane = intel_update_cursor_plane;
14508 	cursor->disable_plane = intel_disable_cursor_plane;
14509 
14510 	ret = drm_universal_plane_init(dev, &cursor->base, 0,
14511 				       &intel_plane_funcs,
14512 				       intel_cursor_formats,
14513 				       ARRAY_SIZE(intel_cursor_formats),
14514 				       DRM_PLANE_TYPE_CURSOR,
14515 				       "cursor %c", pipe_name(pipe));
14516 	if (ret)
14517 		goto fail;
14518 
14519 	if (INTEL_INFO(dev)->gen >= 4) {
14520 		if (!dev->mode_config.rotation_property)
14521 			dev->mode_config.rotation_property =
14522 				drm_mode_create_rotation_property(dev,
14523 							DRM_ROTATE_0 |
14524 							DRM_ROTATE_180);
14525 		if (dev->mode_config.rotation_property)
14526 			drm_object_attach_property(&cursor->base.base,
14527 				dev->mode_config.rotation_property,
14528 				state->base.rotation);
14529 	}
14530 
14531 	if (INTEL_INFO(dev)->gen >= 9)
14532 		state->scaler_id = -1;
14533 
14534 	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
14535 
14536 	return &cursor->base;
14537 
14538 fail:
14539 	kfree(state);
14540 	kfree(cursor);
14541 
14542 	return NULL;
14543 }
14544 
14545 static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
14546 	struct intel_crtc_state *crtc_state)
14547 {
14548 	int i;
14549 	struct intel_scaler *intel_scaler;
14550 	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
14551 
14552 	for (i = 0; i < intel_crtc->num_scalers; i++) {
14553 		intel_scaler = &scaler_state->scalers[i];
14554 		intel_scaler->in_use = 0;
14555 		intel_scaler->mode = PS_SCALER_MODE_DYN;
14556 	}
14557 
14558 	scaler_state->scaler_id = -1;
14559 }
14560 
14561 static void intel_crtc_init(struct drm_device *dev, int pipe)
14562 {
14563 	struct drm_i915_private *dev_priv = dev->dev_private;
14564 	struct intel_crtc *intel_crtc;
14565 	struct intel_crtc_state *crtc_state = NULL;
14566 	struct drm_plane *primary = NULL;
14567 	struct drm_plane *cursor = NULL;
14568 	int ret;
14569 
14570 	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
14571 	if (intel_crtc == NULL)
14572 		return;
14573 
14574 	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
14575 	if (!crtc_state)
14576 		goto fail;
14577 	intel_crtc->config = crtc_state;
14578 	intel_crtc->base.state = &crtc_state->base;
14579 	crtc_state->base.crtc = &intel_crtc->base;
14580 
14581 	/* initialize shared scalers */
14582 	if (INTEL_INFO(dev)->gen >= 9) {
14583 		if (pipe == PIPE_C)
14584 			intel_crtc->num_scalers = 1;
14585 		else
14586 			intel_crtc->num_scalers = SKL_NUM_SCALERS;
14587 
14588 		skl_init_scalers(dev, intel_crtc, crtc_state);
14589 	}
14590 
14591 	primary = intel_primary_plane_create(dev, pipe);
14592 	if (!primary)
14593 		goto fail;
14594 
14595 	cursor = intel_cursor_plane_create(dev, pipe);
14596 	if (!cursor)
14597 		goto fail;
14598 
14599 	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
14600 					cursor, &intel_crtc_funcs,
14601 					"pipe %c", pipe_name(pipe));
14602 	if (ret)
14603 		goto fail;
14604 
14605 	/*
14606 	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
14607 	 * are hooked to pipe B. Hence we want plane A feeding pipe B.
14608 	 */
14609 	intel_crtc->pipe = pipe;
14610 	intel_crtc->plane = pipe;
14611 	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
14612 		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
14613 		intel_crtc->plane = !pipe;
14614 	}
14615 
14616 	intel_crtc->cursor_base = ~0;
14617 	intel_crtc->cursor_cntl = ~0;
14618 	intel_crtc->cursor_size = ~0;
14619 
14620 	intel_crtc->wm.cxsr_allowed = true;
14621 
14622 	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
14623 	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
14624 	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
14625 	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
14626 
14627 	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
14628 
14629 	intel_color_init(&intel_crtc->base);
14630 
14631 	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
14632 	return;
14633 
14634 fail:
14635 	intel_plane_destroy(primary);
14636 	intel_plane_destroy(cursor);
14637 	kfree(crtc_state);
14638 	kfree(intel_crtc);
14639 }
14640 
14641 enum i915_pipe intel_get_pipe_from_connector(struct intel_connector *connector)
14642 {
14643 	struct drm_encoder *encoder = connector->base.encoder;
14644 	struct drm_device *dev = connector->base.dev;
14645 
14646 	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
14647 
14648 	if (!encoder || WARN_ON(!encoder->crtc))
14649 		return INVALID_PIPE;
14650 
14651 	return to_intel_crtc(encoder->crtc)->pipe;
14652 }
14653 
14654 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
14655 				struct drm_file *file)
14656 {
14657 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
14658 	struct drm_crtc *drmmode_crtc;
14659 	struct intel_crtc *crtc;
14660 
14661 	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
14662 	if (!drmmode_crtc)
14663 		return -ENOENT;
14664 
14665 	crtc = to_intel_crtc(drmmode_crtc);
14666 	pipe_from_crtc_id->pipe = crtc->pipe;
14667 
14668 	return 0;
14669 }
14670 
14671 static int intel_encoder_clones(struct intel_encoder *encoder)
14672 {
14673 	struct drm_device *dev = encoder->base.dev;
14674 	struct intel_encoder *source_encoder;
14675 	int index_mask = 0;
14676 	int entry = 0;
14677 
14678 	for_each_intel_encoder(dev, source_encoder) {
14679 		if (encoders_cloneable(encoder, source_encoder))
14680 			index_mask |= (1 << entry);
14681 
14682 		entry++;
14683 	}
14684 
14685 	return index_mask;
14686 }
14687 
14688 static bool has_edp_a(struct drm_device *dev)
14689 {
14690 	struct drm_i915_private *dev_priv = dev->dev_private;
14691 
14692 	if (!IS_MOBILE(dev))
14693 		return false;
14694 
14695 	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14696 		return false;
14697 
14698 	if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14699 		return false;
14700 
14701 	return true;
14702 }
14703 
14704 static bool intel_crt_present(struct drm_device *dev)
14705 {
14706 	struct drm_i915_private *dev_priv = dev->dev_private;
14707 
14708 	if (INTEL_INFO(dev)->gen >= 9)
14709 		return false;
14710 
14711 	if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
14712 		return false;
14713 
14714 	if (IS_CHERRYVIEW(dev))
14715 		return false;
14716 
14717 	if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
14718 		return false;
14719 
14720 	/* DDI E can't be used if DDI A requires 4 lanes */
14721 	if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
14722 		return false;
14723 
14724 	if (!dev_priv->vbt.int_crt_support)
14725 		return false;
14726 
14727 	return true;
14728 }
14729 
14730 static void intel_setup_outputs(struct drm_device *dev)
14731 {
14732 	struct drm_i915_private *dev_priv = dev->dev_private;
14733 	struct intel_encoder *encoder;
14734 	bool dpd_is_edp = false;
14735 
14736 	/*
14737 	 * intel_edp_init_connector() depends on this completing first, to
14738 	 * prevent the registration of both eDP and LVDS and the incorrect
14739 	 * sharing of the PPS.
14740 	 */
14741 	intel_lvds_init(dev);
14742 
14743 	if (intel_crt_present(dev))
14744 		intel_crt_init(dev);
14745 
14746 	if (IS_BROXTON(dev)) {
14747 		/*
14748 		 * FIXME: Broxton doesn't support port detection via the
14749 		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
14750 		 * detect the ports.
14751 		 */
14752 		intel_ddi_init(dev, PORT_A);
14753 		intel_ddi_init(dev, PORT_B);
14754 		intel_ddi_init(dev, PORT_C);
14755 
14756 		intel_dsi_init(dev);
14757 	} else if (HAS_DDI(dev)) {
14758 		int found;
14759 
14760 		/*
14761 		 * Haswell uses DDI functions to detect digital outputs.
14762 		 * On SKL pre-D0 the strap isn't connected, so we assume
14763 		 * it's there.
14764 		 */
14765 		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
14766 		/* WaIgnoreDDIAStrap: skl */
14767 		if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
14768 			intel_ddi_init(dev, PORT_A);
14769 
14770 		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
14771 		 * register */
14772 		found = I915_READ(SFUSE_STRAP);
14773 
14774 		if (found & SFUSE_STRAP_DDIB_DETECTED)
14775 			intel_ddi_init(dev, PORT_B);
14776 		if (found & SFUSE_STRAP_DDIC_DETECTED)
14777 			intel_ddi_init(dev, PORT_C);
14778 		if (found & SFUSE_STRAP_DDID_DETECTED)
14779 			intel_ddi_init(dev, PORT_D);
14780 		/*
14781 		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
14782 		 */
14783 		if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
14784 		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
14785 		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
14786 		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
14787 			intel_ddi_init(dev, PORT_E);
14788 
14789 	} else if (HAS_PCH_SPLIT(dev)) {
14790 		int found;
14791 		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
14792 
14793 		if (has_edp_a(dev))
14794 			intel_dp_init(dev, DP_A, PORT_A);
14795 
14796 		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
14797 			/* PCH SDVOB multiplex with HDMIB */
14798 			found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
14799 			if (!found)
14800 				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
14801 			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
14802 				intel_dp_init(dev, PCH_DP_B, PORT_B);
14803 		}
14804 
14805 		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
14806 			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
14807 
14808 		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
14809 			intel_hdmi_init(dev, PCH_HDMID, PORT_D);
14810 
14811 		if (I915_READ(PCH_DP_C) & DP_DETECTED)
14812 			intel_dp_init(dev, PCH_DP_C, PORT_C);
14813 
14814 		if (I915_READ(PCH_DP_D) & DP_DETECTED)
14815 			intel_dp_init(dev, PCH_DP_D, PORT_D);
14816 	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
14817 		bool has_edp, has_port;
14818 
14819 		/*
14820 		 * The DP_DETECTED bit is the latched state of the DDC
14821 		 * SDA pin at boot. However since eDP doesn't require DDC
14822 		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
14823 		 * eDP ports may have been muxed to an alternate function.
14824 		 * Thus we can't rely on the DP_DETECTED bit alone to detect
14825 		 * eDP ports. Consult the VBT as well as DP_DETECTED to
14826 		 * detect eDP ports.
14827 		 *
14828 		 * Sadly the straps seem to be missing sometimes even for HDMI
14829 		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
14830 		 * and VBT for the presence of the port. Additionally we can't
14831 		 * trust the port type the VBT declares as we've seen at least
14832 		 * HDMI ports that the VBT claims are DP or eDP.
14833 		 */
14834 		has_edp = intel_dp_is_edp(dev, PORT_B);
14835 		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
14836 		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
14837 			has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
14838 		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
14839 			intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
14840 
14841 		has_edp = intel_dp_is_edp(dev, PORT_C);
14842 		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
14843 		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
14844 			has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
14845 		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
14846 			intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
14847 
14848 		if (IS_CHERRYVIEW(dev)) {
14849 			/*
14850 			 * eDP not supported on port D,
14851 			 * so no need to worry about it
14852 			 */
14853 			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
14854 			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
14855 				intel_dp_init(dev, CHV_DP_D, PORT_D);
14856 			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
14857 				intel_hdmi_init(dev, CHV_HDMID, PORT_D);
14858 		}
14859 
14860 		intel_dsi_init(dev);
14861 	} else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
14862 		bool found = false;
14863 
14864 		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14865 			DRM_DEBUG_KMS("probing SDVOB\n");
14866 			found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
14867 			if (!found && IS_G4X(dev)) {
14868 				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
14869 				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
14870 			}
14871 
14872 			if (!found && IS_G4X(dev))
14873 				intel_dp_init(dev, DP_B, PORT_B);
14874 		}
14875 
14876 		/* Before G4X, SDVOC doesn't have its own detect register */
14877 
14878 		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14879 			DRM_DEBUG_KMS("probing SDVOC\n");
14880 			found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
14881 		}
14882 
14883 		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
14885 			if (IS_G4X(dev)) {
14886 				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
14887 				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
14888 			}
14889 			if (IS_G4X(dev))
14890 				intel_dp_init(dev, DP_C, PORT_C);
14891 		}
14892 
14893 		if (IS_G4X(dev) &&
14894 		    (I915_READ(DP_D) & DP_DETECTED))
14895 			intel_dp_init(dev, DP_D, PORT_D);
14896 	} else if (IS_GEN2(dev))
14897 		intel_dvo_init(dev);
14898 
14899 	if (SUPPORTS_TV(dev))
14900 		intel_tv_init(dev);
14901 
14902 	intel_psr_init(dev);
14903 
14904 	for_each_intel_encoder(dev, encoder) {
14905 		encoder->base.possible_crtcs = encoder->crtc_mask;
14906 		encoder->base.possible_clones =
14907 			intel_encoder_clones(encoder);
14908 	}
14909 
14910 	intel_init_pch_refclk(dev);
14911 
14912 	drm_helper_move_panel_connectors_to_head(dev);
14913 }
14914 
14915 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
14916 {
14917 	struct drm_device *dev = fb->dev;
14918 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14919 
14920 	drm_framebuffer_cleanup(fb);
14921 	mutex_lock(&dev->struct_mutex);
14922 	WARN_ON(!intel_fb->obj->framebuffer_references--);
14923 	drm_gem_object_unreference(&intel_fb->obj->base);
14924 	mutex_unlock(&dev->struct_mutex);
14925 	kfree(intel_fb);
14926 }
14927 
14928 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14929 						struct drm_file *file,
14930 						unsigned int *handle)
14931 {
14932 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14933 	struct drm_i915_gem_object *obj = intel_fb->obj;
14934 
14935 	if (obj->userptr.mm) {
14936 		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14937 		return -EINVAL;
14938 	}
14939 
14940 	return drm_gem_handle_create(file, &obj->base, handle);
14941 }
14942 
14943 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
14944 					struct drm_file *file,
14945 					unsigned flags, unsigned color,
14946 					struct drm_clip_rect *clips,
14947 					unsigned num_clips)
14948 {
14949 	struct drm_device *dev = fb->dev;
14950 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14951 	struct drm_i915_gem_object *obj = intel_fb->obj;
14952 
14953 	mutex_lock(&dev->struct_mutex);
14954 	intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
14955 	mutex_unlock(&dev->struct_mutex);
14956 
14957 	return 0;
14958 }
14959 
14960 static const struct drm_framebuffer_funcs intel_fb_funcs = {
14961 	.destroy = intel_user_framebuffer_destroy,
14962 	.create_handle = intel_user_framebuffer_create_handle,
14963 	.dirty = intel_user_framebuffer_dirty,
14964 };
14965 
14966 static
14967 u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
14968 			 uint32_t pixel_format)
14969 {
14970 	u32 gen = INTEL_INFO(dev)->gen;
14971 
14972 	if (gen >= 9) {
14973 		int cpp = drm_format_plane_cpp(pixel_format, 0);
14974 
14975 		/* "The stride in bytes must not exceed the size of 8K
14976 		 *  pixels and 32K bytes."
14977 		 */
14978 		return min(8192 * cpp, 32768);
14979 	} else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
14980 		return 32*1024;
14981 	} else if (gen >= 4) {
14982 		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14983 			return 16*1024;
14984 		else
14985 			return 32*1024;
14986 	} else if (gen >= 3) {
14987 		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14988 			return 8*1024;
14989 		else
14990 			return 16*1024;
14991 	} else {
14992 		/* XXX DSPC is limited to 4k tiled */
14993 		return 8*1024;
14994 	}
14995 }
14996 
14997 static int intel_framebuffer_init(struct drm_device *dev,
14998 				  struct intel_framebuffer *intel_fb,
14999 				  struct drm_mode_fb_cmd2 *mode_cmd,
15000 				  struct drm_i915_gem_object *obj)
15001 {
15002 	struct drm_i915_private *dev_priv = to_i915(dev);
15003 	unsigned int aligned_height;
15004 	int ret;
15005 	u32 pitch_limit, stride_alignment;
15006 
15007 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
15008 
15009 	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
15010 		/* Enforce that fb modifier and tiling mode match, but only for
15011 		 * X-tiled. This is needed for FBC. */
15012 		if (!!(obj->tiling_mode == I915_TILING_X) !=
15013 		    !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
15014 			DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
15015 			return -EINVAL;
15016 		}
15017 	} else {
15018 		if (obj->tiling_mode == I915_TILING_X)
15019 			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
15020 		else if (obj->tiling_mode == I915_TILING_Y) {
15021 			DRM_DEBUG("No Y tiling for legacy addfb\n");
15022 			return -EINVAL;
15023 		}
15024 	}
15025 
15026 	/* Sanity-check the modifier passed in. */
15027 	switch (mode_cmd->modifier[0]) {
15028 	case I915_FORMAT_MOD_Y_TILED:
15029 	case I915_FORMAT_MOD_Yf_TILED:
15030 		if (INTEL_INFO(dev)->gen < 9) {
15031 			DRM_DEBUG("Unsupported tiling 0x%llx!\n",
15032 				  mode_cmd->modifier[0]);
15033 			return -EINVAL;
15034 		}
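		/* fall through */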
15035 	case DRM_FORMAT_MOD_NONE:
15036 	case I915_FORMAT_MOD_X_TILED:
15037 		break;
15038 	default:
15039 		DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
15040 			  mode_cmd->modifier[0]);
15041 		return -EINVAL;
15042 	}
15043 
15044 	stride_alignment = intel_fb_stride_alignment(dev_priv,
15045 						     mode_cmd->modifier[0],
15046 						     mode_cmd->pixel_format);
15047 	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
15048 		DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
15049 			  mode_cmd->pitches[0], stride_alignment);
15050 		return -EINVAL;
15051 	}
15052 
15053 	pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
15054 					   mode_cmd->pixel_format);
15055 	if (mode_cmd->pitches[0] > pitch_limit) {
15056 		DRM_DEBUG("%s pitch (%u) must be at most %d\n",
15057 			  mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
15058 			  "tiled" : "linear",
15059 			  mode_cmd->pitches[0], pitch_limit);
15060 		return -EINVAL;
15061 	}
15062 
15063 	if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
15064 	    mode_cmd->pitches[0] != obj->stride) {
15065 		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
15066 			  mode_cmd->pitches[0], obj->stride);
15067 		return -EINVAL;
15068 	}
15069 
15070 	/* Reject formats not supported by any plane early. */
15071 	switch (mode_cmd->pixel_format) {
15072 	case DRM_FORMAT_C8:
15073 	case DRM_FORMAT_RGB565:
15074 	case DRM_FORMAT_XRGB8888:
15075 	case DRM_FORMAT_ARGB8888:
15076 		break;
15077 	case DRM_FORMAT_XRGB1555:
15078 		if (INTEL_INFO(dev)->gen > 3) {
15079 			DRM_DEBUG("unsupported pixel format: %s\n",
15080 				  drm_get_format_name(mode_cmd->pixel_format));
15081 			return -EINVAL;
15082 		}
15083 		break;
15084 	case DRM_FORMAT_ABGR8888:
15085 		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
15086 		    INTEL_INFO(dev)->gen < 9) {
15087 			DRM_DEBUG("unsupported pixel format: %s\n",
15088 				  drm_get_format_name(mode_cmd->pixel_format));
15089 			return -EINVAL;
15090 		}
15091 		break;
15092 	case DRM_FORMAT_XBGR8888:
15093 	case DRM_FORMAT_XRGB2101010:
15094 	case DRM_FORMAT_XBGR2101010:
15095 		if (INTEL_INFO(dev)->gen < 4) {
15096 			DRM_DEBUG("unsupported pixel format: %s\n",
15097 				  drm_get_format_name(mode_cmd->pixel_format));
15098 			return -EINVAL;
15099 		}
15100 		break;
15101 	case DRM_FORMAT_ABGR2101010:
15102 		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
15103 			DRM_DEBUG("unsupported pixel format: %s\n",
15104 				  drm_get_format_name(mode_cmd->pixel_format));
15105 			return -EINVAL;
15106 		}
15107 		break;
15108 	case DRM_FORMAT_YUYV:
15109 	case DRM_FORMAT_UYVY:
15110 	case DRM_FORMAT_YVYU:
15111 	case DRM_FORMAT_VYUY:
15112 		if (INTEL_INFO(dev)->gen < 5) {
15113 			DRM_DEBUG("unsupported pixel format: %s\n",
15114 				  drm_get_format_name(mode_cmd->pixel_format));
15115 			return -EINVAL;
15116 		}
15117 		break;
15118 	default:
15119 		DRM_DEBUG("unsupported pixel format: %s\n",
15120 			  drm_get_format_name(mode_cmd->pixel_format));
15121 		return -EINVAL;
15122 	}
15123 
15124 	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
15125 	if (mode_cmd->offsets[0] != 0)
15126 		return -EINVAL;
15127 
15128 	aligned_height = intel_fb_align_height(dev, mode_cmd->height,
15129 					       mode_cmd->pixel_format,
15130 					       mode_cmd->modifier[0]);
15131 	/* FIXME drm helper for size checks (especially planar formats)? */
15132 	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
15133 		return -EINVAL;
15134 
15135 	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
15136 	intel_fb->obj = obj;
15137 
15138 	intel_fill_fb_info(dev_priv, &intel_fb->base);
15139 
15140 	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
15141 	if (ret) {
15142 		DRM_ERROR("framebuffer init failed %d\n", ret);
15143 		return ret;
15144 	}
15145 
15146 	intel_fb->obj->framebuffer_references++;
15147 
15148 	return 0;
15149 }
15150 
15151 static struct drm_framebuffer *
15152 intel_user_framebuffer_create(struct drm_device *dev,
15153 			      struct drm_file *filp,
15154 			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
15155 {
15156 	struct drm_framebuffer *fb;
15157 	struct drm_i915_gem_object *obj;
15158 	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
15159 
15160 	obj = to_intel_bo(drm_gem_object_lookup(filp, mode_cmd.handles[0]));
15161 	if (!obj)
15162 		return ERR_PTR(-ENOENT);
15163 
15164 	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
15165 	if (IS_ERR(fb))
15166 		drm_gem_object_unreference_unlocked(&obj->base);
15167 
15168 	return fb;
15169 }
15170 
15171 #ifndef CONFIG_DRM_FBDEV_EMULATION
15172 static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
15173 {
15174 }
15175 #endif
15176 
15177 static const struct drm_mode_config_funcs intel_mode_funcs = {
15178 	.fb_create = intel_user_framebuffer_create,
15179 	.output_poll_changed = intel_fbdev_output_poll_changed,
15180 	.atomic_check = intel_atomic_check,
15181 	.atomic_commit = intel_atomic_commit,
15182 	.atomic_state_alloc = intel_atomic_state_alloc,
15183 	.atomic_state_clear = intel_atomic_state_clear,
15184 };
15185 
15186 /**
15187  * intel_init_display_hooks - initialize the display modesetting hooks
15188  * @dev_priv: device private
15189  */
15190 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
15191 {
15192 	if (INTEL_INFO(dev_priv)->gen >= 9) {
15193 		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
15194 		dev_priv->display.get_initial_plane_config =
15195 			skylake_get_initial_plane_config;
15196 		dev_priv->display.crtc_compute_clock =
15197 			haswell_crtc_compute_clock;
15198 		dev_priv->display.crtc_enable = haswell_crtc_enable;
15199 		dev_priv->display.crtc_disable = haswell_crtc_disable;
15200 	} else if (HAS_DDI(dev_priv)) {
15201 		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
15202 		dev_priv->display.get_initial_plane_config =
15203 			ironlake_get_initial_plane_config;
15204 		dev_priv->display.crtc_compute_clock =
15205 			haswell_crtc_compute_clock;
15206 		dev_priv->display.crtc_enable = haswell_crtc_enable;
15207 		dev_priv->display.crtc_disable = haswell_crtc_disable;
15208 	} else if (HAS_PCH_SPLIT(dev_priv)) {
15209 		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
15210 		dev_priv->display.get_initial_plane_config =
15211 			ironlake_get_initial_plane_config;
15212 		dev_priv->display.crtc_compute_clock =
15213 			ironlake_crtc_compute_clock;
15214 		dev_priv->display.crtc_enable = ironlake_crtc_enable;
15215 		dev_priv->display.crtc_disable = ironlake_crtc_disable;
15216 	} else if (IS_CHERRYVIEW(dev_priv)) {
15217 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15218 		dev_priv->display.get_initial_plane_config =
15219 			i9xx_get_initial_plane_config;
15220 		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
15221 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
15222 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
15223 	} else if (IS_VALLEYVIEW(dev_priv)) {
15224 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15225 		dev_priv->display.get_initial_plane_config =
15226 			i9xx_get_initial_plane_config;
15227 		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
15228 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
15229 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
15230 	} else if (IS_G4X(dev_priv)) {
15231 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15232 		dev_priv->display.get_initial_plane_config =
15233 			i9xx_get_initial_plane_config;
15234 		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
15235 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
15236 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
15237 	} else if (IS_PINEVIEW(dev_priv)) {
15238 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15239 		dev_priv->display.get_initial_plane_config =
15240 			i9xx_get_initial_plane_config;
15241 		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
15242 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
15243 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
15244 	} else if (!IS_GEN2(dev_priv)) {
15245 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15246 		dev_priv->display.get_initial_plane_config =
15247 			i9xx_get_initial_plane_config;
15248 		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
15249 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
15250 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
15251 	} else {
15252 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15253 		dev_priv->display.get_initial_plane_config =
15254 			i9xx_get_initial_plane_config;
15255 		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
15256 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
15257 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
15258 	}
15259 
15260 	/* Select the hook that returns the core display clock speed (cdclk) */
15261 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
15262 		dev_priv->display.get_display_clock_speed =
15263 			skylake_get_display_clock_speed;
15264 	else if (IS_BROXTON(dev_priv))
15265 		dev_priv->display.get_display_clock_speed =
15266 			broxton_get_display_clock_speed;
15267 	else if (IS_BROADWELL(dev_priv))
15268 		dev_priv->display.get_display_clock_speed =
15269 			broadwell_get_display_clock_speed;
15270 	else if (IS_HASWELL(dev_priv))
15271 		dev_priv->display.get_display_clock_speed =
15272 			haswell_get_display_clock_speed;
15273 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15274 		dev_priv->display.get_display_clock_speed =
15275 			valleyview_get_display_clock_speed;
15276 	else if (IS_GEN5(dev_priv))
15277 		dev_priv->display.get_display_clock_speed =
15278 			ilk_get_display_clock_speed;
15279 	else if (IS_I945G(dev_priv) || IS_BROADWATER(dev_priv) ||
15280 		 IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
15281 		dev_priv->display.get_display_clock_speed =
15282 			i945_get_display_clock_speed;
15283 	else if (IS_GM45(dev_priv))
15284 		dev_priv->display.get_display_clock_speed =
15285 			gm45_get_display_clock_speed;
15286 	else if (IS_CRESTLINE(dev_priv))
15287 		dev_priv->display.get_display_clock_speed =
15288 			i965gm_get_display_clock_speed;
15289 	else if (IS_PINEVIEW(dev_priv))
15290 		dev_priv->display.get_display_clock_speed =
15291 			pnv_get_display_clock_speed;
15292 	else if (IS_G33(dev_priv) || IS_G4X(dev_priv))
15293 		dev_priv->display.get_display_clock_speed =
15294 			g33_get_display_clock_speed;
15295 	else if (IS_I915G(dev_priv))
15296 		dev_priv->display.get_display_clock_speed =
15297 			i915_get_display_clock_speed;
15298 	else if (IS_I945GM(dev_priv) || IS_845G(dev_priv))
15299 		dev_priv->display.get_display_clock_speed =
15300 			i9xx_misc_get_display_clock_speed;
15301 	else if (IS_I915GM(dev_priv))
15302 		dev_priv->display.get_display_clock_speed =
15303 			i915gm_get_display_clock_speed;
15304 	else if (IS_I865G(dev_priv))
15305 		dev_priv->display.get_display_clock_speed =
15306 			i865_get_display_clock_speed;
15307 	else if (IS_I85X(dev_priv))
15308 		dev_priv->display.get_display_clock_speed =
15309 			i85x_get_display_clock_speed;
15310 	else { /* 830 */
15311 		WARN(!IS_I830(dev_priv), "Unknown platform. Assuming 133 MHz CDCLK\n");
15312 		dev_priv->display.get_display_clock_speed =
15313 			i830_get_display_clock_speed;
15314 	}
15315 
15316 	if (IS_GEN5(dev_priv)) {
15317 		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
15318 	} else if (IS_GEN6(dev_priv)) {
15319 		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
15320 	} else if (IS_IVYBRIDGE(dev_priv)) {
15321 		/* FIXME: detect B0+ stepping and use auto training */
15322 		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
15323 	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
15324 		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
15325 	}
15326 
15327 	if (IS_BROADWELL(dev_priv)) {
15328 		dev_priv->display.modeset_commit_cdclk =
15329 			broadwell_modeset_commit_cdclk;
15330 		dev_priv->display.modeset_calc_cdclk =
15331 			broadwell_modeset_calc_cdclk;
15332 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
15333 		dev_priv->display.modeset_commit_cdclk =
15334 			valleyview_modeset_commit_cdclk;
15335 		dev_priv->display.modeset_calc_cdclk =
15336 			valleyview_modeset_calc_cdclk;
15337 	} else if (IS_BROXTON(dev_priv)) {
15338 		dev_priv->display.modeset_commit_cdclk =
15339 			bxt_modeset_commit_cdclk;
15340 		dev_priv->display.modeset_calc_cdclk =
15341 			bxt_modeset_calc_cdclk;
15342 	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
15343 		dev_priv->display.modeset_commit_cdclk =
15344 			skl_modeset_commit_cdclk;
15345 		dev_priv->display.modeset_calc_cdclk =
15346 			skl_modeset_calc_cdclk;
15347 	}
15348 
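	/*
	 * CS-based (ring) page flips. Gen9+ is execlist-only, so it falls
	 * through to the default hook, which just returns -ENODEV.
	 */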
15349 	switch (INTEL_INFO(dev_priv)->gen) {
15350 	case 2:
15351 		dev_priv->display.queue_flip = intel_gen2_queue_flip;
15352 		break;
15353 
15354 	case 3:
15355 		dev_priv->display.queue_flip = intel_gen3_queue_flip;
15356 		break;
15357 
15358 	case 4:
15359 	case 5:
15360 		dev_priv->display.queue_flip = intel_gen4_queue_flip;
15361 		break;
15362 
15363 	case 6:
15364 		dev_priv->display.queue_flip = intel_gen6_queue_flip;
15365 		break;
15366 	case 7:
15367 	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
15368 		dev_priv->display.queue_flip = intel_gen7_queue_flip;
15369 		break;
15370 	case 9:
15371 		/* Fall through - unsupported since gen9 is execlist-only. */
15372 	default:
15373 		/* Default just returns -ENODEV to indicate unsupported */
15374 		dev_priv->display.queue_flip = intel_default_queue_flip;
15375 	}
15376 }
15377 
15378 /*
15379  * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
15380  * resume, or other times.  This quirk makes sure that's the case for
15381  * affected systems.
15382  */
15383 static void quirk_pipea_force(struct drm_device *dev)
15384 {
15385 	struct drm_i915_private *dev_priv = dev->dev_private;
15386 
15387 	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
15388 	DRM_INFO("applying pipe a force quirk\n");
15389 }
15390 
15391 static void quirk_pipeb_force(struct drm_device *dev)
15392 {
15393 	struct drm_i915_private *dev_priv = dev->dev_private;
15394 
15395 	dev_priv->quirks |= QUIRK_PIPEB_FORCE;
15396 	DRM_INFO("applying pipe b force quirk\n");
15397 }
15398 
15399 /*
15400  * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
15401  */
15402 static void quirk_ssc_force_disable(struct drm_device *dev)
15403 {
15404 	struct drm_i915_private *dev_priv = dev->dev_private;
15405 	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
15406 	DRM_INFO("applying lvds SSC disable quirk\n");
15407 }
15408 
15409 /*
15410  * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
15411  * brightness value
15412  */
15413 static void quirk_invert_brightness(struct drm_device *dev)
15414 {
15415 	struct drm_i915_private *dev_priv = dev->dev_private;
15416 	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
15417 	DRM_INFO("applying inverted panel brightness quirk\n");
15418 }
15419 
15420 /* Some VBTs incorrectly indicate no backlight is present */
15421 static void quirk_backlight_present(struct drm_device *dev)
15422 {
15423 	struct drm_i915_private *dev_priv = dev->dev_private;
15424 	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
15425 	DRM_INFO("applying backlight present quirk\n");
15426 }
15427 
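/*
 * PCI-ID based quirk: matched on device ID plus subsystem vendor/device
 * (with PCI_ANY_ID as a wildcard), hook run at init time by
 * intel_init_quirks(). For example, from intel_quirks[] below:
 *
 *	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
 */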
15428 struct intel_quirk {
15429 	int device;
15430 	int subsystem_vendor;
15431 	int subsystem_device;
15432 	void (*hook)(struct drm_device *dev);
15433 };
15434 
15435 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
15436 struct intel_dmi_quirk {
15437 	void (*hook)(struct drm_device *dev);
15438 	const struct dmi_system_id (*dmi_id_list)[];
15439 };
15440 
15441 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
15442 {
15443 	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
15444 	return 1;
15445 }
15446 
15447 static const struct intel_dmi_quirk intel_dmi_quirks[] = {
15448 	{
15449 		.dmi_id_list = &(const struct dmi_system_id[]) {
15450 			{
15451 				.callback = intel_dmi_reverse_brightness,
15452 				.ident = "NCR Corporation",
15453 				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
15454 					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
15455 				},
15456 			},
15457 			{ }  /* terminating entry */
15458 		},
15459 		.hook = quirk_invert_brightness,
15460 	},
15461 };
15462 
15463 static struct intel_quirk intel_quirks[] = {
15464 	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
15465 	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
15466 
15467 	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
15468 	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
15469 
15470 	/* 830 needs to leave pipe A & dpll A up */
15471 	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
15472 
15473 	/* 830 needs to leave pipe B & dpll B up */
15474 	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },
15475 
15476 	/* Lenovo U160 cannot use SSC on LVDS */
15477 	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
15478 
15479 	/* Sony Vaio Y cannot use SSC on LVDS */
15480 	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
15481 
15482 	/* Acer Aspire 5734Z must invert backlight brightness */
15483 	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
15484 
15485 	/* Acer/eMachines G725 */
15486 	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
15487 
15488 	/* Acer/eMachines e725 */
15489 	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
15490 
15491 	/* Acer/Packard Bell NCL20 */
15492 	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
15493 
15494 	/* Acer Aspire 4736Z */
15495 	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
15496 
15497 	/* Acer Aspire 5336 */
15498 	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
15499 
15500 	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
15501 	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
15502 
15503 	/* Acer C720 Chromebook (Core i3 4005U) */
15504 	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
15505 
15506 	/* Apple Macbook 2,1 (Core 2 T7400) */
15507 	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
15508 
15509 	/* Apple Macbook 4,1 */
15510 	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
15511 
15512 	/* Toshiba CB35 Chromebook (Celeron 2955U) */
15513 	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
15514 
15515 	/* HP Chromebook 14 (Celeron 2955U) */
15516 	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
15517 
15518 	/* Dell Chromebook 11 */
15519 	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
15520 
15521 	/* Dell Chromebook 11 (2015 version) */
15522 	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
15523 };
15524 
15525 static void intel_init_quirks(struct drm_device *dev)
15526 {
15527 	struct pci_dev *d = dev->pdev;
15528 	int i;
15529 
15530 	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
15531 		struct intel_quirk *q = &intel_quirks[i];
15532 
15533 		if (d->device == q->device &&
15534 		    (d->subsystem_vendor == q->subsystem_vendor ||
15535 		     q->subsystem_vendor == PCI_ANY_ID) &&
15536 		    (d->subsystem_device == q->subsystem_device ||
15537 		     q->subsystem_device == PCI_ANY_ID))
15538 			q->hook(dev);
15539 	}
15540 	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
15541 		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
15542 			intel_dmi_quirks[i].hook(dev);
15543 	}
15544 }
15545 
15546 /* Disable the VGA plane that we never use */
15547 static void i915_disable_vga(struct drm_device *dev)
15548 {
15549 	struct drm_i915_private *dev_priv = dev->dev_private;
15550 	u8 sr1;
15551 	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
15552 
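	/*
	 * SR01 bit 5 is the VGA "screen off" bit: set it first and let it
	 * take effect (hence the udelay below) before disabling the VGA
	 * display plane itself.
	 */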
15553 	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
15554 	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
15555 	outb(VGA_SR_INDEX, SR01);
15556 	sr1 = inb(VGA_SR_DATA);
15557 	outb(VGA_SR_DATA, sr1 | 1 << 5);
15558 	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
15559 	udelay(300);
15560 
15561 	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
15562 	POSTING_READ(vga_reg);
15563 }
15564 
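/**
 * intel_modeset_init_hw - initialize display hardware state
 * @dev: drm device
 *
 * Reads back the current cdclk, seeds the atomic cdclk bookkeeping from
 * it, and (re)applies clock gating and GT power saving setup.
 */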
15565 void intel_modeset_init_hw(struct drm_device *dev)
15566 {
15567 	struct drm_i915_private *dev_priv = dev->dev_private;
15568 
15569 	intel_update_cdclk(dev);
15570 
15571 	dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
15572 
15573 	intel_init_clock_gating(dev);
15574 	intel_enable_gt_powersave(dev_priv);
15575 }
15576 
15577 /*
15578  * Calculate what we think the watermarks should be for the state we've read
15579  * out of the hardware and then immediately program those watermarks so that
15580  * we ensure the hardware settings match our internal state.
15581  *
15582  * We can calculate what we think WM's should be by creating a duplicate of the
15583  * current state (which was constructed during hardware readout) and running it
15584  * through the atomic check code to calculate new watermark values in the
15585  * state object.
15586  */
15587 static void sanitize_watermarks(struct drm_device *dev)
15588 {
15589 	struct drm_i915_private *dev_priv = to_i915(dev);
15590 	struct drm_atomic_state *state;
15591 	struct drm_crtc *crtc;
15592 	struct drm_crtc_state *cstate;
15593 	struct drm_modeset_acquire_ctx ctx;
15594 	int ret;
15595 	int i;
15596 
15597 	/* Only supported on platforms that use atomic watermark design */
15598 	if (!dev_priv->display.optimize_watermarks)
15599 		return;
15600 
15601 	/*
15602 	 * We need to hold connection_mutex before calling duplicate_state so
15603 	 * that the connector loop is protected.
15604 	 */
15605 	drm_modeset_acquire_init(&ctx, 0);
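	/* Standard drm locking dance: back off and retry on -EDEADLK. */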
15606 retry:
15607 	ret = drm_modeset_lock_all_ctx(dev, &ctx);
15608 	if (ret == -EDEADLK) {
15609 		drm_modeset_backoff(&ctx);
15610 		goto retry;
15611 	} else if (WARN_ON(ret)) {
15612 		goto fail;
15613 	}
15614 
15615 	state = drm_atomic_helper_duplicate_state(dev, &ctx);
15616 	if (WARN_ON(IS_ERR(state)))
15617 		goto fail;
15618 
15619 	/*
15620 	 * Hardware readout is the only time we don't want to calculate
15621 	 * intermediate watermarks (since we don't trust the current
15622 	 * watermarks).
15623 	 */
15624 	to_intel_atomic_state(state)->skip_intermediate_wm = true;
15625 
15626 	ret = intel_atomic_check(dev, state);
15627 	if (ret) {
15628 		/*
15629 		 * If we fail here, it means that the hardware appears to be
15630 		 * programmed in a way that shouldn't be possible, given our
15631 		 * understanding of watermark requirements.  This might mean a
15632 		 * mistake in the hardware readout code or a mistake in the
15633 		 * watermark calculations for a given platform.  Raise a WARN
15634 		 * so that this is noticeable.
15635 		 *
15636 		 * If this actually happens, we'll have to just leave the
15637 		 * BIOS-programmed watermarks untouched and hope for the best.
15638 		 */
15639 		WARN(true, "Could not determine valid watermarks for inherited state\n");
15640 		goto fail;
15641 	}
15642 
15643 	/* Write calculated watermark values back */
15644 	for_each_crtc_in_state(state, crtc, cstate, i) {
15645 		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
15646 
15647 		cs->wm.need_postvbl_update = true;
15648 		dev_priv->display.optimize_watermarks(cs);
15649 	}
15650 
15651 	drm_atomic_state_free(state);
15652 fail:
15653 	drm_modeset_drop_locks(&ctx);
15654 	drm_modeset_acquire_fini(&ctx);
15655 }
15656 
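/**
 * intel_modeset_init - one-time display stack initialization
 * @dev: drm device
 *
 * Sets up the mode config limits and hooks, applies quirks, creates the
 * crtcs and sprite planes for every pipe, reads out and sanitizes the
 * BIOS-programmed hardware state, and takes over the BIOS framebuffer
 * where possible for smooth boot transitions.
 */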
15657 void intel_modeset_init(struct drm_device *dev)
15658 {
15659 	struct drm_i915_private *dev_priv = to_i915(dev);
15660 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
15661 	int sprite, ret;
15662 	enum i915_pipe pipe;
15663 	struct intel_crtc *crtc;
15664 
15665 	drm_mode_config_init(dev);
15666 
15667 	dev->mode_config.min_width = 0;
15668 	dev->mode_config.min_height = 0;
15669 
15670 	dev->mode_config.preferred_depth = 24;
15671 	dev->mode_config.prefer_shadow = 1;
15672 
15673 	dev->mode_config.allow_fb_modifiers = true;
15674 
15675 	dev->mode_config.funcs = &intel_mode_funcs;
15676 
15677 	intel_init_quirks(dev);
15678 
15679 	intel_init_pm(dev);
15680 
15681 	if (INTEL_INFO(dev)->num_pipes == 0)
15682 		return;
15683 
15684 	/*
15685 	 * There may be no VBT; and if the BIOS enabled SSC we can
15686 	 * just keep using it to avoid unnecessary flicker.  Whereas if the
15687 	 * BIOS isn't using it, don't assume it will work even if the VBT
15688 	 * indicates as much.
15689 	 */
15690 	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
15691 		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
15692 					    DREF_SSC1_ENABLE);
15693 
15694 		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
15695 			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
15696 				     bios_lvds_use_ssc ? "en" : "dis",
15697 				     dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
15698 			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
15699 		}
15700 	}
15701 
15702 	if (IS_GEN2(dev)) {
15703 		dev->mode_config.max_width = 2048;
15704 		dev->mode_config.max_height = 2048;
15705 	} else if (IS_GEN3(dev)) {
15706 		dev->mode_config.max_width = 4096;
15707 		dev->mode_config.max_height = 4096;
15708 	} else {
15709 		dev->mode_config.max_width = 8192;
15710 		dev->mode_config.max_height = 8192;
15711 	}
15712 
15713 	if (IS_845G(dev) || IS_I865G(dev)) {
15714 		dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
15715 		dev->mode_config.cursor_height = 1023;
15716 	} else if (IS_GEN2(dev)) {
15717 		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
15718 		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
15719 	} else {
15720 		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
15721 		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
15722 	}
15723 
15724 	dev->mode_config.fb_base = ggtt->mappable_base;
15725 
15726 	DRM_DEBUG_KMS("%d display pipe%s available.\n",
15727 		      INTEL_INFO(dev)->num_pipes,
15728 		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
15729 
15730 	for_each_pipe(dev_priv, pipe) {
15731 		intel_crtc_init(dev, pipe);
15732 		for_each_sprite(dev_priv, pipe, sprite) {
15733 			ret = intel_plane_init(dev, pipe, sprite);
15734 			if (ret)
15735 				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
15736 					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
15737 		}
15738 	}
15739 
15740 	intel_update_czclk(dev_priv);
15741 	intel_update_cdclk(dev);
15742 
15743 	intel_shared_dpll_init(dev);
15744 
15745 	if (dev_priv->max_cdclk_freq == 0)
15746 		intel_update_max_cdclk(dev);
15747 
15748 	/* Just disable the VGA plane once at startup */
15749 	i915_disable_vga(dev);
15750 	intel_setup_outputs(dev);
15751 
15752 	drm_modeset_lock_all(dev);
15753 	intel_modeset_setup_hw_state(dev);
15754 	drm_modeset_unlock_all(dev);
15755 
15756 	for_each_intel_crtc(dev, crtc) {
15757 		struct intel_initial_plane_config plane_config = {};
15758 
15759 		if (!crtc->active)
15760 			continue;
15761 
15762 		/*
15763 		 * Note that reserving the BIOS fb up front prevents us
15764 		 * from stuffing other stolen allocations like the ring
15765 		 * on top.  This prevents some ugliness at boot time, and
15766 		 * can even allow for smooth boot transitions if the BIOS
15767 		 * fb is large enough for the active pipe configuration.
15768 		 */
15769 		dev_priv->display.get_initial_plane_config(crtc,
15770 							   &plane_config);
15771 
15772 		/*
15773 		 * If the fb is shared between multiple heads, we'll
15774 		 * just get the first one.
15775 		 */
15776 		intel_find_initial_plane_obj(crtc, &plane_config);
15777 	}
15778 
15779 	/*
15780 	 * Make sure hardware watermarks really match the state we read out.
15781 	 * Note that we need to do this after reconstructing the BIOS fb's
15782 	 * since the watermark calculation done here will use pstate->fb.
15783 	 */
15784 	sanitize_watermarks(dev);
15785 }
15786 
15787 static void intel_enable_pipe_a(struct drm_device *dev)
15788 {
15789 	struct intel_connector *connector;
15790 	struct drm_connector *crt = NULL;
15791 	struct intel_load_detect_pipe load_detect_temp;
15792 	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
15793 
15794 	/* We can't just switch on pipe A; we need to set things up with a
15795 	 * proper mode and output configuration. As a gross hack, enable pipe A
15796 	 * by enabling the load detect pipe once. */
15797 	for_each_intel_connector(dev, connector) {
15798 		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
15799 			crt = &connector->base;
15800 			break;
15801 		}
15802 	}
15803 
15804 	if (!crt)
15805 		return;
15806 
15807 	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
15808 		intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
15809 }
15810 
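/*
 * gen2/3 have a configurable plane -> pipe mapping. Returns false when
 * the other display plane is enabled and selects this crtc's pipe, i.e.
 * when the BIOS left the mapping crossed.
 */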
15811 static bool
15812 intel_check_plane_mapping(struct intel_crtc *crtc)
15813 {
15814 	struct drm_device *dev = crtc->base.dev;
15815 	struct drm_i915_private *dev_priv = dev->dev_private;
15816 	u32 val;
15817 
15818 	if (INTEL_INFO(dev)->num_pipes == 1)
15819 		return true;
15820 
15821 	val = I915_READ(DSPCNTR(!crtc->plane));
15822 
15823 	if ((val & DISPLAY_PLANE_ENABLE) &&
15824 	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
15825 		return false;
15826 
15827 	return true;
15828 }
15829 
15830 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15831 {
15832 	struct drm_device *dev = crtc->base.dev;
15833 	struct intel_encoder *encoder;
15834 
15835 	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15836 		return true;
15837 
15838 	return false;
15839 }
15840 
15841 static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
15842 {
15843 	struct drm_device *dev = encoder->base.dev;
15844 	struct intel_connector *connector;
15845 
15846 	for_each_connector_on_encoder(dev, &encoder->base, connector)
15847 		return true;
15848 
15849 	return false;
15850 }
15851 
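/*
 * Bring one crtc from whatever state the BIOS left it in to a state the
 * driver can cope with: clear frame start delays, resync vblank
 * bookkeeping, disable non-primary planes, fix crossed gen2/3 plane
 * mappings, apply the pipe A force quirk and shut down crtcs that have
 * no active encoders.
 */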
15852 static void intel_sanitize_crtc(struct intel_crtc *crtc)
15853 {
15854 	struct drm_device *dev = crtc->base.dev;
15855 	struct drm_i915_private *dev_priv = dev->dev_private;
15856 	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
15857 
15858 	/* Clear any frame start delays (used for debugging) left by the BIOS */
15859 	if (!transcoder_is_dsi(cpu_transcoder)) {
15860 		i915_reg_t reg = PIPECONF(cpu_transcoder);
15861 
15862 		I915_WRITE(reg,
15863 			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
15864 	}
15865 
15866 	/* restore vblank interrupts to correct state */
15867 	drm_crtc_vblank_reset(&crtc->base);
15868 	if (crtc->active) {
15869 		struct intel_plane *plane;
15870 
15871 		drm_crtc_vblank_on(&crtc->base);
15872 
15873 		/* Disable everything but the primary plane */
15874 		for_each_intel_plane_on_crtc(dev, crtc, plane) {
15875 			if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
15876 				continue;
15877 
15878 			plane->disable_plane(&plane->base, &crtc->base);
15879 		}
15880 	}
15881 
15882 	/* We need to sanitize the plane -> pipe mapping first because this will
15883 	 * disable the crtc (and hence change the state) if it is wrong. Note
15884 	 * that gen4+ has a fixed plane -> pipe mapping.  */
15885 	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
15886 		enum plane plane;
15887 
15888 		DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
15889 			      crtc->base.base.id, crtc->base.name);
15890 
15891 		/* Pipe has the wrong plane attached and the plane is active.
15892 		 * Temporarily change the plane mapping and disable everything
15893 		 * ...  */
15894 		plane = crtc->plane;
15895 		to_intel_plane_state(crtc->base.primary->state)->visible = true;
15896 		crtc->plane = !plane;
15897 		intel_crtc_disable_noatomic(&crtc->base);
15898 		crtc->plane = plane;
15899 	}
15900 
15901 	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
15902 	    crtc->pipe == PIPE_A && !crtc->active) {
15903 		/* The BIOS forgot to enable pipe A; this mostly happens after
15904 		 * resume. Force-enable the pipe to fix it: the sanitization
15905 		 * below restores the pipe to the right state, but leaves
15906 		 * the required bits on. */
15907 		intel_enable_pipe_a(dev);
15908 	}
15909 
15910 	/* Adjust the state of the output pipe according to whether we
15911 	 * have active connectors/encoders. */
15912 	if (crtc->active && !intel_crtc_has_encoders(crtc))
15913 		intel_crtc_disable_noatomic(&crtc->base);
15914 
15915 	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
15916 		/*
15917 		 * We start out with underrun reporting disabled to avoid races.
15918 		 * For correct bookkeeping mark this on active crtcs.
15919 		 *
15920 		 * Also on gmch platforms we don't have any hardware bits to
15921 		 * disable the underrun reporting, which means we need to start
15922 		 * out with underrun reporting disabled also on inactive pipes,
15923 		 * since otherwise we'll complain about the garbage we read when
15924 		 * e.g. coming up after runtime pm.
15925 		 *
15926 		 * No protection against concurrent access is required - at
15927 		 * worst a fifo underrun happens which also sets this to false.
15928 		 */
15929 		crtc->cpu_fifo_underrun_disabled = true;
15930 		crtc->pch_fifo_underrun_disabled = true;
15931 	}
15932 }
15933 
15934 static void intel_sanitize_encoder(struct intel_encoder *encoder)
15935 {
15936 	struct intel_connector *connector;
15937 	struct drm_device *dev = encoder->base.dev;
15938 
15939 	/* We need to check both for a crtc link (meaning that the
15940 	 * encoder is active and trying to read from a pipe) and the
15941 	 * pipe itself being active. */
15942 	bool has_active_crtc = encoder->base.crtc &&
15943 		to_intel_crtc(encoder->base.crtc)->active;
15944 
15945 	if (intel_encoder_has_connectors(encoder) && !has_active_crtc) {
15946 		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
15947 			      encoder->base.base.id,
15948 			      encoder->base.name);
15949 
15950 		/* Connector is active, but has no active pipe. This is
15951 		 * fallout from the register restore on resume. Disable
15952 		 * the encoder manually again. */
15953 		if (encoder->base.crtc) {
15954 			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
15955 				      encoder->base.base.id,
15956 				      encoder->base.name);
15957 			encoder->disable(encoder);
15958 			if (encoder->post_disable)
15959 				encoder->post_disable(encoder);
15960 		}
15961 		encoder->base.crtc = NULL;
15962 
15963 		/* Inconsistent output/port/pipe state is presumably due to
15964 		 * a bug in one of the get_hw_state functions, or somewhere else
15965 		 * in our code, like the register restore mess on resume. Clamp
15966 		 * things to off as a safer default. */
15967 		for_each_intel_connector(dev, connector) {
15968 			if (connector->encoder != encoder)
15969 				continue;
15970 			connector->base.dpms = DRM_MODE_DPMS_OFF;
15971 			connector->base.encoder = NULL;
15972 		}
15973 	}
15974 	/* Enabled encoders without active connectors will be fixed in
15975 	 * the crtc fixup. */
15976 }
15977 
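/* Turn the VGA plane back off if something behind our back enabled it. */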
15978 void i915_redisable_vga_power_on(struct drm_device *dev)
15979 {
15980 	struct drm_i915_private *dev_priv = dev->dev_private;
15981 	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
15982 
15983 	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15984 		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15985 		i915_disable_vga(dev);
15986 	}
15987 }
15988 
15989 void i915_redisable_vga(struct drm_device *dev)
15990 {
15991 	struct drm_i915_private *dev_priv = dev->dev_private;
15992 
15993 	/* This function can be called either from intel_modeset_setup_hw_state or
15994 	 * at a very early point in our resume sequence, where the power well
15995 	 * structures are not yet restored. Since this function is at a very
15996 	 * paranoid "someone might have enabled VGA while we were not looking"
15997 	 * level, just check if the power well is enabled instead of trying to
15998 	 * follow the "don't touch the power well if we don't need it" policy
15999 	 * the rest of the driver uses. */
16000 	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
16001 		return;
16002 
16003 	i915_redisable_vga_power_on(dev);
16004 
16005 	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
16006 }
16007 
16008 static bool primary_get_hw_state(struct intel_plane *plane)
16009 {
16010 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
16011 
16012 	return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
16013 }
16014 
16015 /* FIXME read out full plane state for all planes */
16016 static void readout_plane_state(struct intel_crtc *crtc)
16017 {
16018 	struct drm_plane *primary = crtc->base.primary;
16019 	struct intel_plane_state *plane_state =
16020 		to_intel_plane_state(primary->state);
16021 
16022 	plane_state->visible = crtc->active &&
16023 		primary_get_hw_state(to_intel_plane(primary));
16024 
16025 	if (plane_state->visible)
16026 		crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
16027 }
16028 
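/*
 * Read the current hardware state into our software tracking: per-crtc
 * pipe config and minimum pixel clock, primary plane visibility, shared
 * DPLL usage, encoder -> pipe links and connector DPMS state.
 */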
16029 static void intel_modeset_readout_hw_state(struct drm_device *dev)
16030 {
16031 	struct drm_i915_private *dev_priv = dev->dev_private;
16032 	enum i915_pipe pipe;
16033 	struct intel_crtc *crtc;
16034 	struct intel_encoder *encoder;
16035 	struct intel_connector *connector;
16036 	int i;
16037 
16038 	dev_priv->active_crtcs = 0;
16039 
16040 	for_each_intel_crtc(dev, crtc) {
16041 		struct intel_crtc_state *crtc_state = crtc->config;
16042 		int pixclk = 0;
16043 
16044 		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
16045 		memset(crtc_state, 0, sizeof(*crtc_state));
16046 		crtc_state->base.crtc = &crtc->base;
16047 
16048 		crtc_state->base.active = crtc_state->base.enable =
16049 			dev_priv->display.get_pipe_config(crtc, crtc_state);
16050 
16051 		crtc->base.enabled = crtc_state->base.enable;
16052 		crtc->active = crtc_state->base.active;
16053 
16054 		if (crtc_state->base.active) {
16055 			dev_priv->active_crtcs |= 1 << crtc->pipe;
16056 
16057 			if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
16058 				pixclk = ilk_pipe_pixel_rate(crtc_state);
16059 			else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
16060 				pixclk = crtc_state->base.adjusted_mode.crtc_clock;
16061 			else
16062 				WARN_ON(dev_priv->display.modeset_calc_cdclk);
16063 
16064 			/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
16065 			if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
16066 				pixclk = DIV_ROUND_UP(pixclk * 100, 95);
16067 		}
16068 
16069 		dev_priv->min_pixclk[crtc->pipe] = pixclk;
16070 
16071 		readout_plane_state(crtc);
16072 
16073 		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
16074 			      crtc->base.base.id, crtc->base.name,
16075 			      crtc->active ? "enabled" : "disabled");
16076 	}
16077 
16078 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
16079 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
16080 
16081 		pll->on = pll->funcs.get_hw_state(dev_priv, pll,
16082 						  &pll->config.hw_state);
16083 		pll->config.crtc_mask = 0;
16084 		for_each_intel_crtc(dev, crtc) {
16085 			if (crtc->active && crtc->config->shared_dpll == pll)
16086 				pll->config.crtc_mask |= 1 << crtc->pipe;
16087 		}
16088 		pll->active_mask = pll->config.crtc_mask;
16089 
16090 		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
16091 			      pll->name, pll->config.crtc_mask, pll->on);
16092 	}
16093 
16094 	for_each_intel_encoder(dev, encoder) {
16095 		pipe = 0;
16096 
16097 		if (encoder->get_hw_state(encoder, &pipe)) {
16098 			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
16099 			encoder->base.crtc = &crtc->base;
16100 			encoder->get_config(encoder, crtc->config);
16101 		} else {
16102 			encoder->base.crtc = NULL;
16103 		}
16104 
16105 		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
16106 			      encoder->base.base.id,
16107 			      encoder->base.name,
16108 			      encoder->base.crtc ? "enabled" : "disabled",
16109 			      pipe_name(pipe));
16110 	}
16111 
16112 	for_each_intel_connector(dev, connector) {
16113 		if (connector->get_hw_state(connector)) {
16114 			connector->base.dpms = DRM_MODE_DPMS_ON;
16115 
16116 			encoder = connector->encoder;
16117 			connector->base.encoder = &encoder->base;
16118 
16119 			if (encoder->base.crtc &&
16120 			    encoder->base.crtc->state->active) {
16121 				/*
16122 				 * This has to be done during hardware readout
16123 				 * because anything calling .crtc_disable may
16124 				 * rely on the connector_mask being accurate.
16125 				 */
16126 				encoder->base.crtc->state->connector_mask |=
16127 					1 << drm_connector_index(&connector->base);
16128 				encoder->base.crtc->state->encoder_mask |=
16129 					1 << drm_encoder_index(&encoder->base);
16130 			}
16131 
16132 		} else {
16133 			connector->base.dpms = DRM_MODE_DPMS_OFF;
16134 			connector->base.encoder = NULL;
16135 		}
16136 		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
16137 			      connector->base.base.id,
16138 			      connector->base.name,
16139 			      connector->base.encoder ? "enabled" : "disabled");
16140 	}
16141 
16142 	for_each_intel_crtc(dev, crtc) {
16143 		crtc->base.hwmode = crtc->config->base.adjusted_mode;
16144 
16145 		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
16146 		if (crtc->base.state->active) {
16147 			intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
16148 			intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
16149 			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
16150 
16151 			/*
16152 			 * The initial mode needs to be set in order to keep
16153 			 * the atomic core happy. It wants a valid mode if the
16154 			 * crtc's enabled, so we do the above call.
16155 			 *
16156 			 * At this point some state updated by the connectors
16157 			 * in their ->detect() callback has not run yet, so
16158 			 * no recalculation can be done yet.
16159 			 *
16160 			 * Even if we could do a recalculation and modeset
16161 			 * right now it would cause a double modeset if
16162 			 * fbdev or userspace chooses a different initial mode.
16163 			 *
16164 			 * If that happens, someone indicated they wanted a
16165 			 * mode change, which means it's safe to do a full
16166 			 * recalculation.
16167 			 */
16168 			crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
16169 
16170 			drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
16171 			update_scanline_offset(crtc);
16172 		}
16173 
16174 		intel_pipe_config_sanity_check(dev_priv, crtc->config);
16175 	}
16176 }
16177 
16178 /* Scan out the current hw modeset state and sanitize it so it matches
16179  * the driver's idea of the current state.
16180  */
16181 static void
16182 intel_modeset_setup_hw_state(struct drm_device *dev)
16183 {
16184 	struct drm_i915_private *dev_priv = dev->dev_private;
16185 	enum i915_pipe pipe;
16186 	struct intel_crtc *crtc;
16187 	struct intel_encoder *encoder;
16188 	int i;
16189 
16190 	intel_modeset_readout_hw_state(dev);
16191 
16192 	/* HW state is read out, now we need to sanitize this mess. */
16193 	for_each_intel_encoder(dev, encoder) {
16194 		intel_sanitize_encoder(encoder);
16195 	}
16196 
16197 	for_each_pipe(dev_priv, pipe) {
16198 		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
16199 		intel_sanitize_crtc(crtc);
16200 		intel_dump_pipe_config(crtc, crtc->config,
16201 				       "[setup_hw_state]");
16202 	}
16203 
16204 	intel_modeset_update_connector_atomic_state(dev);
16205 
16206 	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
16207 		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
16208 
16209 		if (!pll->on || pll->active_mask)
16210 			continue;
16211 
16212 		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
16213 
16214 		pll->funcs.disable(dev_priv, pll);
16215 		pll->on = false;
16216 	}
16217 
16218 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
16219 		vlv_wm_get_hw_state(dev);
16220 	else if (IS_GEN9(dev))
16221 		skl_wm_get_hw_state(dev);
16222 	else if (HAS_PCH_SPLIT(dev))
16223 		ilk_wm_get_hw_state(dev);
16224 
16225 	for_each_intel_crtc(dev, crtc) {
16226 		unsigned long put_domains;
16227 
16228 		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
16229 		if (WARN_ON(put_domains))
16230 			modeset_put_power_domains(dev_priv, put_domains);
16231 	}
16232 	intel_display_set_init_power(dev_priv, false);
16233 
16234 	intel_fbc_init_pipe_state(dev_priv);
16235 }
16236 
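/**
 * intel_display_resume - restore the display state on resume
 * @dev: drm device
 *
 * Re-reads and sanitizes the hardware state, then recommits the atomic
 * state saved at suspend time (if any), forcing a recalculation on every
 * crtc even when the restored state looks compatible.
 */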
16237 void intel_display_resume(struct drm_device *dev)
16238 {
16239 	struct drm_i915_private *dev_priv = to_i915(dev);
16240 	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
16241 	struct drm_modeset_acquire_ctx ctx;
16242 	int ret;
16243 	bool setup = false;
16244 
16245 	dev_priv->modeset_restore_state = NULL;
16246 
16247 	/*
16248 	 * This is a kludge because with real atomic modeset mode_config.mutex
16249 	 * won't be taken. Unfortunately some probed state like
16250 	 * audio_codec_enable is still protected by mode_config.mutex, so lock
16251 	 * it here for now.
16252 	 */
16253 	mutex_lock(&dev->mode_config.mutex);
16254 	drm_modeset_acquire_init(&ctx, 0);
16255 
16256 retry:
16257 	ret = drm_modeset_lock_all_ctx(dev, &ctx);
16258 
16259 	if (ret == 0 && !setup) {
16260 		setup = true;
16261 
16262 		intel_modeset_setup_hw_state(dev);
16263 		i915_redisable_vga(dev);
16264 	}
16265 
16266 	if (ret == 0 && state) {
16267 		struct drm_crtc_state *crtc_state;
16268 		struct drm_crtc *crtc;
16269 		int i;
16270 
16271 		state->acquire_ctx = &ctx;
16272 
16273 		/* ignore any reset values/BIOS leftovers in the WM registers */
16274 		to_intel_atomic_state(state)->skip_intermediate_wm = true;
16275 
16276 		for_each_crtc_in_state(state, crtc, crtc_state, i) {
16277 			/*
16278 			 * Force recalculation even if we restore
16279 			 * current state. With fast modeset this may not result
16280 			 * in a modeset when the state is compatible.
16281 			 */
16282 			crtc_state->mode_changed = true;
16283 		}
16284 
16285 		ret = drm_atomic_commit(state);
16286 	}
16287 
16288 	if (ret == -EDEADLK) {
16289 		drm_modeset_backoff(&ctx);
16290 		goto retry;
16291 	}
16292 
16293 	drm_modeset_drop_locks(&ctx);
16294 	drm_modeset_acquire_fini(&ctx);
16295 	mutex_unlock(&dev->mode_config.mutex);
16296 
16297 	if (ret) {
16298 		DRM_ERROR("Restoring old state failed with %i\n", ret);
16299 		drm_atomic_state_free(state);
16300 	}
16301 }
16302 
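/*
 * GEM-dependent part of modeset init: runs once GEM is up, so the BIOS
 * framebuffers taken over in intel_modeset_init() can finally be pinned
 * and fenced for scanout.
 */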
16303 void intel_modeset_gem_init(struct drm_device *dev)
16304 {
16305 	struct drm_i915_private *dev_priv = to_i915(dev);
16306 	struct drm_crtc *c;
16307 	struct drm_i915_gem_object *obj;
16308 	int ret;
16309 
16310 	intel_init_gt_powersave(dev_priv);
16311 
16312 	intel_modeset_init_hw(dev);
16313 
16314 	intel_setup_overlay(dev_priv);
16315 
16316 	/*
16317 	 * Make sure any fbs we allocated at startup are properly
16318 	 * pinned & fenced.  When we do the allocation it's too early
16319 	 * for this.
16320 	 */
16321 	for_each_crtc(dev, c) {
16322 		obj = intel_fb_obj(c->primary->fb);
16323 		if (obj == NULL)
16324 			continue;
16325 
16326 		mutex_lock(&dev->struct_mutex);
16327 		ret = intel_pin_and_fence_fb_obj(c->primary->fb,
16328 						 c->primary->state->rotation);
16329 		mutex_unlock(&dev->struct_mutex);
16330 		if (ret) {
16331 			DRM_ERROR("failed to pin boot fb on pipe %d\n",
16332 				  to_intel_crtc(c)->pipe);
16333 			drm_framebuffer_unreference(c->primary->fb);
16334 			c->primary->fb = NULL;
16335 			c->primary->crtc = c->primary->state->crtc = NULL;
16336 			update_state_fb(c->primary);
16337 			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
16338 		}
16339 	}
16340 }
16341 
16342 int intel_connector_register(struct drm_connector *connector)
16343 {
16344 	struct intel_connector *intel_connector = to_intel_connector(connector);
16345 	int ret;
16346 
16347 	ret = intel_backlight_device_register(intel_connector);
16348 	if (ret)
16349 		goto err;
16350 
16351 	return 0;
16352 
16353 err:
16354 	return ret;
16355 }
16356 
16357 void intel_connector_unregister(struct drm_connector *connector)
16358 {
16359 	struct intel_connector *intel_connector = to_intel_connector(connector);
16360 
16361 	intel_backlight_device_unregister(intel_connector);
16362 	intel_panel_destroy_backlight(connector);
16363 }
16364 
16365 void intel_modeset_cleanup(struct drm_device *dev)
16366 {
16367 	struct drm_i915_private *dev_priv = dev->dev_private;
16368 
16369 	intel_disable_gt_powersave(dev_priv);
16370 
16371 	/*
16372 	 * Disable interrupts and polling first to avoid creating havoc.
16373 	 * Too much stuff here (turning off connectors, ...) would
16374 	 * experience fancy races otherwise.
16375 	 */
16376 	intel_irq_uninstall(dev_priv);
16377 
16378 	/*
16379 	 * Due to the hpd irq storm handling the hotplug work can re-arm the
16380 	 * poll handlers. Hence disable polling after hpd handling is shut down.
16381 	 */
16382 	drm_kms_helper_poll_fini(dev);
16383 
16384 	intel_unregister_dsm_handler();
16385 
16386 	intel_fbc_global_disable(dev_priv);
16387 
16388 	/* flush any delayed tasks or pending work */
16389 	flush_scheduled_work();
16390 
16391 	drm_mode_config_cleanup(dev);
16392 
16393 	intel_cleanup_overlay(dev_priv);
16394 
16395 	intel_cleanup_gt_powersave(dev_priv);
16396 
16397 	intel_teardown_gmbus(dev);
16398 }
16399 
16400 void intel_connector_attach_encoder(struct intel_connector *connector,
16401 				    struct intel_encoder *encoder)
16402 {
16403 	connector->encoder = encoder;
16404 	drm_mode_connector_attach_encoder(&connector->base,
16405 					  &encoder->base);
16406 }
16407 
16408 /*
16409  * Set VGA decode state - true == enable VGA decode.
16410  */
16411 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
16412 {
16413 	struct drm_i915_private *dev_priv = dev->dev_private;
16414 	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
16415 	u16 gmch_ctrl;
16416 
16417 	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
16418 		DRM_ERROR("failed to read control word\n");
16419 		return -EIO;
16420 	}
16421 
16422 	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
16423 		return 0;
16424 
16425 	if (state)
16426 		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16427 	else
16428 		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
16429 
16430 	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16431 		DRM_ERROR("failed to write control word\n");
16432 		return -EIO;
16433 	}
16434 
16435 	return 0;
16436 }
16437 
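/*
 * Raw display register snapshot captured at GPU error time: plane,
 * cursor and pipe state for every pipe, plus up to four transcoders;
 * printed out by intel_display_print_error_state().
 */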
16438 struct intel_display_error_state {
16439 
16440 	u32 power_well_driver;
16441 
16442 	int num_transcoders;
16443 
16444 	struct intel_cursor_error_state {
16445 		u32 control;
16446 		u32 position;
16447 		u32 base;
16448 		u32 size;
16449 	} cursor[I915_MAX_PIPES];
16450 
16451 	struct intel_pipe_error_state {
16452 		bool power_domain_on;
16453 		u32 source;
16454 		u32 stat;
16455 	} pipe[I915_MAX_PIPES];
16456 
16457 	struct intel_plane_error_state {
16458 		u32 control;
16459 		u32 stride;
16460 		u32 size;
16461 		u32 pos;
16462 		u32 addr;
16463 		u32 surface;
16464 		u32 tile_offset;
16465 	} plane[I915_MAX_PIPES];
16466 
16467 	struct intel_transcoder_error_state {
16468 		bool power_domain_on;
16469 		enum transcoder cpu_transcoder;
16470 
16471 		u32 conf;
16472 
16473 		u32 htotal;
16474 		u32 hblank;
16475 		u32 hsync;
16476 		u32 vtotal;
16477 		u32 vblank;
16478 		u32 vsync;
16479 	} transcoder[4];
16480 };
16481 
16482 struct intel_display_error_state *
16483 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
16484 {
16485 	struct intel_display_error_state *error;
16486 	int transcoders[] = {
16487 		TRANSCODER_A,
16488 		TRANSCODER_B,
16489 		TRANSCODER_C,
16490 		TRANSCODER_EDP,
16491 	};
16492 	int i;
16493 
16494 	if (INTEL_INFO(dev_priv)->num_pipes == 0)
16495 		return NULL;
16496 
16497 	error = kzalloc(sizeof(*error), GFP_ATOMIC);
16498 	if (error == NULL)
16499 		return NULL;
16500 
16501 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
16502 		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
16503 
16504 	for_each_pipe(dev_priv, i) {
16505 		error->pipe[i].power_domain_on =
16506 			__intel_display_power_is_enabled(dev_priv,
16507 							 POWER_DOMAIN_PIPE(i));
16508 		if (!error->pipe[i].power_domain_on)
16509 			continue;
16510 
16511 		error->cursor[i].control = I915_READ(CURCNTR(i));
16512 		error->cursor[i].position = I915_READ(CURPOS(i));
16513 		error->cursor[i].base = I915_READ(CURBASE(i));
16514 
16515 		error->plane[i].control = I915_READ(DSPCNTR(i));
16516 		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
16517 		if (INTEL_GEN(dev_priv) <= 3) {
16518 			error->plane[i].size = I915_READ(DSPSIZE(i));
16519 			error->plane[i].pos = I915_READ(DSPPOS(i));
16520 		}
16521 		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
16522 			error->plane[i].addr = I915_READ(DSPADDR(i));
16523 		if (INTEL_GEN(dev_priv) >= 4) {
16524 			error->plane[i].surface = I915_READ(DSPSURF(i));
16525 			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
16526 		}
16527 
16528 		error->pipe[i].source = I915_READ(PIPESRC(i));
16529 
16530 		if (HAS_GMCH_DISPLAY(dev_priv))
16531 			error->pipe[i].stat = I915_READ(PIPESTAT(i));
16532 	}
16533 
16534 	/* Note: this does not include DSI transcoders. */
16535 	error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
16536 	if (HAS_DDI(dev_priv))
16537 		error->num_transcoders++; /* Account for eDP. */
16538 
16539 	for (i = 0; i < error->num_transcoders; i++) {
16540 		enum transcoder cpu_transcoder = transcoders[i];
16541 
16542 		error->transcoder[i].power_domain_on =
16543 			__intel_display_power_is_enabled(dev_priv,
16544 				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
16545 		if (!error->transcoder[i].power_domain_on)
16546 			continue;
16547 
16548 		error->transcoder[i].cpu_transcoder = cpu_transcoder;
16549 
16550 		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
16551 		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
16552 		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
16553 		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
16554 		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
16555 		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
16556 		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
16557 	}
16558 
16559 	return error;
16560 }
16561 
16562 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
16563 
16564 void
16565 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
16566 				struct drm_device *dev,
16567 				struct intel_display_error_state *error)
16568 {
16569 	struct drm_i915_private *dev_priv = dev->dev_private;
16570 	int i;
16571 
16572 	if (!error)
16573 		return;
16574 
16575 	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
16576 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
16577 		err_printf(m, "PWR_WELL_CTL2: %08x\n",
16578 			   error->power_well_driver);
16579 	for_each_pipe(dev_priv, i) {
16580 		err_printf(m, "Pipe [%d]:\n", i);
16581 		err_printf(m, "  Power: %s\n",
16582 			   onoff(error->pipe[i].power_domain_on));
16583 		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
16584 		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
16585 
16586 		err_printf(m, "Plane [%d]:\n", i);
16587 		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
16588 		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
16589 		if (INTEL_INFO(dev)->gen <= 3) {
16590 			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
16591 			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
16592 		}
16593 		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
16594 			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
16595 		if (INTEL_INFO(dev)->gen >= 4) {
16596 			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
16597 			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
16598 		}
16599 
16600 		err_printf(m, "Cursor [%d]:\n", i);
16601 		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
16602 		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
16603 		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
16604 	}
16605 
16606 	for (i = 0; i < error->num_transcoders; i++) {
16607 		err_printf(m, "CPU transcoder: %s\n",
16608 			   transcoder_name(error->transcoder[i].cpu_transcoder));
16609 		err_printf(m, "  Power: %s\n",
16610 			   onoff(error->transcoder[i].power_domain_on));
16611 		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
16612 		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
16613 		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
16614 		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
16615 		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
16616 		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
16617 		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
16618 	}
16619 }
16620