/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <acpi/video.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/intel-iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>

#include "display/intel_audio.h"
#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_display_debugfs.h"
#include "display/intel_dp.h"
#include "display/intel_dp_mst.h"
#include "display/intel_dpll.h"
#include "display/intel_dpll_mgr.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_fb.h"
#include "display/intel_gmbus.h"
#include "display/intel_hdmi.h"
#include "display/intel_lvds.h"
#include "display/intel_sdvo.h"
#include "display/intel_tv.h"
#include "display/intel_vdsc.h"
#include "display/intel_vrr.h"

#include "gem/i915_gem_object.h"

#include "gt/intel_rps.h"

#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "i915_drv.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_csr.h"
#include "intel_display_types.h"
#include "intel_dp_link_training.h"
#include "intel_fbc.h"
#include "intel_fdi.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_overlay.h"
#include "intel_pipe_crc.h"
#include "intel_pm.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_sideband.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"
#include "i9xx_plane.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ilk_pch_clock_get(struct intel_crtc *crtc,
			      struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
					 const struct intel_link_m_n *m_n,
					 const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
					 struct drm_modeset_acquire_ctx *ctx);

/* returns HPLL frequency in kHz */
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;

	return vco_freq[hpll_freq] * 1000;
}

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
		 "%s change in progress\n", name);

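	/*
	 * The CCK clock works out to ref_freq * 2 / (divider + 1).
	 * Illustrative example (not real fuse data): with an 800 MHz
	 * HPLL VCO (ref_freq = 800000 kHz) and a divider field of 3,
	 * this yields 2 * 800000 / 4 = 400000 kHz.
	 */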
	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}

int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg)
{
	int hpll;

	vlv_cck_get(dev_priv);

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);

	hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);

	vlv_cck_put(dev_priv);

	return hpll;
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
		dev_priv->czclk_freq);
}

/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
	if (enable)
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
	else
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}

/* Wa_2006604312:icl,ehl */
static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
		       bool enable)
{
	if (enable)
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
	else
		intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
			       intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}

static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->master_transcoder != INVALID_TRANSCODER;
}

static bool
is_trans_port_sync_master(const struct intel_crtc_state *crtc_state)
{
	return crtc_state->sync_mode_slaves_mask != 0;
}

bool
is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state)
{
	return is_trans_port_sync_master(crtc_state) ||
		is_trans_port_sync_slave(crtc_state);
}

static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_DISPLAY_VER(dev_priv, 2))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

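	/*
	 * Sample the scanline counter twice, 5 ms apart; if the value
	 * changed between the two reads, the pipe is still scanning out.
	 */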
	line1 = intel_de_read(dev_priv, reg) & line_mask;
	msleep(5);
	line2 = intel_de_read(dev_priv, reg) & line_mask;

	return line1 != line2;
}

static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/* Wait for the display line to settle/start moving */
	if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
		drm_err(&dev_priv->drm,
			"pipe %c scanline %s wait timed out\n",
			pipe_name(pipe), onoff(state));
}

static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, false);
}

static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
	wait_for_pipe_scanline_moving(crtc, true);
}

static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (DISPLAY_VER(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			drm_WARN(&dev_priv->drm, 1,
				 "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	vlv_cck_get(dev_priv);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	vlv_cck_put(dev_priv);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		u32 val = intel_de_read(dev_priv,
					TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_IRONLAKE(dev_priv))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = INVALID_PIPE;
	bool locked = true;

	if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
		return;

	if (HAS_PCH_SPLIT(dev_priv)) {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		switch (port_sel) {
		case PANEL_PORT_SELECT_LVDS:
			intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPA:
			g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPC:
			g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
			break;
		case PANEL_PORT_SELECT_DPD:
			g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
			break;
		default:
			MISSING_CASE(port_sel);
			break;
		}
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		u32 port_sel;

		pp_reg = PP_CONTROL(0);
		port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;

		drm_WARN_ON(&dev_priv->drm,
			    port_sel != PANEL_PORT_SELECT_LVDS);
		intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
	}

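	/*
	 * The panel power sequencer registers count as writable when
	 * panel power is off, or when the unlock key has been written.
	 */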
	val = intel_de_read(dev_priv, pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder),
			onoff(state), onoff(cur_state));
}

static void assert_plane(struct intel_plane *plane, bool state)
{
	enum pipe pipe;
	bool cur_state;

	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(p) assert_plane(p, true)
#define assert_plane_disabled(p) assert_plane(p, false)

static void assert_planes_disabled(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_plane *plane;

	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
		assert_plane_disabled(plane);
}

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, enum port port,
				   i915_reg_t dp_reg)
{
	enum pipe port_pipe;
	bool state;

	state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH DP %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH DP %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, enum port port,
				     i915_reg_t hdmi_reg)
{
	enum pipe port_pipe;
	bool state;

	state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);

	I915_STATE_WARN(state && port_pipe == pipe,
			"PCH HDMI %c enabled on transcoder %c, should be disabled\n",
			port_name(port), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
			"IBX PCH HDMI %c still using transcoder B\n",
			port_name(port));
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	enum pipe port_pipe;

	assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
	assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);

	I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
			port_pipe == pipe,
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
			port_pipe == pipe,
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	/* PCH SDVOB multiplex with HDMIB */
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
}

void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dig_port->base.port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
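		/*
		 * The port C ready bits sit 4 bits above the port B
		 * bits in DPLL(0), hence the shift below.
		 */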
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}

static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev_priv)) {
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		/*
		 * Workaround: Set the timing override bit
		 * before enabling the pch transcoder.
		 */
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
		intel_de_write(dev_priv, reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/* Configure frame start delay to match the CPU */
		val &= ~TRANS_FRAME_START_DELAY_MASK;
		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);

		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	} else {
		val |= TRANS_PROGRESSIVE;
	}

	intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
			pipe_name(pipe));
}

static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, PIPE_A);

	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	/* Workaround: set timing override bit. */
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	/* Configure frame start delay to match the CPU */
	val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
	val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
}

static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	i915_reg_t reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = intel_de_read(dev_priv, reg);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
			pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = intel_de_read(dev_priv, reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		intel_de_write(dev_priv, reg, val);
	}
}

void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_de_read(dev_priv, LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	intel_de_write(dev_priv, LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
}

enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (HAS_PCH_LPT(dev_priv))
		return PIPE_A;
	else
		return crtc->pipe;
}

void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if (val & PIPECONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}

void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	reg = PIPECONF(cpu_transcoder);
	val = intel_de_read(dev_priv, reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~PIPECONF_ENABLE;

	intel_de_write(dev_priv, reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}

bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
				    u64 modifier)
{
	return info->is_yuv &&
	       info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
}

unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	unsigned int cpp = fb->format->cpp[color_plane];

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_tile_size(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (IS_DISPLAY_VER(dev_priv, 2))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 64;
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_DISPLAY_VER(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		if (is_ccs_plane(fb, color_plane))
			return 128;
		fallthrough;
	case I915_FORMAT_MOD_Yf_TILED:
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb->modifier);
		return cpp;
	}
}

unsigned int
intel_fb_align_height(const struct drm_framebuffer *fb,
		      int color_plane, unsigned int height)
{
	unsigned int tile_height = intel_tile_height(fb, color_plane);

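	/*
	 * Example (illustrative): a 1080 line tall plane on a modifier
	 * with a 32 row tile height is padded to ALIGN(1080, 32) = 1088.
	 */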
	return ALIGN(height, tile_height);
}

unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++)
		size += rot_info->plane[i].dst_stride * rot_info->plane[i].width;

	return size;
}

unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
{
	unsigned int size = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(rem_info->plane); i++)
		size += rem_info->plane[i].dst_stride * rem_info->plane[i].height;

	return size;
}

static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (DISPLAY_VER(dev_priv) >= 9)
		return 256 * 1024;
	else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;
	else if (DISPLAY_VER(dev_priv) >= 4)
		return 4 * 1024;
	else
		return 0;
}

static bool has_async_flips(struct drm_i915_private *i915)
{
	return DISPLAY_VER(i915) >= 5;
}

unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
				  int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);

	/* AUX_DIST needs only 4K alignment */
	if ((DISPLAY_VER(dev_priv) < 12 && is_aux_plane(fb, color_plane)) ||
	    is_ccs_plane(fb, color_plane))
		return 4096;

	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (has_async_flips(dev_priv))
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		if (is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		fallthrough;
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
		return 16 * 1024;
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED:
		if (DISPLAY_VER(dev_priv) >= 12 &&
		    is_semiplanar_uv_plane(fb, color_plane))
			return intel_tile_row_size(fb, color_plane);
		fallthrough;
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb->modifier);
		return 0;
	}
}

static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return DISPLAY_VER(dev_priv) < 4 ||
		(plane->has_fbc &&
		 plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
}

struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   bool phys_cursor,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	intel_wakeref_t wakeref;
	struct i915_gem_ww_ctx ww;
	struct i915_vma *vma;
	unsigned int pinctl;
	u32 alignment;
	int ret;

	if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
		return ERR_PTR(-EINVAL);

	if (phys_cursor)
		alignment = intel_cursor_alignment(dev_priv);
	else
		alignment = intel_surf_alignment(fb, 0);
	if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
		return ERR_PTR(-EINVAL);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);

	/*
	 * Valleyview is definitely limited to scanning out the first
	 * 512MiB. Let's presume this behaviour was inherited from the
	 * g4x display engine and that all earlier gen are similarly
	 * limited. Testing suggests that it is a little more
	 * complicated than this. For example, Cherryview appears quite
	 * happy to scanout from anywhere within its global aperture.
	 */
	pinctl = 0;
	if (HAS_GMCH(dev_priv))
		pinctl |= PIN_MAPPABLE;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	ret = i915_gem_object_lock(obj, &ww);
	if (!ret && phys_cursor)
		ret = i915_gem_object_attach_phys(obj, alignment);
	if (!ret)
		ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	if (!ret) {
		vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment,
							   view, pinctl);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto err_unpin;
		}
	}

	if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
		/*
		 * Install a fence for tiled scan-out. Pre-i965 always needs a
		 * fence, whereas 965+ only requires a fence if using
		 * framebuffer compression.  For simplicity, we always, when
		 * possible, install a fence as the cost is not that onerous.
		 *
		 * If we fail to fence the tiled scanout, then either the
		 * modeset will reject the change (which is highly unlikely as
		 * the affected systems, all but one, do not have unmappable
		 * space) or we will not be able to enable full powersaving
		 * techniques (also likely not to apply due to various limits
		 * FBC and the like impose on the size of the buffer, which
		 * presumably we violated anyway with this unmappable buffer).
		 * Anyway, it is presumably better to stumble onwards with
		 * something and try to run the system in a "less than optimal"
		 * mode that matches the user configuration.
		 */
		ret = i915_vma_pin_fence(vma);
		if (ret != 0 && DISPLAY_VER(dev_priv) < 4) {
			i915_vma_unpin(vma);
			goto err_unpin;
		}
		ret = 0;

		if (vma->fence)
			*out_flags |= PLANE_HAS_FENCE;
	}

	i915_vma_get(vma);

err_unpin:
	i915_gem_object_unpin_pages(obj);
err:
	if (ret == -EDEADLK) {
		ret = i915_gem_ww_ctx_backoff(&ww);
		if (!ret)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	if (ret)
		vma = ERR_PTR(ret);

	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
	return vma;
}

void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
	i915_vma_put(vma);
}

/*
 * Convert the x/y offsets into a linear offset.
 * Only valid with 0/180 degree rotation, which is fine since linear
 * offset is only used with linear buffers on pre-hsw and tiled buffers
 * with gen2/3, and 90/270 degree rotations aren't supported on any of them.
 */
u32 intel_fb_xy_to_linear(int x, int y,
			  const struct intel_plane_state *state,
			  int color_plane)
{
	const struct drm_framebuffer *fb = state->hw.fb;
	unsigned int cpp = fb->format->cpp[color_plane];
	unsigned int pitch = state->view.color_plane[color_plane].stride;

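	/*
	 * Example (illustrative): with a 4 byte per pixel format and a
	 * 4096 byte stride, (x, y) = (16, 2) maps to the linear offset
	 * 2 * 4096 + 16 * 4 = 8256 bytes.
	 */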
	return y * pitch + x * cpp;
}

/*
 * Add the x/y offsets derived from fb->offsets[] to the user
 * specified plane src x/y offsets. The resulting x/y offsets
 * specify the start of scanout from the beginning of the gtt mapping.
 */
void intel_add_fb_offsets(int *x, int *y,
			  const struct intel_plane_state *state,
			  int color_plane)

{
	*x += state->view.color_plane[color_plane].x;
	*y += state->view.color_plane[color_plane].y;
}

static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
{
	switch (fb_modifier) {
	case I915_FORMAT_MOD_X_TILED:
		return I915_TILING_X;
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		return I915_TILING_Y;
	default:
		return I915_TILING_NONE;
	}
}

/*
 * From the Sky Lake PRM:
 * "The Color Control Surface (CCS) contains the compression status of
 *  the cache-line pairs. The compression state of the cache-line pair
 *  is specified by 2 bits in the CCS. Each CCS cache-line represents
 *  an area on the main surface of 16 x16 sets of 128 byte Y-tiled
 *  cache-line-pairs. CCS is always Y tiled."
 *
 * Since cache line pairs refer to horizontally adjacent cache lines,
 * each cache line in the CCS corresponds to an area of 32x16 cache
 * lines on the main surface. Since each pixel is 4 bytes, this gives
 * us a ratio of one byte in the CCS for each 8x16 pixels in the
 * main surface.
 */
static const struct drm_format_info skl_ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};

/*
 * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
 * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
 * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
 * 32x32 pixels, the ratio turns out to be 1B in the CCS for every 2x32 pixels
 * in the main surface.
 */
static const struct drm_format_info gen12_ccs_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_YUYV, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_YVYU, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_UYVY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_VYUY, .num_planes = 2,
	  .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
	  .hsub = 2, .vsub = 1, .is_yuv = true },
	{ .format = DRM_FORMAT_NV12, .num_planes = 4,
	  .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P010, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P012, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
	{ .format = DRM_FORMAT_P016, .num_planes = 4,
	  .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
	  .hsub = 2, .vsub = 2, .is_yuv = true },
};

/*
 * Same as gen12_ccs_formats[] above, but with additional surface used
 * to pass Clear Color information in plane 2 with 64 bits of data.
 */
static const struct drm_format_info gen12_ccs_cc_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
	  .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
	  .hsub = 1, .vsub = 1, .has_alpha = true },
};

static const struct drm_format_info *
lookup_format_info(const struct drm_format_info formats[],
		   int num_formats, u32 format)
{
	int i;

	for (i = 0; i < num_formats; i++) {
		if (formats[i].format == format)
			return &formats[i];
	}

	return NULL;
}

static const struct drm_format_info *
intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
	switch (cmd->modifier[0]) {
	case I915_FORMAT_MOD_Y_TILED_CCS:
	case I915_FORMAT_MOD_Yf_TILED_CCS:
		return lookup_format_info(skl_ccs_formats,
					  ARRAY_SIZE(skl_ccs_formats),
					  cmd->pixel_format);
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		return lookup_format_info(gen12_ccs_formats,
					  ARRAY_SIZE(gen12_ccs_formats),
					  cmd->pixel_format);
	case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
		return lookup_format_info(gen12_ccs_cc_formats,
					  ARRAY_SIZE(gen12_ccs_cc_formats),
					  cmd->pixel_format);
	default:
		return NULL;
	}
}

static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
{
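	/*
	 * Example (illustrative): a 2048 byte main surface pitch gives
	 * DIV_ROUND_UP(2048, 512) * 64 = 256 bytes of CCS stride.
	 */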
	return DIV_ROUND_UP(fb->pitches[skl_ccs_to_main_plane(fb, ccs_plane)],
			    512) * 64;
}

u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
			      u32 pixel_format, u64 modifier)
{
	struct intel_crtc *crtc;
	struct intel_plane *plane;

	/*
	 * We assume the primary plane for pipe A has the highest stride
	 * limits of them all; if pipe A is disabled, use the first pipe
	 * from the pipe_mask instead.
	 */
	crtc = intel_get_first_crtc(dev_priv);
	if (!crtc)
		return 0;

	plane = to_intel_plane(crtc->base.primary);

	return plane->max_stride(plane, pixel_format, modifier,
				 DRM_MODE_ROTATE_0);
}

static
u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
			u32 pixel_format, u64 modifier)
{
	/*
	 * Arbitrary limit for gen4+ chosen to match the
	 * render engine max stride.
	 *
	 * The new CCS hash mode makes remapping impossible
	 */
	if (!is_ccs_modifier(modifier)) {
		if (DISPLAY_VER(dev_priv) >= 7)
			return 256*1024;
		else if (DISPLAY_VER(dev_priv) >= 4)
			return 128*1024;
	}

	return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
}

static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	u32 tile_width;

	if (is_surface_linear(fb, color_plane)) {
		u32 max_stride = intel_plane_fb_max_stride(dev_priv,
							   fb->format->format,
							   fb->modifier);

		/*
		 * To make remapping with linear generally feasible
		 * we need the stride to be page aligned.
		 */
		if (fb->pitches[color_plane] > max_stride &&
		    !is_ccs_modifier(fb->modifier))
			return intel_tile_size(dev_priv);
		else
			return 64;
	}

	tile_width = intel_tile_width_bytes(fb, color_plane);
	if (is_ccs_modifier(fb->modifier)) {
		/*
		 * Display WA #0531: skl,bxt,kbl,glk
		 *
		 * Render decompression and plane width > 3840
		 * combined with horizontal panning requires the
		 * plane stride to be a multiple of 4. We'll just
		 * require the entire fb to accommodate that to avoid
		 * potential runtime errors at plane configuration time.
		 */
		if ((IS_DISPLAY_VER(dev_priv, 9) || IS_GEMINILAKE(dev_priv)) &&
		    color_plane == 0 && fb->width > 3840)
			tile_width *= 4;
		/*
		 * The main surface pitch must be padded to a multiple of four
		 * tile widths.
		 */
		else if (DISPLAY_VER(dev_priv) >= 12)
			tile_width *= 4;
	}
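	/*
	 * Example (illustrative): a Y-tiled CCS framebuffer on display
	 * version 12 has a 128 byte tile width, so the stride alignment
	 * above becomes 128 * 4 = 512 bytes.
	 */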
	return tile_width;
}

static struct i915_vma *
initial_plane_vma(struct drm_i915_private *i915,
		  struct intel_initial_plane_config *plane_config)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u32 base, size;

	if (plane_config->size == 0)
		return NULL;

	base = round_down(plane_config->base,
			  I915_GTT_MIN_ALIGNMENT);
	size = round_up(plane_config->base + plane_config->size,
			I915_GTT_MIN_ALIGNMENT);
	size -= base;
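	/*
	 * Example (illustrative, assuming a 4 KiB I915_GTT_MIN_ALIGNMENT):
	 * a plane_config base of 0x12345 rounds down to 0x12000, an end of
	 * 0x23456 rounds up to 0x24000, so the object spans 0x12000 bytes.
	 */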
1435 
1436 	/*
1437 	 * If the FB is too big, just don't use it since fbdev is not very
1438 	 * important and we should probably use that space with FBC or other
1439 	 * features.
1440 	 */
1441 	if (size * 2 > i915->stolen_usable_size)
1442 		return NULL;
1443 
1444 	obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
1445 	if (IS_ERR(obj))
1446 		return NULL;
1447 
1448 	/*
1449 	 * Mark it WT ahead of time to avoid changing the
1450 	 * cache_level during fbdev initialization. The
1451 	 * unbind there would get stuck waiting for rcu.
1452 	 */
1453 	i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
1454 					    I915_CACHE_WT : I915_CACHE_NONE);
1455 
1456 	switch (plane_config->tiling) {
1457 	case I915_TILING_NONE:
1458 		break;
1459 	case I915_TILING_X:
1460 	case I915_TILING_Y:
1461 		obj->tiling_and_stride =
1462 			plane_config->fb->base.pitches[0] |
1463 			plane_config->tiling;
1464 		break;
1465 	default:
1466 		MISSING_CASE(plane_config->tiling);
1467 		goto err_obj;
1468 	}
1469 
1470 	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1471 	if (IS_ERR(vma))
1472 		goto err_obj;
1473 
1474 	if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
1475 		goto err_obj;
1476 
1477 	if (i915_gem_object_is_tiled(obj) &&
1478 	    !i915_vma_is_map_and_fenceable(vma))
1479 		goto err_obj;
1480 
1481 	return vma;
1482 
1483 err_obj:
1484 	i915_gem_object_put(obj);
1485 	return NULL;
1486 }
1487 
1488 static bool
intel_alloc_initial_plane_obj(struct intel_crtc * crtc,struct intel_initial_plane_config * plane_config)1489 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
1490 			      struct intel_initial_plane_config *plane_config)
1491 {
1492 	struct drm_device *dev = crtc->base.dev;
1493 	struct drm_i915_private *dev_priv = to_i915(dev);
1494 	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
1495 	struct drm_framebuffer *fb = &plane_config->fb->base;
1496 	struct i915_vma *vma;
1497 
1498 	switch (fb->modifier) {
1499 	case DRM_FORMAT_MOD_LINEAR:
1500 	case I915_FORMAT_MOD_X_TILED:
1501 	case I915_FORMAT_MOD_Y_TILED:
1502 		break;
1503 	default:
1504 		drm_dbg(&dev_priv->drm,
1505 			"Unsupported modifier for initial FB: 0x%llx\n",
1506 			fb->modifier);
1507 		return false;
1508 	}
1509 
1510 	vma = initial_plane_vma(dev_priv, plane_config);
1511 	if (!vma)
1512 		return false;
1513 
1514 	mode_cmd.pixel_format = fb->format->format;
1515 	mode_cmd.width = fb->width;
1516 	mode_cmd.height = fb->height;
1517 	mode_cmd.pitches[0] = fb->pitches[0];
1518 	mode_cmd.modifier[0] = fb->modifier;
1519 	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
1520 
1521 	if (intel_framebuffer_init(to_intel_framebuffer(fb),
1522 				   vma->obj, &mode_cmd)) {
1523 		drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
1524 		goto err_vma;
1525 	}
1526 
1527 	plane_config->vma = vma;
1528 	return true;
1529 
1530 err_vma:
1531 	i915_vma_put(vma);
1532 	return false;
1533 }
1534 
1535 static void
1536 intel_set_plane_visible(struct intel_crtc_state *crtc_state,
1537 			struct intel_plane_state *plane_state,
1538 			bool visible)
1539 {
1540 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
1541 
1542 	plane_state->uapi.visible = visible;
1543 
1544 	if (visible)
1545 		crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base);
1546 	else
1547 		crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base);
1548 }
1549 
1550 static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
1551 {
1552 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
1553 	struct drm_plane *plane;
1554 
1555 	/*
1556 	 * Active_planes aliases if multiple "primary" or cursor planes
1557 	 * have been used on the same (or wrong) pipe. plane_mask uses
1558 	 * unique ids, hence we can use that to reconstruct active_planes.
1559 	 */
1560 	crtc_state->enabled_planes = 0;
1561 	crtc_state->active_planes = 0;
1562 
1563 	drm_for_each_plane_mask(plane, &dev_priv->drm,
1564 				crtc_state->uapi.plane_mask) {
1565 		crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id);
1566 		crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
1567 	}
1568 }
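/*
 * Note: uapi.plane_mask is keyed on device-wide drm plane indices,
 * while intel plane ids (PLANE_PRIMARY, PLANE_CURSOR, ...) repeat on
 * every pipe, which is why the per-pipe masks are rebuilt from
 * plane_mask above instead of being trusted as read out.
 */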
1569 
1570 static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
1571 					 struct intel_plane *plane)
1572 {
1573 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1574 	struct intel_crtc_state *crtc_state =
1575 		to_intel_crtc_state(crtc->base.state);
1576 	struct intel_plane_state *plane_state =
1577 		to_intel_plane_state(plane->base.state);
1578 
1579 	drm_dbg_kms(&dev_priv->drm,
1580 		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
1581 		    plane->base.base.id, plane->base.name,
1582 		    crtc->base.base.id, crtc->base.name);
1583 
1584 	intel_set_plane_visible(crtc_state, plane_state, false);
1585 	fixup_plane_bitmasks(crtc_state);
1586 	crtc_state->data_rate[plane->id] = 0;
1587 	crtc_state->min_cdclk[plane->id] = 0;
1588 
1589 	if (plane->id == PLANE_PRIMARY)
1590 		hsw_disable_ips(crtc_state);
1591 
1592 	/*
1593 	 * Vblank time updates from the shadow to live plane control register
1594 	 * are blocked if the memory self-refresh mode is active at that
1595 	 * moment. So to make sure the plane gets truly disabled, disable
1596 	 * first the self-refresh mode. The self-refresh enable bit in turn
1597 	 * will be checked/applied by the HW only at the next frame start
1598 	 * event which is after the vblank start event, so we need to have a
1599 	 * wait-for-vblank between disabling the plane and the pipe.
1600 	 */
1601 	if (HAS_GMCH(dev_priv) &&
1602 	    intel_set_memory_cxsr(dev_priv, false))
1603 		intel_wait_for_vblank(dev_priv, crtc->pipe);
1604 
1605 	/*
1606 	 * Gen2 reports pipe underruns whenever all planes are disabled.
1607 	 * So disable underrun reporting before all the planes get disabled.
1608 	 */
1609 	if (IS_DISPLAY_VER(dev_priv, 2) && !crtc_state->active_planes)
1610 		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
1611 
1612 	intel_disable_plane(plane, crtc_state);
1613 	intel_wait_for_vblank(dev_priv, crtc->pipe);
1614 }
1615 
1616 static void
1617 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
1618 			     struct intel_initial_plane_config *plane_config)
1619 {
1620 	struct drm_device *dev = intel_crtc->base.dev;
1621 	struct drm_i915_private *dev_priv = to_i915(dev);
1622 	struct drm_crtc *c;
1623 	struct drm_plane *primary = intel_crtc->base.primary;
1624 	struct drm_plane_state *plane_state = primary->state;
1625 	struct intel_plane *intel_plane = to_intel_plane(primary);
1626 	struct intel_plane_state *intel_state =
1627 		to_intel_plane_state(plane_state);
1628 	struct intel_crtc_state *crtc_state =
1629 		to_intel_crtc_state(intel_crtc->base.state);
1630 	struct drm_framebuffer *fb;
1631 	struct i915_vma *vma;
1632 
1633 	/*
1634 	 * TODO:
1635 	 *   Disable planes if get_initial_plane_config() failed.
1636 	 *   Make sure things work if the surface base is not page aligned.
1637 	 */
1638 	if (!plane_config->fb)
1639 		return;
1640 
1641 	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
1642 		fb = &plane_config->fb->base;
1643 		vma = plane_config->vma;
1644 		goto valid_fb;
1645 	}
1646 
1647 	/*
1648 	 * Failed to alloc the obj, check to see if we should share
1649 	 * an fb with another CRTC instead
1650 	 */
1651 	for_each_crtc(dev, c) {
1652 		struct intel_plane_state *state;
1653 
1654 		if (c == &intel_crtc->base)
1655 			continue;
1656 
1657 		if (!to_intel_crtc_state(c->state)->uapi.active)
1658 			continue;
1659 
1660 		state = to_intel_plane_state(c->primary->state);
1661 		if (!state->vma)
1662 			continue;
1663 
1664 		if (intel_plane_ggtt_offset(state) == plane_config->base) {
1665 			fb = state->hw.fb;
1666 			vma = state->vma;
1667 			goto valid_fb;
1668 		}
1669 	}
1670 
1671 	/*
1672 	 * We've failed to reconstruct the BIOS FB.  Current display state
1673 	 * indicates that the primary plane is visible, but has a NULL FB,
1674 	 * which will lead to problems later if we don't fix it up.  The
1675 	 * simplest solution is to just disable the primary plane now and
1676 	 * pretend the BIOS never had it enabled.
1677 	 */
1678 	intel_plane_disable_noatomic(intel_crtc, intel_plane);
1679 	if (crtc_state->bigjoiner) {
1680 		struct intel_crtc *slave =
1681 			crtc_state->bigjoiner_linked_crtc;
1682 		intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
1683 	}
1684 
1685 	return;
1686 
1687 valid_fb:
1688 	plane_state->rotation = plane_config->rotation;
1689 	intel_fb_fill_view(to_intel_framebuffer(fb), plane_state->rotation,
1690 			   &intel_state->view);
1691 
1692 	__i915_vma_pin(vma);
1693 	intel_state->vma = i915_vma_get(vma);
1694 	if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
1695 		if (vma->fence)
1696 			intel_state->flags |= PLANE_HAS_FENCE;
1697 
1698 	plane_state->src_x = 0;
1699 	plane_state->src_y = 0;
1700 	plane_state->src_w = fb->width << 16;
1701 	plane_state->src_h = fb->height << 16;
1702 
1703 	plane_state->crtc_x = 0;
1704 	plane_state->crtc_y = 0;
1705 	plane_state->crtc_w = fb->width;
1706 	plane_state->crtc_h = fb->height;
1707 
1708 	if (plane_config->tiling)
1709 		dev_priv->preserve_bios_swizzle = true;
1710 
1711 	plane_state->fb = fb;
1712 	drm_framebuffer_get(fb);
1713 
1714 	plane_state->crtc = &intel_crtc->base;
1715 	intel_plane_copy_uapi_to_hw_state(intel_state, intel_state,
1716 					  intel_crtc);
1717 
1718 	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
1719 
1720 	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
1721 		  &to_intel_frontbuffer(fb)->bits);
1722 }
1723 
1724 unsigned int
1725 intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
1726 {
1727 	int x = 0, y = 0;
1728 
1729 	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
1730 					  plane_state->view.color_plane[0].offset, 0);
1731 
1732 	return y;
1733 }
1734 
1735 static int
1736 __intel_display_resume(struct drm_device *dev,
1737 		       struct drm_atomic_state *state,
1738 		       struct drm_modeset_acquire_ctx *ctx)
1739 {
1740 	struct drm_crtc_state *crtc_state;
1741 	struct drm_crtc *crtc;
1742 	int i, ret;
1743 
1744 	intel_modeset_setup_hw_state(dev, ctx);
1745 	intel_vga_redisable(to_i915(dev));
1746 
1747 	if (!state)
1748 		return 0;
1749 
1750 	/*
1751 	 * We've duplicated the state, pointers to the old state are invalid.
1752 	 *
1753 	 * Don't attempt to use the old state until we commit the duplicated state.
1754 	 */
1755 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1756 		/*
1757 		 * Force recalculation even if we restore
1758 		 * current state. With fast modeset this may not result
1759 		 * in a modeset when the state is compatible.
1760 		 */
1761 		crtc_state->mode_changed = true;
1762 	}
1763 
1764 	/* ignore any reset values/BIOS leftovers in the WM registers */
1765 	if (!HAS_GMCH(to_i915(dev)))
1766 		to_intel_atomic_state(state)->skip_intermediate_wm = true;
1767 
1768 	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
1769 
1770 	drm_WARN_ON(dev, ret == -EDEADLK);
1771 	return ret;
1772 }
1773 
1774 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
1775 {
1776 	return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
1777 		intel_has_gpu_reset(&dev_priv->gt));
1778 }
1779 
1780 void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
1781 {
1782 	struct drm_device *dev = &dev_priv->drm;
1783 	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
1784 	struct drm_atomic_state *state;
1785 	int ret;
1786 
1787 	if (!HAS_DISPLAY(dev_priv))
1788 		return;
1789 
1790 	/* reset doesn't touch the display */
1791 	if (!dev_priv->params.force_reset_modeset_test &&
1792 	    !gpu_reset_clobbers_display(dev_priv))
1793 		return;
1794 
1795 	/* We have a modeset vs reset deadlock, defensively unbreak it. */
1796 	set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
1797 	smp_mb__after_atomic();
1798 	wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);
1799 
1800 	if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
1801 		drm_dbg_kms(&dev_priv->drm,
1802 			    "Modeset potentially stuck, unbreaking through wedging\n");
1803 		intel_gt_set_wedged(&dev_priv->gt);
1804 	}
1805 
1806 	/*
1807 	 * Need mode_config.mutex so that we don't
1808 	 * trample ongoing ->detect() and whatnot.
1809 	 */
1810 	mutex_lock(&dev->mode_config.mutex);
1811 	drm_modeset_acquire_init(ctx, 0);
1812 	while (1) {
1813 		ret = drm_modeset_lock_all_ctx(dev, ctx);
1814 		if (ret != -EDEADLK)
1815 			break;
1816 
1817 		drm_modeset_backoff(ctx);
1818 	}
1819 	/*
1820 	 * Disabling the crtcs gracefully seems nicer. Also the
1821 	 * g33 docs say we should at least disable all the planes.
1822 	 */
1823 	state = drm_atomic_helper_duplicate_state(dev, ctx);
1824 	if (IS_ERR(state)) {
1825 		ret = PTR_ERR(state);
1826 		drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
1827 			ret);
1828 		return;
1829 	}
1830 
1831 	ret = drm_atomic_helper_disable_all(dev, ctx);
1832 	if (ret) {
1833 		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
1834 			ret);
1835 		drm_atomic_state_put(state);
1836 		return;
1837 	}
1838 
1839 	dev_priv->modeset_restore_state = state;
1840 	state->acquire_ctx = ctx;
1841 }
1842 
1843 void intel_display_finish_reset(struct drm_i915_private *dev_priv)
1844 {
1845 	struct drm_device *dev = &dev_priv->drm;
1846 	struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
1847 	struct drm_atomic_state *state;
1848 	int ret;
1849 
1850 	if (!HAS_DISPLAY(dev_priv))
1851 		return;
1852 
1853 	/* reset doesn't touch the display */
1854 	if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
1855 		return;
1856 
1857 	state = fetch_and_zero(&dev_priv->modeset_restore_state);
1858 	if (!state)
1859 		goto unlock;
1860 
1861 	/* reset doesn't touch the display */
1862 	if (!gpu_reset_clobbers_display(dev_priv)) {
1863 		/* for testing only restore the display */
1864 		ret = __intel_display_resume(dev, state, ctx);
1865 		if (ret)
1866 			drm_err(&dev_priv->drm,
1867 				"Restoring old state failed with %i\n", ret);
1868 	} else {
1869 		/*
1870 		 * The display has been reset as well,
1871 		 * so need a full re-initialization.
1872 		 */
1873 		intel_pps_unlock_regs_wa(dev_priv);
1874 		intel_modeset_init_hw(dev_priv);
1875 		intel_init_clock_gating(dev_priv);
1876 		intel_hpd_init(dev_priv);
1877 
1878 		ret = __intel_display_resume(dev, state, ctx);
1879 		if (ret)
1880 			drm_err(&dev_priv->drm,
1881 				"Restoring old state failed with %i\n", ret);
1882 
1883 		intel_hpd_poll_disable(dev_priv);
1884 	}
1885 
1886 	drm_atomic_state_put(state);
1887 unlock:
1888 	drm_modeset_drop_locks(ctx);
1889 	drm_modeset_acquire_fini(ctx);
1890 	mutex_unlock(&dev->mode_config.mutex);
1891 
1892 	clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
1893 }
1894 
1895 static void icl_set_pipe_chicken(struct intel_crtc *crtc)
1896 {
1897 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1898 	enum pipe pipe = crtc->pipe;
1899 	u32 tmp;
1900 
1901 	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
1902 
1903 	/*
1904 	 * Display WA #1153: icl
1905 	 * enable hardware to bypass the alpha math
1906 	 * and rounding for per-pixel values 00 and 0xff
1907 	 */
1908 	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
1909 	/*
1910 	 * Display WA # 1605353570: icl
1911 	 * Set the pixel rounding bit to 1 for allowing
1912 	 * passthrough of Frame buffer pixels unmodified
1913 	 * across pipe
1914 	 */
1915 	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
1916 	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
1917 }
1918 
1919 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
1920 {
1921 	struct drm_crtc *crtc;
1922 	bool cleanup_done;
1923 
1924 	drm_for_each_crtc(crtc, &dev_priv->drm) {
1925 		struct drm_crtc_commit *commit;
1926 		spin_lock(&crtc->commit_lock);
1927 		commit = list_first_entry_or_null(&crtc->commit_list,
1928 						  struct drm_crtc_commit, commit_entry);
1929 		cleanup_done = commit ?
1930 			try_wait_for_completion(&commit->cleanup_done) : true;
1931 		spin_unlock(&crtc->commit_lock);
1932 
1933 		if (cleanup_done)
1934 			continue;
1935 
1936 		drm_crtc_wait_one_vblank(crtc);
1937 
1938 		return true;
1939 	}
1940 
1941 	return false;
1942 }
1943 
1944 void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
1945 {
1946 	u32 temp;
1947 
1948 	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
1949 
1950 	mutex_lock(&dev_priv->sb_lock);
1951 
1952 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
1953 	temp |= SBI_SSCCTL_DISABLE;
1954 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
1955 
1956 	mutex_unlock(&dev_priv->sb_lock);
1957 }
1958 
1959 /* Program iCLKIP clock to the desired frequency */
1960 static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
1961 {
1962 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1963 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1964 	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
1965 	u32 divsel, phaseinc, auxdiv, phasedir = 0;
1966 	u32 temp;
1967 
1968 	lpt_disable_iclkip(dev_priv);
1969 
1970 	/* The iCLK virtual clock root frequency is in MHz,
1971 	 * but the adjusted_mode->crtc_clock is in KHz. To get the
1972 	 * divisors, it is necessary to divide one by another, so we
1973 	 * convert the virtual clock precision to KHz here for higher
1974 	 * precision.
1975 	 */
1976 	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
1977 		u32 iclk_virtual_root_freq = 172800 * 1000;
1978 		u32 iclk_pi_range = 64;
1979 		u32 desired_divisor;
1980 
1981 		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
1982 						    clock << auxdiv);
1983 		divsel = (desired_divisor / iclk_pi_range) - 2;
1984 		phaseinc = desired_divisor % iclk_pi_range;
1985 
1986 		/*
1987 		 * Near 20MHz is a corner case which is
1988 		 * out of range for the 7-bit divisor
1989 		 */
1990 		if (divsel <= 0x7f)
1991 			break;
1992 	}
1993 
1994 	/* This should not happen with any sane values */
1995 	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
1996 		    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
1997 	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
1998 		    ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
1999 
2000 	drm_dbg_kms(&dev_priv->drm,
2001 		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
2002 		    clock, auxdiv, divsel, phasedir, phaseinc);
2003 
2004 	mutex_lock(&dev_priv->sb_lock);
2005 
2006 	/* Program SSCDIVINTPHASE6 */
2007 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
2008 	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
2009 	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
2010 	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
2011 	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
2012 	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
2013 	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
2014 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
2015 
2016 	/* Program SSCAUXDIV */
2017 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
2018 	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
2019 	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
2020 	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
2021 
2022 	/* Enable modulator and associated divider */
2023 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
2024 	temp &= ~SBI_SSCCTL_DISABLE;
2025 	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
2026 
2027 	mutex_unlock(&dev_priv->sb_lock);
2028 
2029 	/* Wait for initialization time */
2030 	udelay(24);
2031 
2032 	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
2033 }
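/*
 * Worked example for the divisor math above (illustrative numbers):
 * with crtc_clock = 108000 kHz and auxdiv = 0, desired_divisor =
 * DIV_ROUND_CLOSEST(172800000, 108000) = 1600, so divsel =
 * 1600 / 64 - 2 = 23 and phaseinc = 1600 % 64 = 0. divsel fits the
 * 7-bit field, so the auxdiv = 0 iteration is chosen.
 */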
2034 
2035 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
2036 {
2037 	u32 divsel, phaseinc, auxdiv;
2038 	u32 iclk_virtual_root_freq = 172800 * 1000;
2039 	u32 iclk_pi_range = 64;
2040 	u32 desired_divisor;
2041 	u32 temp;
2042 
2043 	if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
2044 		return 0;
2045 
2046 	mutex_lock(&dev_priv->sb_lock);
2047 
2048 	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
2049 	if (temp & SBI_SSCCTL_DISABLE) {
2050 		mutex_unlock(&dev_priv->sb_lock);
2051 		return 0;
2052 	}
2053 
2054 	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
2055 	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
2056 		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
2057 	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
2058 		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
2059 
2060 	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
2061 	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
2062 		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
2063 
2064 	mutex_unlock(&dev_priv->sb_lock);
2065 
2066 	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
2067 
2068 	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
2069 				 desired_divisor << auxdiv);
2070 }
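/*
 * Round-trip check with the same illustrative numbers: divsel = 23,
 * phaseinc = 0 and auxdiv = 0 give desired_divisor =
 * (23 + 2) * 64 + 0 = 1600, and DIV_ROUND_CLOSEST(172800000,
 * 1600 << 0) = 108000 kHz, recovering the clock programmed by
 * lpt_program_iclkip().
 */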
2071 
2072 static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
2073 					   enum pipe pch_transcoder)
2074 {
2075 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2076 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2077 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2078 
2079 	intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
2080 		       intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
2081 	intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
2082 		       intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
2083 	intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
2084 		       intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
2085 
2086 	intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
2087 		       intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
2088 	intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
2089 		       intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
2090 	intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
2091 		       intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
2092 	intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
2093 		       intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
2094 }
2095 
2096 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
2097 {
2098 	u32 temp;
2099 
2100 	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
2101 	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
2102 		return;
2103 
2104 	drm_WARN_ON(&dev_priv->drm,
2105 		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
2106 		    FDI_RX_ENABLE);
2107 	drm_WARN_ON(&dev_priv->drm,
2108 		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
2109 		    FDI_RX_ENABLE);
2110 
2111 	temp &= ~FDI_BC_BIFURCATION_SELECT;
2112 	if (enable)
2113 		temp |= FDI_BC_BIFURCATION_SELECT;
2114 
2115 	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
2116 		    enable ? "en" : "dis");
2117 	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
2118 	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
2119 }
2120 
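/*
 * The FDI B/C lanes are shared on IVB: pipe B may use more than two
 * lanes only while bifurcation is disabled, whereas any use of FDI C
 * requires it to be enabled. The switch below encodes that policy
 * per pipe.
 */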
2121 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
2122 {
2123 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2124 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2125 
2126 	switch (crtc->pipe) {
2127 	case PIPE_A:
2128 		break;
2129 	case PIPE_B:
2130 		if (crtc_state->fdi_lanes > 2)
2131 			cpt_set_fdi_bc_bifurcation(dev_priv, false);
2132 		else
2133 			cpt_set_fdi_bc_bifurcation(dev_priv, true);
2134 
2135 		break;
2136 	case PIPE_C:
2137 		cpt_set_fdi_bc_bifurcation(dev_priv, true);
2138 
2139 		break;
2140 	default:
2141 		BUG();
2142 	}
2143 }
2144 
2145 /*
2146  * Finds the encoder associated with the given CRTC. This can only be
2147  * used when we know that the CRTC isn't feeding multiple encoders!
2148  */
2149 struct intel_encoder *
2150 intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
2151 			   const struct intel_crtc_state *crtc_state)
2152 {
2153 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2154 	const struct drm_connector_state *connector_state;
2155 	const struct drm_connector *connector;
2156 	struct intel_encoder *encoder = NULL;
2157 	int num_encoders = 0;
2158 	int i;
2159 
2160 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
2161 		if (connector_state->crtc != &crtc->base)
2162 			continue;
2163 
2164 		encoder = to_intel_encoder(connector_state->best_encoder);
2165 		num_encoders++;
2166 	}
2167 
2168 	drm_WARN(encoder->base.dev, num_encoders != 1,
2169 		 "%d encoders for pipe %c\n",
2170 		 num_encoders, pipe_name(crtc->pipe));
2171 
2172 	return encoder;
2173 }
2174 
2175 /*
2176  * Enable PCH resources required for PCH ports:
2177  *   - PCH PLLs
2178  *   - FDI training & RX/TX
2179  *   - update transcoder timings
2180  *   - DP transcoding bits
2181  *   - transcoder
2182  */
2183 static void ilk_pch_enable(const struct intel_atomic_state *state,
2184 			   const struct intel_crtc_state *crtc_state)
2185 {
2186 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2187 	struct drm_device *dev = crtc->base.dev;
2188 	struct drm_i915_private *dev_priv = to_i915(dev);
2189 	enum pipe pipe = crtc->pipe;
2190 	u32 temp;
2191 
2192 	assert_pch_transcoder_disabled(dev_priv, pipe);
2193 
2194 	if (IS_IVYBRIDGE(dev_priv))
2195 		ivb_update_fdi_bc_bifurcation(crtc_state);
2196 
2197 	/* Write the TU size bits before fdi link training, so that error
2198 	 * detection works. */
2199 	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
2200 		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
2201 
2202 	/* For PCH output, training FDI link */
2203 	dev_priv->display.fdi_link_train(crtc, crtc_state);
2204 
2205 	/* We need to program the right clock selection before writing the pixel
2206 	 * multiplier into the DPLL. */
2207 	if (HAS_PCH_CPT(dev_priv)) {
2208 		u32 sel;
2209 
2210 		temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
2211 		temp |= TRANS_DPLL_ENABLE(pipe);
2212 		sel = TRANS_DPLLB_SEL(pipe);
2213 		if (crtc_state->shared_dpll ==
2214 		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
2215 			temp |= sel;
2216 		else
2217 			temp &= ~sel;
2218 		intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
2219 	}
2220 
2221 	/* XXX: pch pll's can be enabled any time before we enable the PCH
2222 	 * transcoder, and we actually should do this to not upset any PCH
2223 	 * transcoder that already uses the clock when we share it.
2224 	 *
2225 	 * Note that enable_shared_dpll tries to do the right thing, but
2226 	 * get_shared_dpll unconditionally resets the pll - we need that to have
2227 	 * the right LVDS enable sequence. */
2228 	intel_enable_shared_dpll(crtc_state);
2229 
2230 	/* set transcoder timing, panel must allow it */
2231 	assert_panel_unlocked(dev_priv, pipe);
2232 	ilk_pch_transcoder_set_timings(crtc_state, pipe);
2233 
2234 	intel_fdi_normal_train(crtc);
2235 
2236 	/* For PCH DP, enable TRANS_DP_CTL */
2237 	if (HAS_PCH_CPT(dev_priv) &&
2238 	    intel_crtc_has_dp_encoder(crtc_state)) {
2239 		const struct drm_display_mode *adjusted_mode =
2240 			&crtc_state->hw.adjusted_mode;
2241 		u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
2242 		i915_reg_t reg = TRANS_DP_CTL(pipe);
2243 		enum port port;
2244 
2245 		temp = intel_de_read(dev_priv, reg);
2246 		temp &= ~(TRANS_DP_PORT_SEL_MASK |
2247 			  TRANS_DP_SYNC_MASK |
2248 			  TRANS_DP_BPC_MASK);
2249 		temp |= TRANS_DP_OUTPUT_ENABLE;
2250 		temp |= bpc << 9; /* same format but at 11:9 */
2251 
2252 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
2253 			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
2254 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
2255 			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
2256 
2257 		port = intel_get_crtc_new_encoder(state, crtc_state)->port;
2258 		drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
2259 		temp |= TRANS_DP_PORT_SEL(port);
2260 
2261 		intel_de_write(dev_priv, reg, temp);
2262 	}
2263 
2264 	ilk_enable_pch_transcoder(crtc_state);
2265 }
2266 
2267 void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
2268 {
2269 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2270 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2271 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
2272 
2273 	assert_pch_transcoder_disabled(dev_priv, PIPE_A);
2274 
2275 	lpt_program_iclkip(crtc_state);
2276 
2277 	/* Set transcoder timing. */
2278 	ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);
2279 
2280 	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
2281 }
2282 
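/*
 * PIPEDSL holds the current scanline, so its readback doubles as a
 * cheap "is the pipe actually scanning out?" check: if the counter
 * has not moved after two 5 ms waits, the pipe is assumed stuck.
 */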
2283 static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
2284 			       enum pipe pipe)
2285 {
2286 	i915_reg_t dslreg = PIPEDSL(pipe);
2287 	u32 temp;
2288 
2289 	temp = intel_de_read(dev_priv, dslreg);
2290 	udelay(500);
2291 	if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
2292 		if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
2293 			drm_err(&dev_priv->drm,
2294 				"mode set failed: pipe %c stuck\n",
2295 				pipe_name(pipe));
2296 	}
2297 }
2298 
2299 static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
2300 {
2301 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2302 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2303 	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
2304 	enum pipe pipe = crtc->pipe;
2305 	int width = drm_rect_width(dst);
2306 	int height = drm_rect_height(dst);
2307 	int x = dst->x1;
2308 	int y = dst->y1;
2309 
2310 	if (!crtc_state->pch_pfit.enabled)
2311 		return;
2312 
2313 	/* Force use of hard-coded filter coefficients
2314 	 * as some pre-programmed values are broken,
2315 	 * e.g. x201.
2316 	 */
2317 	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
2318 		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
2319 			       PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
2320 	else
2321 		intel_de_write(dev_priv, PF_CTL(pipe), PF_ENABLE |
2322 			       PF_FILTER_MED_3x3);
2323 	intel_de_write(dev_priv, PF_WIN_POS(pipe), x << 16 | y);
2324 	intel_de_write(dev_priv, PF_WIN_SZ(pipe), width << 16 | height);
2325 }
2326 
2327 void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
2328 {
2329 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2330 	struct drm_device *dev = crtc->base.dev;
2331 	struct drm_i915_private *dev_priv = to_i915(dev);
2332 
2333 	if (!crtc_state->ips_enabled)
2334 		return;
2335 
2336 	/*
2337 	 * We can only enable IPS after we enable a plane and wait for a vblank.
2338 	 * This function is called from post_plane_update, which is run after
2339 	 * a vblank wait.
2340 	 */
2341 	drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
2342 
2343 	if (IS_BROADWELL(dev_priv)) {
2344 		drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
2345 							 IPS_ENABLE | IPS_PCODE_CONTROL));
2346 		/* Quoting Art Runyan: "it's not safe to expect any particular
2347 		 * value in IPS_CTL bit 31 after enabling IPS through the
2348 		 * mailbox." Moreover, the mailbox may return a bogus state,
2349 		 * so we need to just enable it and continue on.
2350 		 */
2351 	} else {
2352 		intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
2353 		/* The bit only becomes 1 in the next vblank, so this wait here
2354 		 * is essentially intel_wait_for_vblank. If we don't have this
2355 		 * and don't wait for vblanks until the end of crtc_enable, then
2356 		 * the HW state readout code will complain that the expected
2357 		 * IPS_CTL value is not the one we read. */
2358 		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
2359 			drm_err(&dev_priv->drm,
2360 				"Timed out waiting for IPS enable\n");
2361 	}
2362 }
2363 
2364 void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
2365 {
2366 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2367 	struct drm_device *dev = crtc->base.dev;
2368 	struct drm_i915_private *dev_priv = to_i915(dev);
2369 
2370 	if (!crtc_state->ips_enabled)
2371 		return;
2372 
2373 	if (IS_BROADWELL(dev_priv)) {
2374 		drm_WARN_ON(dev,
2375 			    sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
2376 		/*
2377 		 * Wait for PCODE to finish disabling IPS. The BSpec specified
2378 		 * 42ms timeout value leads to occasional timeouts so use 100ms
2379 		 * instead.
2380 		 */
2381 		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
2382 			drm_err(&dev_priv->drm,
2383 				"Timed out waiting for IPS disable\n");
2384 	} else {
2385 		intel_de_write(dev_priv, IPS_CTL, 0);
2386 		intel_de_posting_read(dev_priv, IPS_CTL);
2387 	}
2388 
2389 	/* We need to wait for a vblank before we can disable the plane. */
2390 	intel_wait_for_vblank(dev_priv, crtc->pipe);
2391 }
2392 
2393 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
2394 {
2395 	if (intel_crtc->overlay)
2396 		(void) intel_overlay_switch_off(intel_crtc->overlay);
2397 
2398 	/* Let userspace switch the overlay on again. In most cases userspace
2399 	 * has to recompute where to put it anyway.
2400 	 */
2401 }
2402 
2403 static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
2404 				       const struct intel_crtc_state *new_crtc_state)
2405 {
2406 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2407 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2408 
2409 	if (!old_crtc_state->ips_enabled)
2410 		return false;
2411 
2412 	if (intel_crtc_needs_modeset(new_crtc_state))
2413 		return true;
2414 
2415 	/*
2416 	 * Workaround: Do not read or write the pipe palette/gamma data while
2417 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
2418 	 *
2419 	 * Disable IPS before we program the LUT.
2420 	 */
2421 	if (IS_HASWELL(dev_priv) &&
2422 	    (new_crtc_state->uapi.color_mgmt_changed ||
2423 	     new_crtc_state->update_pipe) &&
2424 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
2425 		return true;
2426 
2427 	return !new_crtc_state->ips_enabled;
2428 }
2429 
2430 static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
2431 				       const struct intel_crtc_state *new_crtc_state)
2432 {
2433 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
2434 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2435 
2436 	if (!new_crtc_state->ips_enabled)
2437 		return false;
2438 
2439 	if (intel_crtc_needs_modeset(new_crtc_state))
2440 		return true;
2441 
2442 	/*
2443 	 * Workaround: Do not read or write the pipe palette/gamma data while
2444 	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
2445 	 *
2446 	 * Re-enable IPS after the LUT has been programmed.
2447 	 */
2448 	if (IS_HASWELL(dev_priv) &&
2449 	    (new_crtc_state->uapi.color_mgmt_changed ||
2450 	     new_crtc_state->update_pipe) &&
2451 	    new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
2452 		return true;
2453 
2454 	/*
2455 	 * We can't read out IPS on broadwell, assume the worst and
2456 	 * forcibly enable IPS on the first fastset.
2457 	 */
2458 	if (new_crtc_state->update_pipe && old_crtc_state->inherited)
2459 		return true;
2460 
2461 	return !old_crtc_state->ips_enabled;
2462 }
2463 
2464 static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state)
2465 {
2466 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2467 
2468 	if (!crtc_state->nv12_planes)
2469 		return false;
2470 
2471 	/* WA Display #0827: Gen9:all */
2472 	if (IS_DISPLAY_VER(dev_priv, 9))
2473 		return true;
2474 
2475 	return false;
2476 }
2477 
2478 static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
2479 {
2480 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
2481 
2482 	/* Wa_2006604312:icl,ehl */
2483 	if (crtc_state->scaler_state.scaler_users > 0 && IS_DISPLAY_VER(dev_priv, 11))
2484 		return true;
2485 
2486 	return false;
2487 }
2488 
2489 static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
2490 			    const struct intel_crtc_state *new_crtc_state)
2491 {
2492 	return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) &&
2493 		new_crtc_state->active_planes;
2494 }
2495 
2496 static bool planes_disabling(const struct intel_crtc_state *old_crtc_state,
2497 			     const struct intel_crtc_state *new_crtc_state)
2498 {
2499 	return old_crtc_state->active_planes &&
2500 		(!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state));
2501 }
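/*
 * In other words, planes_enabling() fires on the "no active planes ->
 * some active planes" edge (a full modeset counts as starting from
 * none) and planes_disabling() on the opposite edge, so workarounds
 * keyed on these predicates toggle at most once per transition.
 */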
2502 
2503 static void intel_post_plane_update(struct intel_atomic_state *state,
2504 				    struct intel_crtc *crtc)
2505 {
2506 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2507 	const struct intel_crtc_state *old_crtc_state =
2508 		intel_atomic_get_old_crtc_state(state, crtc);
2509 	const struct intel_crtc_state *new_crtc_state =
2510 		intel_atomic_get_new_crtc_state(state, crtc);
2511 	enum pipe pipe = crtc->pipe;
2512 
2513 	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
2514 
2515 	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
2516 		intel_update_watermarks(crtc);
2517 
2518 	if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
2519 		hsw_enable_ips(new_crtc_state);
2520 
2521 	intel_fbc_post_update(state, crtc);
2522 
2523 	if (needs_nv12_wa(old_crtc_state) &&
2524 	    !needs_nv12_wa(new_crtc_state))
2525 		skl_wa_827(dev_priv, pipe, false);
2526 
2527 	if (needs_scalerclk_wa(old_crtc_state) &&
2528 	    !needs_scalerclk_wa(new_crtc_state))
2529 		icl_wa_scalerclkgating(dev_priv, pipe, false);
2530 }
2531 
2532 static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
2533 					struct intel_crtc *crtc)
2534 {
2535 	const struct intel_crtc_state *crtc_state =
2536 		intel_atomic_get_new_crtc_state(state, crtc);
2537 	u8 update_planes = crtc_state->update_planes;
2538 	const struct intel_plane_state *plane_state;
2539 	struct intel_plane *plane;
2540 	int i;
2541 
2542 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2543 		if (plane->enable_flip_done &&
2544 		    plane->pipe == crtc->pipe &&
2545 		    update_planes & BIT(plane->id))
2546 			plane->enable_flip_done(plane);
2547 	}
2548 }
2549 
2550 static void intel_crtc_disable_flip_done(struct intel_atomic_state *state,
2551 					 struct intel_crtc *crtc)
2552 {
2553 	const struct intel_crtc_state *crtc_state =
2554 		intel_atomic_get_new_crtc_state(state, crtc);
2555 	u8 update_planes = crtc_state->update_planes;
2556 	const struct intel_plane_state *plane_state;
2557 	struct intel_plane *plane;
2558 	int i;
2559 
2560 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
2561 		if (plane->disable_flip_done &&
2562 		    plane->pipe == crtc->pipe &&
2563 		    update_planes & BIT(plane->id))
2564 			plane->disable_flip_done(plane);
2565 	}
2566 }
2567 
2568 static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
2569 					     struct intel_crtc *crtc)
2570 {
2571 	struct drm_i915_private *i915 = to_i915(state->base.dev);
2572 	const struct intel_crtc_state *old_crtc_state =
2573 		intel_atomic_get_old_crtc_state(state, crtc);
2574 	const struct intel_crtc_state *new_crtc_state =
2575 		intel_atomic_get_new_crtc_state(state, crtc);
2576 	u8 update_planes = new_crtc_state->update_planes;
2577 	const struct intel_plane_state *old_plane_state;
2578 	struct intel_plane *plane;
2579 	bool need_vbl_wait = false;
2580 	int i;
2581 
2582 	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
2583 		if (plane->need_async_flip_disable_wa &&
2584 		    plane->pipe == crtc->pipe &&
2585 		    update_planes & BIT(plane->id)) {
2586 			/*
2587 			 * Apart from the async flip bit we want to
2588 			 * preserve the old state for the plane.
2589 			 */
2590 			plane->async_flip(plane, old_crtc_state,
2591 					  old_plane_state, false);
2592 			need_vbl_wait = true;
2593 		}
2594 	}
2595 
2596 	if (need_vbl_wait)
2597 		intel_wait_for_vblank(i915, crtc->pipe);
2598 }
2599 
2600 static void intel_pre_plane_update(struct intel_atomic_state *state,
2601 				   struct intel_crtc *crtc)
2602 {
2603 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
2604 	const struct intel_crtc_state *old_crtc_state =
2605 		intel_atomic_get_old_crtc_state(state, crtc);
2606 	const struct intel_crtc_state *new_crtc_state =
2607 		intel_atomic_get_new_crtc_state(state, crtc);
2608 	enum pipe pipe = crtc->pipe;
2609 
2610 	if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
2611 		hsw_disable_ips(old_crtc_state);
2612 
2613 	if (intel_fbc_pre_update(state, crtc))
2614 		intel_wait_for_vblank(dev_priv, pipe);
2615 
2616 	/* Display WA 827 */
2617 	if (!needs_nv12_wa(old_crtc_state) &&
2618 	    needs_nv12_wa(new_crtc_state))
2619 		skl_wa_827(dev_priv, pipe, true);
2620 
2621 	/* Wa_2006604312:icl,ehl */
2622 	if (!needs_scalerclk_wa(old_crtc_state) &&
2623 	    needs_scalerclk_wa(new_crtc_state))
2624 		icl_wa_scalerclkgating(dev_priv, pipe, true);
2625 
2626 	/*
2627 	 * Vblank time updates from the shadow to live plane control register
2628 	 * are blocked if the memory self-refresh mode is active at that
2629 	 * moment. So to make sure the plane gets truly disabled, disable
2630 	 * first the self-refresh mode. The self-refresh enable bit in turn
2631 	 * will be checked/applied by the HW only at the next frame start
2632 	 * event which is after the vblank start event, so we need to have a
2633 	 * wait-for-vblank between disabling the plane and the pipe.
2634 	 */
2635 	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
2636 	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
2637 		intel_wait_for_vblank(dev_priv, pipe);
2638 
2639 	/*
2640 	 * IVB workaround: must disable low power watermarks for at least
2641 	 * one frame before enabling scaling.  LP watermarks can be re-enabled
2642 	 * when scaling is disabled.
2643 	 *
2644 	 * WaCxSRDisabledForSpriteScaling:ivb
2645 	 */
2646 	if (old_crtc_state->hw.active &&
2647 	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
2648 		intel_wait_for_vblank(dev_priv, pipe);
2649 
2650 	/*
2651 	 * If we're doing a modeset we don't need to do any
2652 	 * pre-vblank watermark programming here.
2653 	 */
2654 	if (!intel_crtc_needs_modeset(new_crtc_state)) {
2655 		/*
2656 		 * For platforms that support atomic watermarks, program the
2657 		 * 'intermediate' watermarks immediately.  On pre-gen9 platforms, these
2658 		 * will be the intermediate values that are safe for both pre- and
2659 		 * post- vblank; when vblank happens, the 'active' values will be set
2660 		 * to the final 'target' values and we'll do this again to get the
2661 		 * optimal watermarks.  For gen9+ platforms, the values we program here
2662 		 * will be the final target values which will get automatically latched
2663 		 * at vblank time; no further programming will be necessary.
2664 		 *
2665 		 * If a platform hasn't been transitioned to atomic watermarks yet,
2666 		 * we'll continue to update watermarks the old way, if flags tell
2667 		 * us to.
2668 		 */
2669 		if (dev_priv->display.initial_watermarks)
2670 			dev_priv->display.initial_watermarks(state, crtc);
2671 		else if (new_crtc_state->update_wm_pre)
2672 			intel_update_watermarks(crtc);
2673 	}
2674 
2675 	/*
2676 	 * Gen2 reports pipe underruns whenever all planes are disabled.
2677 	 * So disable underrun reporting before all the planes get disabled.
2678 	 *
2679 	 * We do this after .initial_watermarks() so that we have a
2680 	 * chance of catching underruns with the intermediate watermarks
2681 	 * vs. the old plane configuration.
2682 	 */
2683 	if (IS_DISPLAY_VER(dev_priv, 2) && planes_disabling(old_crtc_state, new_crtc_state))
2684 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2685 
2686 	/*
2687 	 * WA for platforms where async address update enable bit
2688 	 * is double buffered and only latched at start of vblank.
2689 	 */
2690 	if (old_crtc_state->uapi.async_flip && !new_crtc_state->uapi.async_flip)
2691 		intel_crtc_async_flip_disable_wa(state, crtc);
2692 }
2693 
2694 static void intel_crtc_disable_planes(struct intel_atomic_state *state,
2695 				      struct intel_crtc *crtc)
2696 {
2697 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2698 	const struct intel_crtc_state *new_crtc_state =
2699 		intel_atomic_get_new_crtc_state(state, crtc);
2700 	unsigned int update_mask = new_crtc_state->update_planes;
2701 	const struct intel_plane_state *old_plane_state;
2702 	struct intel_plane *plane;
2703 	unsigned fb_bits = 0;
2704 	int i;
2705 
2706 	intel_crtc_dpms_overlay_disable(crtc);
2707 
2708 	for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
2709 		if (crtc->pipe != plane->pipe ||
2710 		    !(update_mask & BIT(plane->id)))
2711 			continue;
2712 
2713 		intel_disable_plane(plane, new_crtc_state);
2714 
2715 		if (old_plane_state->uapi.visible)
2716 			fb_bits |= plane->frontbuffer_bit;
2717 	}
2718 
2719 	intel_frontbuffer_flip(dev_priv, fb_bits);
2720 }
2721 
2722 /**
2723  * intel_connector_primary_encoder - get the primary encoder for a connector
2724  * @connector: connector for which to return the encoder
2725  *
2726  * Returns the primary encoder for a connector. There is a 1:1 mapping from
2727  * all connectors to their encoder, except for DP-MST connectors which have
2728  * both a virtual and a primary encoder. These DP-MST primary encoders can be
2729  * pointed to by as many DP-MST connectors as there are pipes.
2730  */
2731 static struct intel_encoder *
2732 intel_connector_primary_encoder(struct intel_connector *connector)
2733 {
2734 	struct intel_encoder *encoder;
2735 
2736 	if (connector->mst_port)
2737 		return &dp_to_dig_port(connector->mst_port)->base;
2738 
2739 	encoder = intel_attached_encoder(connector);
2740 	drm_WARN_ON(connector->base.dev, !encoder);
2741 
2742 	return encoder;
2743 }
2744 
2745 static void intel_encoders_update_prepare(struct intel_atomic_state *state)
2746 {
2747 	struct drm_connector_state *new_conn_state;
2748 	struct drm_connector *connector;
2749 	int i;
2750 
2751 	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
2752 					i) {
2753 		struct intel_connector *intel_connector;
2754 		struct intel_encoder *encoder;
2755 		struct intel_crtc *crtc;
2756 
2757 		if (!intel_connector_needs_modeset(state, connector))
2758 			continue;
2759 
2760 		intel_connector = to_intel_connector(connector);
2761 		encoder = intel_connector_primary_encoder(intel_connector);
2762 		if (!encoder->update_prepare)
2763 			continue;
2764 
2765 		crtc = new_conn_state->crtc ?
2766 			to_intel_crtc(new_conn_state->crtc) : NULL;
2767 		encoder->update_prepare(state, encoder, crtc);
2768 	}
2769 }
2770 
2771 static void intel_encoders_update_complete(struct intel_atomic_state *state)
2772 {
2773 	struct drm_connector_state *new_conn_state;
2774 	struct drm_connector *connector;
2775 	int i;
2776 
2777 	for_each_new_connector_in_state(&state->base, connector, new_conn_state,
2778 					i) {
2779 		struct intel_connector *intel_connector;
2780 		struct intel_encoder *encoder;
2781 		struct intel_crtc *crtc;
2782 
2783 		if (!intel_connector_needs_modeset(state, connector))
2784 			continue;
2785 
2786 		intel_connector = to_intel_connector(connector);
2787 		encoder = intel_connector_primary_encoder(intel_connector);
2788 		if (!encoder->update_complete)
2789 			continue;
2790 
2791 		crtc = new_conn_state->crtc ?
2792 			to_intel_crtc(new_conn_state->crtc) : NULL;
2793 		encoder->update_complete(state, encoder, crtc);
2794 	}
2795 }
2796 
2797 static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state,
2798 					  struct intel_crtc *crtc)
2799 {
2800 	const struct intel_crtc_state *crtc_state =
2801 		intel_atomic_get_new_crtc_state(state, crtc);
2802 	const struct drm_connector_state *conn_state;
2803 	struct drm_connector *conn;
2804 	int i;
2805 
2806 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2807 		struct intel_encoder *encoder =
2808 			to_intel_encoder(conn_state->best_encoder);
2809 
2810 		if (conn_state->crtc != &crtc->base)
2811 			continue;
2812 
2813 		if (encoder->pre_pll_enable)
2814 			encoder->pre_pll_enable(state, encoder,
2815 						crtc_state, conn_state);
2816 	}
2817 }
2818 
2819 static void intel_encoders_pre_enable(struct intel_atomic_state *state,
2820 				      struct intel_crtc *crtc)
2821 {
2822 	const struct intel_crtc_state *crtc_state =
2823 		intel_atomic_get_new_crtc_state(state, crtc);
2824 	const struct drm_connector_state *conn_state;
2825 	struct drm_connector *conn;
2826 	int i;
2827 
2828 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2829 		struct intel_encoder *encoder =
2830 			to_intel_encoder(conn_state->best_encoder);
2831 
2832 		if (conn_state->crtc != &crtc->base)
2833 			continue;
2834 
2835 		if (encoder->pre_enable)
2836 			encoder->pre_enable(state, encoder,
2837 					    crtc_state, conn_state);
2838 	}
2839 }
2840 
2841 static void intel_encoders_enable(struct intel_atomic_state *state,
2842 				  struct intel_crtc *crtc)
2843 {
2844 	const struct intel_crtc_state *crtc_state =
2845 		intel_atomic_get_new_crtc_state(state, crtc);
2846 	const struct drm_connector_state *conn_state;
2847 	struct drm_connector *conn;
2848 	int i;
2849 
2850 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2851 		struct intel_encoder *encoder =
2852 			to_intel_encoder(conn_state->best_encoder);
2853 
2854 		if (conn_state->crtc != &crtc->base)
2855 			continue;
2856 
2857 		if (encoder->enable)
2858 			encoder->enable(state, encoder,
2859 					crtc_state, conn_state);
2860 		intel_opregion_notify_encoder(encoder, true);
2861 	}
2862 }
2863 
2864 static void intel_encoders_disable(struct intel_atomic_state *state,
2865 				   struct intel_crtc *crtc)
2866 {
2867 	const struct intel_crtc_state *old_crtc_state =
2868 		intel_atomic_get_old_crtc_state(state, crtc);
2869 	const struct drm_connector_state *old_conn_state;
2870 	struct drm_connector *conn;
2871 	int i;
2872 
2873 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
2874 		struct intel_encoder *encoder =
2875 			to_intel_encoder(old_conn_state->best_encoder);
2876 
2877 		if (old_conn_state->crtc != &crtc->base)
2878 			continue;
2879 
2880 		intel_opregion_notify_encoder(encoder, false);
2881 		if (encoder->disable)
2882 			encoder->disable(state, encoder,
2883 					 old_crtc_state, old_conn_state);
2884 	}
2885 }
2886 
2887 static void intel_encoders_post_disable(struct intel_atomic_state *state,
2888 					struct intel_crtc *crtc)
2889 {
2890 	const struct intel_crtc_state *old_crtc_state =
2891 		intel_atomic_get_old_crtc_state(state, crtc);
2892 	const struct drm_connector_state *old_conn_state;
2893 	struct drm_connector *conn;
2894 	int i;
2895 
2896 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
2897 		struct intel_encoder *encoder =
2898 			to_intel_encoder(old_conn_state->best_encoder);
2899 
2900 		if (old_conn_state->crtc != &crtc->base)
2901 			continue;
2902 
2903 		if (encoder->post_disable)
2904 			encoder->post_disable(state, encoder,
2905 					      old_crtc_state, old_conn_state);
2906 	}
2907 }
2908 
2909 static void intel_encoders_post_pll_disable(struct intel_atomic_state *state,
2910 					    struct intel_crtc *crtc)
2911 {
2912 	const struct intel_crtc_state *old_crtc_state =
2913 		intel_atomic_get_old_crtc_state(state, crtc);
2914 	const struct drm_connector_state *old_conn_state;
2915 	struct drm_connector *conn;
2916 	int i;
2917 
2918 	for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
2919 		struct intel_encoder *encoder =
2920 			to_intel_encoder(old_conn_state->best_encoder);
2921 
2922 		if (old_conn_state->crtc != &crtc->base)
2923 			continue;
2924 
2925 		if (encoder->post_pll_disable)
2926 			encoder->post_pll_disable(state, encoder,
2927 						  old_crtc_state, old_conn_state);
2928 	}
2929 }
2930 
2931 static void intel_encoders_update_pipe(struct intel_atomic_state *state,
2932 				       struct intel_crtc *crtc)
2933 {
2934 	const struct intel_crtc_state *crtc_state =
2935 		intel_atomic_get_new_crtc_state(state, crtc);
2936 	const struct drm_connector_state *conn_state;
2937 	struct drm_connector *conn;
2938 	int i;
2939 
2940 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
2941 		struct intel_encoder *encoder =
2942 			to_intel_encoder(conn_state->best_encoder);
2943 
2944 		if (conn_state->crtc != &crtc->base)
2945 			continue;
2946 
2947 		if (encoder->update_pipe)
2948 			encoder->update_pipe(state, encoder,
2949 					     crtc_state, conn_state);
2950 	}
2951 }
2952 
2953 static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
2954 {
2955 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
2956 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
2957 
2958 	plane->disable_plane(plane, crtc_state);
2959 }
2960 
2961 static void ilk_crtc_enable(struct intel_atomic_state *state,
2962 			    struct intel_crtc *crtc)
2963 {
2964 	const struct intel_crtc_state *new_crtc_state =
2965 		intel_atomic_get_new_crtc_state(state, crtc);
2966 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2967 	enum pipe pipe = crtc->pipe;
2968 
2969 	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
2970 		return;
2971 
2972 	/*
2973 	 * Sometimes spurious CPU pipe underruns happen during FDI
2974 	 * training, at least with VGA+HDMI cloning. Suppress them.
2975 	 *
2976 	 * On ILK we get an occasional spurious CPU pipe underruns
2977 	 * between eDP port A enable and vdd enable. Also PCH port
2978 	 * enable seems to result in the occasional CPU pipe underrun.
2979 	 *
2980 	 * Spurious PCH underruns also occur during PCH enabling.
2981 	 */
2982 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2983 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
2984 
2985 	if (new_crtc_state->has_pch_encoder)
2986 		intel_prepare_shared_dpll(new_crtc_state);
2987 
2988 	if (intel_crtc_has_dp_encoder(new_crtc_state))
2989 		intel_dp_set_m_n(new_crtc_state, M1_N1);
2990 
2991 	intel_set_transcoder_timings(new_crtc_state);
2992 	intel_set_pipe_src_size(new_crtc_state);
2993 
2994 	if (new_crtc_state->has_pch_encoder)
2995 		intel_cpu_transcoder_set_m_n(new_crtc_state,
2996 					     &new_crtc_state->fdi_m_n, NULL);
2997 
2998 	ilk_set_pipeconf(new_crtc_state);
2999 
3000 	crtc->active = true;
3001 
3002 	intel_encoders_pre_enable(state, crtc);
3003 
3004 	if (new_crtc_state->has_pch_encoder) {
3005 		/* Note: FDI PLL enabling _must_ be done before we enable the
3006 		 * cpu pipes, hence this is separate from all the other fdi/pch
3007 		 * enabling. */
3008 		ilk_fdi_pll_enable(new_crtc_state);
3009 	} else {
3010 		assert_fdi_tx_disabled(dev_priv, pipe);
3011 		assert_fdi_rx_disabled(dev_priv, pipe);
3012 	}
3013 
3014 	ilk_pfit_enable(new_crtc_state);
3015 
3016 	/*
3017 	 * On ILK+ LUT must be loaded before the pipe is running but with
3018 	 * clocks enabled
3019 	 */
3020 	intel_color_load_luts(new_crtc_state);
3021 	intel_color_commit(new_crtc_state);
3022 	/* update DSPCNTR to configure gamma for pipe bottom color */
3023 	intel_disable_primary_plane(new_crtc_state);
3024 
3025 	if (dev_priv->display.initial_watermarks)
3026 		dev_priv->display.initial_watermarks(state, crtc);
3027 	intel_enable_pipe(new_crtc_state);
3028 
3029 	if (new_crtc_state->has_pch_encoder)
3030 		ilk_pch_enable(state, new_crtc_state);
3031 
3032 	intel_crtc_vblank_on(new_crtc_state);
3033 
3034 	intel_encoders_enable(state, crtc);
3035 
3036 	if (HAS_PCH_CPT(dev_priv))
3037 		cpt_verify_modeset(dev_priv, pipe);
3038 
3039 	/*
3040 	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
3041 	 * And a second vblank wait is needed at least on ILK with
3042 	 * some interlaced HDMI modes. Let's do the double wait always
3043 	 * in case there are more corner cases we don't know about.
3044 	 */
3045 	if (new_crtc_state->has_pch_encoder) {
3046 		intel_wait_for_vblank(dev_priv, pipe);
3047 		intel_wait_for_vblank(dev_priv, pipe);
3048 	}
3049 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
3050 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
3051 }
3052 
3053 /* IPS only exists on ULT machines and is tied to pipe A. */
3054 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
3055 {
3056 	return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
3057 }
3058 
3059 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
3060 					    enum pipe pipe, bool apply)
3061 {
3062 	u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
3063 	u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
3064 
3065 	if (apply)
3066 		val |= mask;
3067 	else
3068 		val &= ~mask;
3069 
3070 	intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
3071 }
3072 
3073 static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
3074 {
3075 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3076 	enum pipe pipe = crtc->pipe;
3077 	u32 val;
3078 
3079 	val = MBUS_DBOX_A_CREDIT(2);
3080 
3081 	if (DISPLAY_VER(dev_priv) >= 12) {
3082 		val |= MBUS_DBOX_BW_CREDIT(2);
3083 		val |= MBUS_DBOX_B_CREDIT(12);
3084 	} else {
3085 		val |= MBUS_DBOX_BW_CREDIT(1);
3086 		val |= MBUS_DBOX_B_CREDIT(8);
3087 	}
3088 
3089 	intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
3090 }
3091 
3092 static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
3093 {
3094 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3095 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3096 
3097 	intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
3098 		       HSW_LINETIME(crtc_state->linetime) |
3099 		       HSW_IPS_LINETIME(crtc_state->ips_linetime));
3100 }
3101 
3102 static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
3103 {
3104 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3105 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3106 	i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
3107 	u32 val;
3108 
3109 	val = intel_de_read(dev_priv, reg);
3110 	val &= ~HSW_FRAME_START_DELAY_MASK;
3111 	val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
3112 	intel_de_write(dev_priv, reg, val);
3113 }
3114 
3115 static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
3116 					 const struct intel_crtc_state *crtc_state)
3117 {
3118 	struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc);
3119 	struct intel_crtc_state *master_crtc_state;
3120 	struct drm_connector_state *conn_state;
3121 	struct drm_connector *conn;
3122 	struct intel_encoder *encoder = NULL;
3123 	int i;
3124 
3125 	if (crtc_state->bigjoiner_slave)
3126 		master = crtc_state->bigjoiner_linked_crtc;
3127 
3128 	master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
3129 
3130 	for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
3131 		if (conn_state->crtc != &master->base)
3132 			continue;
3133 
3134 		encoder = to_intel_encoder(conn_state->best_encoder);
3135 		break;
3136 	}
3137 
3138 	if (!crtc_state->bigjoiner_slave) {
3139 		/* need to enable VDSC, which we skipped in pre-enable */
3140 		intel_dsc_enable(encoder, crtc_state);
3141 	} else {
3142 		/*
3143 		 * Enable sequence steps 1-7 on bigjoiner master
3144 		 */
3145 		intel_encoders_pre_pll_enable(state, master);
3146 		intel_enable_shared_dpll(master_crtc_state);
3147 		intel_encoders_pre_enable(state, master);
3148 
3149 		/* and DSC on slave */
3150 		intel_dsc_enable(NULL, crtc_state);
3151 	}
3152 }
3153 
3154 static void hsw_crtc_enable(struct intel_atomic_state *state,
3155 			    struct intel_crtc *crtc)
3156 {
3157 	const struct intel_crtc_state *new_crtc_state =
3158 		intel_atomic_get_new_crtc_state(state, crtc);
3159 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3160 	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
3161 	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
3162 	bool psl_clkgate_wa;
3163 
3164 	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
3165 		return;
3166 
3167 	if (!new_crtc_state->bigjoiner) {
3168 		intel_encoders_pre_pll_enable(state, crtc);
3169 
3170 		if (new_crtc_state->shared_dpll)
3171 			intel_enable_shared_dpll(new_crtc_state);
3172 
3173 		intel_encoders_pre_enable(state, crtc);
3174 	} else {
3175 		icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
3176 	}
3177 
3178 	intel_set_pipe_src_size(new_crtc_state);
3179 	if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
3180 		bdw_set_pipemisc(new_crtc_state);
3181 
3182 	if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
3183 		intel_set_transcoder_timings(new_crtc_state);
3184 
3185 		if (cpu_transcoder != TRANSCODER_EDP)
3186 			intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
3187 				       new_crtc_state->pixel_multiplier - 1);
3188 
3189 		if (new_crtc_state->has_pch_encoder)
3190 			intel_cpu_transcoder_set_m_n(new_crtc_state,
3191 						     &new_crtc_state->fdi_m_n, NULL);
3192 
3193 		hsw_set_frame_start_delay(new_crtc_state);
3194 	}
3195 
3196 	if (!transcoder_is_dsi(cpu_transcoder))
3197 		hsw_set_pipeconf(new_crtc_state);
3198 
3199 	crtc->active = true;
3200 
3201 	/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
3202 	psl_clkgate_wa = IS_DISPLAY_VER(dev_priv, 10) &&
3203 		new_crtc_state->pch_pfit.enabled;
3204 	if (psl_clkgate_wa)
3205 		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
3206 
3207 	if (DISPLAY_VER(dev_priv) >= 9)
3208 		skl_pfit_enable(new_crtc_state);
3209 	else
3210 		ilk_pfit_enable(new_crtc_state);
3211 
3212 	/*
3213 	 * On ILK+ LUT must be loaded before the pipe is running but with
3214 	 * clocks enabled
3215 	 */
3216 	intel_color_load_luts(new_crtc_state);
3217 	intel_color_commit(new_crtc_state);
3218 	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
3219 	if (DISPLAY_VER(dev_priv) < 9)
3220 		intel_disable_primary_plane(new_crtc_state);
3221 
3222 	hsw_set_linetime_wm(new_crtc_state);
3223 
3224 	if (DISPLAY_VER(dev_priv) >= 11)
3225 		icl_set_pipe_chicken(crtc);
3226 
3227 	if (dev_priv->display.initial_watermarks)
3228 		dev_priv->display.initial_watermarks(state, crtc);
3229 
3230 	if (DISPLAY_VER(dev_priv) >= 11)
3231 		icl_pipe_mbus_enable(crtc);
3232 
3233 	if (new_crtc_state->bigjoiner_slave)
3234 		intel_crtc_vblank_on(new_crtc_state);
3235 
3236 	intel_encoders_enable(state, crtc);
3237 
3238 	if (psl_clkgate_wa) {
3239 		intel_wait_for_vblank(dev_priv, pipe);
3240 		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
3241 	}
3242 
3243 	/* If we change the relative order between pipe/planes enabling, we need
3244 	 * to change the workaround. */
3245 	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
3246 	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
3247 		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
3248 		intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
3249 	}
3250 }
3251 
3252 void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
3253 {
3254 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
3255 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3256 	enum pipe pipe = crtc->pipe;
3257 
3258 	/* To avoid upsetting the power well on Haswell, only disable the pfit
3259 	 * if it's in use. The hw state code will make sure we get this right. */
3260 	if (!old_crtc_state->pch_pfit.enabled)
3261 		return;
3262 
3263 	intel_de_write(dev_priv, PF_CTL(pipe), 0);
3264 	intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
3265 	intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
3266 }
3267 
3268 static void ilk_crtc_disable(struct intel_atomic_state *state,
3269 			     struct intel_crtc *crtc)
3270 {
3271 	const struct intel_crtc_state *old_crtc_state =
3272 		intel_atomic_get_old_crtc_state(state, crtc);
3273 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3274 	enum pipe pipe = crtc->pipe;
3275 
3276 	/*
3277 	 * Sometimes spurious CPU pipe underruns happen when the
3278 	 * pipe is already disabled, but FDI RX/TX is still enabled.
3279 	 * Happens at least with VGA+HDMI cloning. Suppress them.
3280 	 */
3281 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
3282 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
3283 
3284 	intel_encoders_disable(state, crtc);
3285 
3286 	intel_crtc_vblank_off(old_crtc_state);
3287 
3288 	intel_disable_pipe(old_crtc_state);
3289 
3290 	ilk_pfit_disable(old_crtc_state);
3291 
3292 	if (old_crtc_state->has_pch_encoder)
3293 		ilk_fdi_disable(crtc);
3294 
3295 	intel_encoders_post_disable(state, crtc);
3296 
3297 	if (old_crtc_state->has_pch_encoder) {
3298 		ilk_disable_pch_transcoder(dev_priv, pipe);
3299 
3300 		if (HAS_PCH_CPT(dev_priv)) {
3301 			i915_reg_t reg;
3302 			u32 temp;
3303 
3304 			/* disable TRANS_DP_CTL */
3305 			reg = TRANS_DP_CTL(pipe);
3306 			temp = intel_de_read(dev_priv, reg);
3307 			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
3308 				  TRANS_DP_PORT_SEL_MASK);
3309 			temp |= TRANS_DP_PORT_SEL_NONE;
3310 			intel_de_write(dev_priv, reg, temp);
3311 
3312 			/* disable DPLL_SEL */
3313 			temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
3314 			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
3315 			intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
3316 		}
3317 
3318 		ilk_fdi_pll_disable(crtc);
3319 	}
3320 
3321 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
3322 	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
3323 }
3324 
3325 static void hsw_crtc_disable(struct intel_atomic_state *state,
3326 			     struct intel_crtc *crtc)
3327 {
3328 	/*
3329 	 * FIXME collapse everything to one hook.
3330 	 * Need care with mst->ddi interactions.
3331 	 */
3332 	intel_encoders_disable(state, crtc);
3333 	intel_encoders_post_disable(state, crtc);
3334 }
3335 
3336 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
3337 {
3338 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3339 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3340 
3341 	if (!crtc_state->gmch_pfit.control)
3342 		return;
3343 
3344 	/*
3345 	 * The panel fitter should only be adjusted whilst the pipe is disabled,
3346 	 * according to the register description and the PRM.
3347 	 */
3348 	drm_WARN_ON(&dev_priv->drm,
3349 		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
3350 	assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
3351 
3352 	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
3353 		       crtc_state->gmch_pfit.pgm_ratios);
3354 	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
3355 
3356 	/* Border color in case we don't scale up to the full screen. Black by
3357 	 * default, change to something else for debugging. */
3358 	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
3359 }
3360 
3361 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
3362 {
3363 	if (phy == PHY_NONE)
3364 		return false;
3365 	else if (IS_ALDERLAKE_S(dev_priv))
3366 		return phy <= PHY_E;
3367 	else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
3368 		return phy <= PHY_D;
3369 	else if (IS_JSL_EHL(dev_priv))
3370 		return phy <= PHY_C;
3371 	else if (DISPLAY_VER(dev_priv) >= 11)
3372 		return phy <= PHY_B;
3373 	else
3374 		return false;
3375 }
3376 
3377 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
3378 {
3379 	if (IS_TIGERLAKE(dev_priv))
3380 		return phy >= PHY_D && phy <= PHY_I;
3381 	else if (IS_ICELAKE(dev_priv))
3382 		return phy >= PHY_C && phy <= PHY_F;
3383 	else
3384 		return false;
3385 }
3386 
3387 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
3388 {
3389 	if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1)
3390 		return PHY_B + port - PORT_TC1;
3391 	else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1)
3392 		return PHY_C + port - PORT_TC1;
3393 	else if (IS_JSL_EHL(i915) && port == PORT_D)
3394 		return PHY_A;
3395 
3396 	return PHY_A + port - PORT_A;
3397 }
3398 
3399 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
3400 {
3401 	if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port)))
3402 		return TC_PORT_NONE;
3403 
3404 	if (DISPLAY_VER(dev_priv) >= 12)
3405 		return TC_PORT_1 + port - PORT_TC1;
3406 	else
3407 		return TC_PORT_1 + port - PORT_C;
3408 }
3409 
3410 enum intel_display_power_domain intel_port_to_power_domain(enum port port)
3411 {
3412 	switch (port) {
3413 	case PORT_A:
3414 		return POWER_DOMAIN_PORT_DDI_A_LANES;
3415 	case PORT_B:
3416 		return POWER_DOMAIN_PORT_DDI_B_LANES;
3417 	case PORT_C:
3418 		return POWER_DOMAIN_PORT_DDI_C_LANES;
3419 	case PORT_D:
3420 		return POWER_DOMAIN_PORT_DDI_D_LANES;
3421 	case PORT_E:
3422 		return POWER_DOMAIN_PORT_DDI_E_LANES;
3423 	case PORT_F:
3424 		return POWER_DOMAIN_PORT_DDI_F_LANES;
3425 	case PORT_G:
3426 		return POWER_DOMAIN_PORT_DDI_G_LANES;
3427 	case PORT_H:
3428 		return POWER_DOMAIN_PORT_DDI_H_LANES;
3429 	case PORT_I:
3430 		return POWER_DOMAIN_PORT_DDI_I_LANES;
3431 	default:
3432 		MISSING_CASE(port);
3433 		return POWER_DOMAIN_PORT_OTHER;
3434 	}
3435 }
3436 
3437 enum intel_display_power_domain
3438 intel_aux_power_domain(struct intel_digital_port *dig_port)
3439 {
3440 	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
3441 	enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
3442 
3443 	if (intel_phy_is_tc(dev_priv, phy) &&
3444 	    dig_port->tc_mode == TC_PORT_TBT_ALT) {
3445 		switch (dig_port->aux_ch) {
3446 		case AUX_CH_C:
3447 			return POWER_DOMAIN_AUX_C_TBT;
3448 		case AUX_CH_D:
3449 			return POWER_DOMAIN_AUX_D_TBT;
3450 		case AUX_CH_E:
3451 			return POWER_DOMAIN_AUX_E_TBT;
3452 		case AUX_CH_F:
3453 			return POWER_DOMAIN_AUX_F_TBT;
3454 		case AUX_CH_G:
3455 			return POWER_DOMAIN_AUX_G_TBT;
3456 		case AUX_CH_H:
3457 			return POWER_DOMAIN_AUX_H_TBT;
3458 		case AUX_CH_I:
3459 			return POWER_DOMAIN_AUX_I_TBT;
3460 		default:
3461 			MISSING_CASE(dig_port->aux_ch);
3462 			return POWER_DOMAIN_AUX_C_TBT;
3463 		}
3464 	}
3465 
3466 	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
3467 }
3468 
3469 /*
3470  * Converts aux_ch to power_domain without caring about TBT ports;
3471  * for those, use intel_aux_power_domain().
3472  */
3473 enum intel_display_power_domain
3474 intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
3475 {
3476 	switch (aux_ch) {
3477 	case AUX_CH_A:
3478 		return POWER_DOMAIN_AUX_A;
3479 	case AUX_CH_B:
3480 		return POWER_DOMAIN_AUX_B;
3481 	case AUX_CH_C:
3482 		return POWER_DOMAIN_AUX_C;
3483 	case AUX_CH_D:
3484 		return POWER_DOMAIN_AUX_D;
3485 	case AUX_CH_E:
3486 		return POWER_DOMAIN_AUX_E;
3487 	case AUX_CH_F:
3488 		return POWER_DOMAIN_AUX_F;
3489 	case AUX_CH_G:
3490 		return POWER_DOMAIN_AUX_G;
3491 	case AUX_CH_H:
3492 		return POWER_DOMAIN_AUX_H;
3493 	case AUX_CH_I:
3494 		return POWER_DOMAIN_AUX_I;
3495 	default:
3496 		MISSING_CASE(aux_ch);
3497 		return POWER_DOMAIN_AUX_A;
3498 	}
3499 }
3500 
3501 static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
3502 {
3503 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3504 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3505 	struct drm_encoder *encoder;
3506 	enum pipe pipe = crtc->pipe;
3507 	u64 mask;
3508 	enum transcoder transcoder = crtc_state->cpu_transcoder;
3509 
3510 	if (!crtc_state->hw.active)
3511 		return 0;
3512 
3513 	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
3514 	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
3515 	if (crtc_state->pch_pfit.enabled ||
3516 	    crtc_state->pch_pfit.force_thru)
3517 		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
3518 
3519 	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
3520 				  crtc_state->uapi.encoder_mask) {
3521 		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3522 
3523 		mask |= BIT_ULL(intel_encoder->power_domain);
3524 	}
3525 
3526 	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
3527 		mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
3528 
3529 	if (crtc_state->shared_dpll)
3530 		mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
3531 
3532 	if (crtc_state->dsc.compression_enable)
3533 		mask |= BIT_ULL(intel_dsc_power_domain(crtc_state));
3534 
3535 	return mask;
3536 }
3537 
3538 static u64
3539 modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
3540 {
3541 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3542 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3543 	enum intel_display_power_domain domain;
3544 	u64 domains, new_domains, old_domains;
3545 
3546 	domains = get_crtc_power_domains(crtc_state);
3547 
3548 	new_domains = domains & ~crtc->enabled_power_domains.mask;
3549 	old_domains = crtc->enabled_power_domains.mask & ~domains;
3550 
3551 	for_each_power_domain(domain, new_domains)
3552 		intel_display_power_get_in_set(dev_priv,
3553 					       &crtc->enabled_power_domains,
3554 					       domain);
3555 
3556 	return old_domains;
3557 }
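/*
 * Note the asymmetry above: references for newly needed power domains are
 * taken here, before the modeset touches the hardware, while the returned
 * mask of no-longer-needed domains is only released by the caller via
 * modeset_put_crtc_power_domains() once the modeset has completed. Power
 * is therefore never dropped while the pipe may still depend on it.
 */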
3558 
3559 static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
3560 					   u64 domains)
3561 {
3562 	intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
3563 					    &crtc->enabled_power_domains,
3564 					    domains);
3565 }
3566 
3567 static void valleyview_crtc_enable(struct intel_atomic_state *state,
3568 				   struct intel_crtc *crtc)
3569 {
3570 	const struct intel_crtc_state *new_crtc_state =
3571 		intel_atomic_get_new_crtc_state(state, crtc);
3572 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3573 	enum pipe pipe = crtc->pipe;
3574 
3575 	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
3576 		return;
3577 
3578 	if (intel_crtc_has_dp_encoder(new_crtc_state))
3579 		intel_dp_set_m_n(new_crtc_state, M1_N1);
3580 
3581 	intel_set_transcoder_timings(new_crtc_state);
3582 	intel_set_pipe_src_size(new_crtc_state);
3583 
3584 	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
3585 		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
3586 		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
3587 	}
3588 
3589 	i9xx_set_pipeconf(new_crtc_state);
3590 
3591 	crtc->active = true;
3592 
3593 	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
3594 
3595 	intel_encoders_pre_pll_enable(state, crtc);
3596 
3597 	if (IS_CHERRYVIEW(dev_priv)) {
3598 		chv_prepare_pll(crtc, new_crtc_state);
3599 		chv_enable_pll(crtc, new_crtc_state);
3600 	} else {
3601 		vlv_prepare_pll(crtc, new_crtc_state);
3602 		vlv_enable_pll(crtc, new_crtc_state);
3603 	}
3604 
3605 	intel_encoders_pre_enable(state, crtc);
3606 
3607 	i9xx_pfit_enable(new_crtc_state);
3608 
3609 	intel_color_load_luts(new_crtc_state);
3610 	intel_color_commit(new_crtc_state);
3611 	/* update DSPCNTR to configure gamma for pipe bottom color */
3612 	intel_disable_primary_plane(new_crtc_state);
3613 
3614 	dev_priv->display.initial_watermarks(state, crtc);
3615 	intel_enable_pipe(new_crtc_state);
3616 
3617 	intel_crtc_vblank_on(new_crtc_state);
3618 
3619 	intel_encoders_enable(state, crtc);
3620 }
3621 
3622 static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
3623 {
3624 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3625 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3626 
3627 	intel_de_write(dev_priv, FP0(crtc->pipe),
3628 		       crtc_state->dpll_hw_state.fp0);
3629 	intel_de_write(dev_priv, FP1(crtc->pipe),
3630 		       crtc_state->dpll_hw_state.fp1);
3631 }
3632 
3633 static void i9xx_crtc_enable(struct intel_atomic_state *state,
3634 			     struct intel_crtc *crtc)
3635 {
3636 	const struct intel_crtc_state *new_crtc_state =
3637 		intel_atomic_get_new_crtc_state(state, crtc);
3638 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3639 	enum pipe pipe = crtc->pipe;
3640 
3641 	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
3642 		return;
3643 
3644 	i9xx_set_pll_dividers(new_crtc_state);
3645 
3646 	if (intel_crtc_has_dp_encoder(new_crtc_state))
3647 		intel_dp_set_m_n(new_crtc_state, M1_N1);
3648 
3649 	intel_set_transcoder_timings(new_crtc_state);
3650 	intel_set_pipe_src_size(new_crtc_state);
3651 
3652 	i9xx_set_pipeconf(new_crtc_state);
3653 
3654 	crtc->active = true;
3655 
3656 	if (!IS_DISPLAY_VER(dev_priv, 2))
3657 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
3658 
3659 	intel_encoders_pre_enable(state, crtc);
3660 
3661 	i9xx_enable_pll(crtc, new_crtc_state);
3662 
3663 	i9xx_pfit_enable(new_crtc_state);
3664 
3665 	intel_color_load_luts(new_crtc_state);
3666 	intel_color_commit(new_crtc_state);
3667 	/* update DSPCNTR to configure gamma for pipe bottom color */
3668 	intel_disable_primary_plane(new_crtc_state);
3669 
3670 	if (dev_priv->display.initial_watermarks)
3671 		dev_priv->display.initial_watermarks(state, crtc);
3672 	else
3673 		intel_update_watermarks(crtc);
3674 	intel_enable_pipe(new_crtc_state);
3675 
3676 	intel_crtc_vblank_on(new_crtc_state);
3677 
3678 	intel_encoders_enable(state, crtc);
3679 
3680 	/* prevents spurious underruns */
3681 	if (IS_DISPLAY_VER(dev_priv, 2))
3682 		intel_wait_for_vblank(dev_priv, pipe);
3683 }
3684 
3685 static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
3686 {
3687 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
3688 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3689 
3690 	if (!old_crtc_state->gmch_pfit.control)
3691 		return;
3692 
3693 	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
3694 
3695 	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
3696 		    intel_de_read(dev_priv, PFIT_CONTROL));
3697 	intel_de_write(dev_priv, PFIT_CONTROL, 0);
3698 }
3699 
3700 static void i9xx_crtc_disable(struct intel_atomic_state *state,
3701 			      struct intel_crtc *crtc)
3702 {
3703 	struct intel_crtc_state *old_crtc_state =
3704 		intel_atomic_get_old_crtc_state(state, crtc);
3705 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3706 	enum pipe pipe = crtc->pipe;
3707 
3708 	/*
3709 	 * On gen2 planes are double buffered but the pipe isn't, so we must
3710 	 * wait for planes to fully turn off before disabling the pipe.
3711 	 */
3712 	if (IS_DISPLAY_VER(dev_priv, 2))
3713 		intel_wait_for_vblank(dev_priv, pipe);
3714 
3715 	intel_encoders_disable(state, crtc);
3716 
3717 	intel_crtc_vblank_off(old_crtc_state);
3718 
3719 	intel_disable_pipe(old_crtc_state);
3720 
3721 	i9xx_pfit_disable(old_crtc_state);
3722 
3723 	intel_encoders_post_disable(state, crtc);
3724 
3725 	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
3726 		if (IS_CHERRYVIEW(dev_priv))
3727 			chv_disable_pll(dev_priv, pipe);
3728 		else if (IS_VALLEYVIEW(dev_priv))
3729 			vlv_disable_pll(dev_priv, pipe);
3730 		else
3731 			i9xx_disable_pll(old_crtc_state);
3732 	}
3733 
3734 	intel_encoders_post_pll_disable(state, crtc);
3735 
3736 	if (!IS_DISPLAY_VER(dev_priv, 2))
3737 		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
3738 
3739 	if (!dev_priv->display.initial_watermarks)
3740 		intel_update_watermarks(crtc);
3741 
3742 	/* clock the pipe down to 640x480@60 to potentially save power */
3743 	if (IS_I830(dev_priv))
3744 		i830_enable_pipe(dev_priv, pipe);
3745 }
3746 
3747 static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
3748 					struct drm_modeset_acquire_ctx *ctx)
3749 {
3750 	struct intel_encoder *encoder;
3751 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3752 	struct intel_bw_state *bw_state =
3753 		to_intel_bw_state(dev_priv->bw_obj.state);
3754 	struct intel_cdclk_state *cdclk_state =
3755 		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
3756 	struct intel_dbuf_state *dbuf_state =
3757 		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
3758 	struct intel_crtc_state *crtc_state =
3759 		to_intel_crtc_state(crtc->base.state);
3760 	struct intel_plane *plane;
3761 	struct drm_atomic_state *state;
3762 	struct intel_crtc_state *temp_crtc_state;
3763 	enum pipe pipe = crtc->pipe;
3764 	int ret;
3765 
3766 	if (!crtc_state->hw.active)
3767 		return;
3768 
3769 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
3770 		const struct intel_plane_state *plane_state =
3771 			to_intel_plane_state(plane->base.state);
3772 
3773 		if (plane_state->uapi.visible)
3774 			intel_plane_disable_noatomic(crtc, plane);
3775 	}
3776 
3777 	state = drm_atomic_state_alloc(&dev_priv->drm);
3778 	if (!state) {
3779 		drm_dbg_kms(&dev_priv->drm,
3780 			    "failed to disable [CRTC:%d:%s], out of memory",
3781 			    crtc->base.base.id, crtc->base.name);
3782 		return;
3783 	}
3784 
3785 	state->acquire_ctx = ctx;
3786 
3787 	/* Everything's already locked, -EDEADLK can't happen. */
3788 	temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
3789 	ret = drm_atomic_add_affected_connectors(state, &crtc->base);
3790 
3791 	drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);
3792 
3793 	dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);
3794 
3795 	drm_atomic_state_put(state);
3796 
3797 	drm_dbg_kms(&dev_priv->drm,
3798 		    "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
3799 		    crtc->base.base.id, crtc->base.name);
3800 
3801 	crtc->active = false;
3802 	crtc->base.enabled = false;
3803 
3804 	drm_WARN_ON(&dev_priv->drm,
3805 		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
3806 	crtc_state->uapi.active = false;
3807 	crtc_state->uapi.connector_mask = 0;
3808 	crtc_state->uapi.encoder_mask = 0;
3809 	intel_crtc_free_hw_state(crtc_state);
3810 	memset(&crtc_state->hw, 0, sizeof(crtc_state->hw));
3811 
3812 	for_each_encoder_on_crtc(&dev_priv->drm, &crtc->base, encoder)
3813 		encoder->base.crtc = NULL;
3814 
3815 	intel_fbc_disable(crtc);
3816 	intel_update_watermarks(crtc);
3817 	intel_disable_shared_dpll(crtc_state);
3818 
3819 	intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);
3820 
3821 	dev_priv->active_pipes &= ~BIT(pipe);
3822 	cdclk_state->min_cdclk[pipe] = 0;
3823 	cdclk_state->min_voltage_level[pipe] = 0;
3824 	cdclk_state->active_pipes &= ~BIT(pipe);
3825 
3826 	dbuf_state->active_pipes &= ~BIT(pipe);
3827 
3828 	bw_state->data_rate[pipe] = 0;
3829 	bw_state->num_active_planes[pipe] = 0;
3830 }
3831 
3832 /*
3833  * Turn all CRTCs off, but do not adjust state.
3834  * This has to be paired with a call to intel_modeset_setup_hw_state.
3835  */
3836 int intel_display_suspend(struct drm_device *dev)
3837 {
3838 	struct drm_i915_private *dev_priv = to_i915(dev);
3839 	struct drm_atomic_state *state;
3840 	int ret;
3841 
3842 	state = drm_atomic_helper_suspend(dev);
3843 	ret = PTR_ERR_OR_ZERO(state);
3844 	if (ret)
3845 		drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
3846 			ret);
3847 	else
3848 		dev_priv->modeset_restore_state = state;
3849 	return ret;
3850 }
3851 
3852 void intel_encoder_destroy(struct drm_encoder *encoder)
3853 {
3854 	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3855 
3856 	drm_encoder_cleanup(encoder);
3857 	kfree(intel_encoder);
3858 }
3859 
3860 /* Cross check the actual hw state with our own modeset state tracking (and its
3861  * internal consistency). */
3862 static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
3863 					 struct drm_connector_state *conn_state)
3864 {
3865 	struct intel_connector *connector = to_intel_connector(conn_state->connector);
3866 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
3867 
3868 	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
3869 		    connector->base.base.id, connector->base.name);
3870 
3871 	if (connector->get_hw_state(connector)) {
3872 		struct intel_encoder *encoder = intel_attached_encoder(connector);
3873 
3874 		I915_STATE_WARN(!crtc_state,
3875 			 "connector enabled without attached crtc\n");
3876 
3877 		if (!crtc_state)
3878 			return;
3879 
3880 		I915_STATE_WARN(!crtc_state->hw.active,
3881 				"connector is active, but attached crtc isn't\n");
3882 
3883 		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
3884 			return;
3885 
3886 		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
3887 			"atomic encoder doesn't match attached encoder\n");
3888 
3889 		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
3890 			"attached encoder crtc differs from connector crtc\n");
3891 	} else {
3892 		I915_STATE_WARN(crtc_state && crtc_state->hw.active,
3893 				"attached crtc is active, but connector isn't\n");
3894 		I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
3895 			"best encoder set without crtc!\n");
3896 	}
3897 }
3898 
3899 bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
3900 {
3901 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
3902 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3903 
3904 	/* IPS only exists on ULT machines and is tied to pipe A. */
3905 	if (!hsw_crtc_supports_ips(crtc))
3906 		return false;
3907 
3908 	if (!dev_priv->params.enable_ips)
3909 		return false;
3910 
3911 	if (crtc_state->pipe_bpp > 24)
3912 		return false;
3913 
3914 	/*
3915 	 * We compare against max, which means we must take
3916 	 * the increased cdclk requirement into account when
3917 	 * calculating the new cdclk.
3918 	 *
3919 	 * Should measure whether using a lower cdclk w/o IPS would be worthwhile.
3920 	 */
3921 	if (IS_BROADWELL(dev_priv) &&
3922 	    crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
3923 		return false;
3924 
3925 	return true;
3926 }
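/*
 * Numeric illustration of the BDW limit above (example value, not from
 * the original source): with max_cdclk_freq = 540000 kHz, IPS is deemed
 * capable only up to a pixel rate of 540000 * 95 / 100 = 513000 kHz.
 */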
3927 
3928 static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
3929 {
3930 	struct drm_i915_private *dev_priv =
3931 		to_i915(crtc_state->uapi.crtc->dev);
3932 	struct intel_atomic_state *state =
3933 		to_intel_atomic_state(crtc_state->uapi.state);
3934 
3935 	crtc_state->ips_enabled = false;
3936 
3937 	if (!hsw_crtc_state_ips_capable(crtc_state))
3938 		return 0;
3939 
3940 	/*
3941 	 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
3942 	 * enabled and disabled dynamically based on package C states,
3943 	 * user space can't make reliable use of the CRCs, so let's just
3944 	 * completely disable it.
3945 	 */
3946 	if (crtc_state->crc_enabled)
3947 		return 0;
3948 
3949 	/* IPS should be fine as long as at least one plane is enabled. */
3950 	if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
3951 		return 0;
3952 
3953 	if (IS_BROADWELL(dev_priv)) {
3954 		const struct intel_cdclk_state *cdclk_state;
3955 
3956 		cdclk_state = intel_atomic_get_cdclk_state(state);
3957 		if (IS_ERR(cdclk_state))
3958 			return PTR_ERR(cdclk_state);
3959 
3960 		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
3961 		if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
3962 			return 0;
3963 	}
3964 
3965 	crtc_state->ips_enabled = true;
3966 
3967 	return 0;
3968 }
3969 
3970 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
3971 {
3972 	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3973 
3974 	/* GDG double wide on either pipe, otherwise pipe A only */
3975 	return DISPLAY_VER(dev_priv) < 4 &&
3976 		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
3977 }
3978 
3979 static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
3980 {
3981 	u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
3982 	unsigned int pipe_w, pipe_h, pfit_w, pfit_h;
3983 
3984 	/*
3985 	 * We only use IF-ID interlacing. If we ever use
3986 	 * PF-ID we'll need to adjust the pixel_rate here.
3987 	 */
3988 
3989 	if (!crtc_state->pch_pfit.enabled)
3990 		return pixel_rate;
3991 
3992 	pipe_w = crtc_state->pipe_src_w;
3993 	pipe_h = crtc_state->pipe_src_h;
3994 
3995 	pfit_w = drm_rect_width(&crtc_state->pch_pfit.dst);
3996 	pfit_h = drm_rect_height(&crtc_state->pch_pfit.dst);
3997 
3998 	if (pipe_w < pfit_w)
3999 		pipe_w = pfit_w;
4000 	if (pipe_h < pfit_h)
4001 		pipe_h = pfit_h;
4002 
4003 	if (drm_WARN_ON(crtc_state->uapi.crtc->dev,
4004 			!pfit_w || !pfit_h))
4005 		return pixel_rate;
4006 
4007 	return div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
4008 		       pfit_w * pfit_h);
4009 }
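/*
 * A worked example of the scaling above (illustrative numbers, not from
 * the original source): downscaling a 3840x2160 source into a 1920x1080
 * panel fitter window multiplies the effective pixel rate by
 * (3840 * 2160) / (1920 * 1080) = 4, since the pipe fetches four source
 * pixels per output pixel. When upscaling, pipe_w/pipe_h are first
 * clamped up to the pfit size, so the ratio stays at 1.
 */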
4010 
4011 static void intel_mode_from_crtc_timings(struct drm_display_mode *mode,
4012 					 const struct drm_display_mode *timings)
4013 {
4014 	mode->hdisplay = timings->crtc_hdisplay;
4015 	mode->htotal = timings->crtc_htotal;
4016 	mode->hsync_start = timings->crtc_hsync_start;
4017 	mode->hsync_end = timings->crtc_hsync_end;
4018 
4019 	mode->vdisplay = timings->crtc_vdisplay;
4020 	mode->vtotal = timings->crtc_vtotal;
4021 	mode->vsync_start = timings->crtc_vsync_start;
4022 	mode->vsync_end = timings->crtc_vsync_end;
4023 
4024 	mode->flags = timings->flags;
4025 	mode->type = DRM_MODE_TYPE_DRIVER;
4026 
4027 	mode->clock = timings->crtc_clock;
4028 
4029 	drm_mode_set_name(mode);
4030 }
4031 
4032 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
4033 {
4034 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4035 
4036 	if (HAS_GMCH(dev_priv))
4037 		/* FIXME calculate proper pipe pixel rate for GMCH pfit */
4038 		crtc_state->pixel_rate =
4039 			crtc_state->hw.pipe_mode.crtc_clock;
4040 	else
4041 		crtc_state->pixel_rate =
4042 			ilk_pipe_pixel_rate(crtc_state);
4043 }
4044 
4045 static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
4046 {
4047 	struct drm_display_mode *mode = &crtc_state->hw.mode;
4048 	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
4049 	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
4050 
4051 	drm_mode_copy(pipe_mode, adjusted_mode);
4052 
4053 	if (crtc_state->bigjoiner) {
4054 		/*
4055 		 * transcoder is programmed to the full mode,
4056 		 * but pipe timings are half of the transcoder mode
4057 		 */
4058 		pipe_mode->crtc_hdisplay /= 2;
4059 		pipe_mode->crtc_hblank_start /= 2;
4060 		pipe_mode->crtc_hblank_end /= 2;
4061 		pipe_mode->crtc_hsync_start /= 2;
4062 		pipe_mode->crtc_hsync_end /= 2;
4063 		pipe_mode->crtc_htotal /= 2;
4064 		pipe_mode->crtc_clock /= 2;
4065 	}
4066 
4067 	if (crtc_state->splitter.enable) {
4068 		int n = crtc_state->splitter.link_count;
4069 		int overlap = crtc_state->splitter.pixel_overlap;
4070 
4071 		/*
4072 		 * eDP MSO uses segment timings from EDID for transcoder
4073 		 * timings, but full mode for everything else.
4074 		 *
4075 		 * h_full = (h_segment - pixel_overlap) * link_count
4076 		 */
4077 		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
4078 		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
4079 		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
4080 		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
4081 		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
4082 		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
4083 		pipe_mode->crtc_clock *= n;
4084 
4085 		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
4086 		intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);
4087 	} else {
4088 		intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
4089 		intel_mode_from_crtc_timings(adjusted_mode, adjusted_mode);
4090 	}
4091 
4092 	intel_crtc_compute_pixel_rate(crtc_state);
4093 
4094 	drm_mode_copy(mode, adjusted_mode);
4095 	mode->hdisplay = crtc_state->pipe_src_w << crtc_state->bigjoiner;
4096 	mode->vdisplay = crtc_state->pipe_src_h;
4097 }
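/*
 * Example of the eDP MSO conversion above (illustrative numbers, not
 * from the original source): with link_count = 2 and pixel_overlap = 0,
 * a 1920 pixel wide segment read back from the transcoder expands to a
 * (1920 - 0) * 2 = 3840 pixel wide full mode, with crtc_clock doubled
 * to match.
 */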
4098 
4099 static void intel_encoder_get_config(struct intel_encoder *encoder,
4100 				     struct intel_crtc_state *crtc_state)
4101 {
4102 	encoder->get_config(encoder, crtc_state);
4103 
4104 	intel_crtc_readout_derived_state(crtc_state);
4105 }
4106 
4107 static int intel_crtc_compute_config(struct intel_crtc *crtc,
4108 				     struct intel_crtc_state *pipe_config)
4109 {
4110 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4111 	struct drm_display_mode *pipe_mode = &pipe_config->hw.pipe_mode;
4112 	int clock_limit = dev_priv->max_dotclk_freq;
4113 
4114 	drm_mode_copy(pipe_mode, &pipe_config->hw.adjusted_mode);
4115 
4116 	/* Adjust pipe_mode for bigjoiner, with half the horizontal mode */
4117 	if (pipe_config->bigjoiner) {
4118 		pipe_mode->crtc_clock /= 2;
4119 		pipe_mode->crtc_hdisplay /= 2;
4120 		pipe_mode->crtc_hblank_start /= 2;
4121 		pipe_mode->crtc_hblank_end /= 2;
4122 		pipe_mode->crtc_hsync_start /= 2;
4123 		pipe_mode->crtc_hsync_end /= 2;
4124 		pipe_mode->crtc_htotal /= 2;
4125 		pipe_config->pipe_src_w /= 2;
4126 	}
4127 
4128 	if (pipe_config->splitter.enable) {
4129 		int n = pipe_config->splitter.link_count;
4130 		int overlap = pipe_config->splitter.pixel_overlap;
4131 
4132 		pipe_mode->crtc_hdisplay = (pipe_mode->crtc_hdisplay - overlap) * n;
4133 		pipe_mode->crtc_hblank_start = (pipe_mode->crtc_hblank_start - overlap) * n;
4134 		pipe_mode->crtc_hblank_end = (pipe_mode->crtc_hblank_end - overlap) * n;
4135 		pipe_mode->crtc_hsync_start = (pipe_mode->crtc_hsync_start - overlap) * n;
4136 		pipe_mode->crtc_hsync_end = (pipe_mode->crtc_hsync_end - overlap) * n;
4137 		pipe_mode->crtc_htotal = (pipe_mode->crtc_htotal - overlap) * n;
4138 		pipe_mode->crtc_clock *= n;
4139 	}
4140 
4141 	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);
4142 
4143 	if (DISPLAY_VER(dev_priv) < 4) {
4144 		clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
4145 
4146 		/*
4147 		 * Enable double wide mode when the dot clock
4148 		 * is > 90% of the (display) core speed.
4149 		 */
4150 		if (intel_crtc_supports_double_wide(crtc) &&
4151 		    pipe_mode->crtc_clock > clock_limit) {
4152 			clock_limit = dev_priv->max_dotclk_freq;
4153 			pipe_config->double_wide = true;
4154 		}
4155 	}
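	/*
	 * Numeric illustration (example value, not from the original
	 * source): with max_cdclk_freq = 320000 kHz the single-wide limit
	 * is 320000 * 9 / 10 = 288000 kHz; a faster mode enables double
	 * wide (when supported) and is then checked against
	 * max_dotclk_freq instead.
	 */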
4156 
4157 	if (pipe_mode->crtc_clock > clock_limit) {
4158 		drm_dbg_kms(&dev_priv->drm,
4159 			    "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
4160 			    pipe_mode->crtc_clock, clock_limit,
4161 			    yesno(pipe_config->double_wide));
4162 		return -EINVAL;
4163 	}
4164 
4165 	/*
4166 	 * Pipe horizontal size must be even in:
4167 	 * - DVO ganged mode
4168 	 * - LVDS dual channel mode
4169 	 * - Double wide pipe
4170 	 */
4171 	if (pipe_config->pipe_src_w & 1) {
4172 		if (pipe_config->double_wide) {
4173 			drm_dbg_kms(&dev_priv->drm,
4174 				    "Odd pipe source width not supported with double wide pipe\n");
4175 			return -EINVAL;
4176 		}
4177 
4178 		if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
4179 		    intel_is_dual_link_lvds(dev_priv)) {
4180 			drm_dbg_kms(&dev_priv->drm,
4181 				    "Odd pipe source width not supported with dual link LVDS\n");
4182 			return -EINVAL;
4183 		}
4184 	}
4185 
4186 	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
4187 	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
4188 	 */
4189 	if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
4190 	    pipe_mode->crtc_hsync_start == pipe_mode->crtc_hdisplay)
4191 		return -EINVAL;
4192 
4193 	intel_crtc_compute_pixel_rate(pipe_config);
4194 
4195 	if (pipe_config->has_pch_encoder)
4196 		return ilk_fdi_compute_config(crtc, pipe_config);
4197 
4198 	return 0;
4199 }
4200 
4201 static void
4202 intel_reduce_m_n_ratio(u32 *num, u32 *den)
4203 {
4204 	while (*num > DATA_LINK_M_N_MASK ||
4205 	       *den > DATA_LINK_M_N_MASK) {
4206 		*num >>= 1;
4207 		*den >>= 1;
4208 	}
4209 }
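/*
 * A worked sketch of the reduction above (illustrative values, not from
 * the original source), assuming DATA_LINK_M_N_MASK is the 24-bit field
 * mask 0xffffff:
 *
 *   num = 0x2000000, den = 0x1800000 -> both shifted right once
 *   num = 0x1000000, den = 0x0c00000 -> num still too large, shift again
 *   num = 0x0800000, den = 0x0600000 -> both fit, the 4:3 ratio is kept
 */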
4210 
4211 static void compute_m_n(unsigned int m, unsigned int n,
4212 			u32 *ret_m, u32 *ret_n,
4213 			bool constant_n)
4214 {
4215 	/*
4216 	 * Several DP dongles in particular seem to be fussy about
4217 	 * too large link M/N values. Use a fixed N value of 0x8000,
4218 	 * which such devices should accept. 0x8000 is the specified
4219 	 * fixed N value for asynchronous clock mode, which the devices
4220 	 * also expect in synchronous clock mode.
4221 	 */
4222 	if (constant_n)
4223 		*ret_n = DP_LINK_CONSTANT_N_VALUE;
4224 	else
4225 		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
4226 
4227 	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
4228 	intel_reduce_m_n_ratio(ret_m, ret_n);
4229 }
4230 
4231 void
4232 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
4233 		       int pixel_clock, int link_clock,
4234 		       struct intel_link_m_n *m_n,
4235 		       bool constant_n, bool fec_enable)
4236 {
4237 	u32 data_clock = bits_per_pixel * pixel_clock;
4238 
4239 	if (fec_enable)
4240 		data_clock = intel_dp_mode_to_fec_clock(data_clock);
4241 
4242 	m_n->tu = 64;
4243 	compute_m_n(data_clock,
4244 		    link_clock * nlanes * 8,
4245 		    &m_n->gmch_m, &m_n->gmch_n,
4246 		    constant_n);
4247 
4248 	compute_m_n(pixel_clock, link_clock,
4249 		    &m_n->link_m, &m_n->link_n,
4250 		    constant_n);
4251 }
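/*
 * A worked example of the computation above (illustrative numbers, not
 * from the original source): 1080p60 over a 4-lane HBR2 DP link with
 * constant_n set and FEC disabled, i.e. bits_per_pixel = 24,
 * pixel_clock = 148500 kHz, link_clock = 270000 kHz, nlanes = 4:
 *
 *   data_clock = 24 * 148500 = 3564000
 *   gmch_n = 0x8000 = 32768
 *   gmch_m = 3564000 * 32768 / (270000 * 4 * 8) = 13516
 *   link_n = 0x8000 = 32768
 *   link_m = 148500 * 32768 / 270000 = 18022
 *
 * All values already fit within DATA_LINK_M_N_MASK, so no reduction step
 * is needed.
 */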
4252 
4253 static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
4254 {
4255 	/*
4256 	 * There may be no VBT; and if the BIOS enabled SSC we can
4257 	 * just keep using it to avoid unnecessary flicker.  Whereas if the
4258 	 * BIOS isn't using it, don't assume it will work even if the VBT
4259 	 * indicates as much.
4260 	 */
4261 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
4262 		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
4263 						       PCH_DREF_CONTROL) &
4264 			DREF_SSC1_ENABLE;
4265 
4266 		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
4267 			drm_dbg_kms(&dev_priv->drm,
4268 				    "SSC %s by BIOS, overriding VBT which says %s\n",
4269 				    enableddisabled(bios_lvds_use_ssc),
4270 				    enableddisabled(dev_priv->vbt.lvds_use_ssc));
4271 			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
4272 		}
4273 	}
4274 }
4275 
4276 static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
4277 					 const struct intel_link_m_n *m_n)
4278 {
4279 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4280 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4281 	enum pipe pipe = crtc->pipe;
4282 
4283 	intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
4284 		       TU_SIZE(m_n->tu) | m_n->gmch_m);
4285 	intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
4286 	intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
4287 	intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
4288 }
4289 
4290 static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
4291 				 enum transcoder transcoder)
4292 {
4293 	if (IS_HASWELL(dev_priv))
4294 		return transcoder == TRANSCODER_EDP;
4295 
4296 	/*
4297 	 * Strictly speaking some registers are available before
4298 	 * gen7, but we only support DRRS on gen7+
4299 	 */
4300 	return IS_DISPLAY_VER(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
4301 }
4302 
4303 static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
4304 					 const struct intel_link_m_n *m_n,
4305 					 const struct intel_link_m_n *m2_n2)
4306 {
4307 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4308 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4309 	enum pipe pipe = crtc->pipe;
4310 	enum transcoder transcoder = crtc_state->cpu_transcoder;
4311 
4312 	if (DISPLAY_VER(dev_priv) >= 5) {
4313 		intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
4314 			       TU_SIZE(m_n->tu) | m_n->gmch_m);
4315 		intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
4316 			       m_n->gmch_n);
4317 		intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
4318 			       m_n->link_m);
4319 		intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
4320 			       m_n->link_n);
4321 		/*
4322 		 * M2_N2 registers are set only if DRRS is supported
4323 		 * (to make sure the registers are not unnecessarily accessed).
4324 		 */
4325 		if (m2_n2 && crtc_state->has_drrs &&
4326 		    transcoder_has_m2_n2(dev_priv, transcoder)) {
4327 			intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
4328 				       TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
4329 			intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
4330 				       m2_n2->gmch_n);
4331 			intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
4332 				       m2_n2->link_m);
4333 			intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
4334 				       m2_n2->link_n);
4335 		}
4336 	} else {
4337 		intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
4338 			       TU_SIZE(m_n->tu) | m_n->gmch_m);
4339 		intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
4340 		intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
4341 		intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
4342 	}
4343 }
4344 
4345 void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
4346 {
4347 	const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
4348 	struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
4349 
4350 	if (m_n == M1_N1) {
4351 		dp_m_n = &crtc_state->dp_m_n;
4352 		dp_m2_n2 = &crtc_state->dp_m2_n2;
4353 	} else if (m_n == M2_N2) {
4354 
4355 		/*
4356 		 * M2_N2 registers are not supported. Hence m2_n2 divider value
4357 		 * needs to be programmed into M1_N1.
4358 		 */
4359 		dp_m_n = &crtc_state->dp_m2_n2;
4360 	} else {
4361 		drm_err(&i915->drm, "Unsupported divider value\n");
4362 		return;
4363 	}
4364 
4365 	if (crtc_state->has_pch_encoder)
4366 		intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
4367 	else
4368 		intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
4369 }
4370 
4371 static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
4372 {
4373 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4374 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4375 	enum pipe pipe = crtc->pipe;
4376 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
4377 	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
4378 	u32 crtc_vtotal, crtc_vblank_end;
4379 	int vsyncshift = 0;
4380 
4381 	/* We need to be careful not to change the adjusted mode, as otherwise
4382 	 * the hw state checker will get angry at the mismatch. */
4383 	crtc_vtotal = adjusted_mode->crtc_vtotal;
4384 	crtc_vblank_end = adjusted_mode->crtc_vblank_end;
4385 
4386 	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4387 		/* the chip adds 2 halflines automatically */
4388 		crtc_vtotal -= 1;
4389 		crtc_vblank_end -= 1;
4390 
4391 		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
4392 			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
4393 		else
4394 			vsyncshift = adjusted_mode->crtc_hsync_start -
4395 				adjusted_mode->crtc_htotal / 2;
4396 		if (vsyncshift < 0)
4397 			vsyncshift += adjusted_mode->crtc_htotal;
4398 	}
4399 
4400 	if (DISPLAY_VER(dev_priv) > 3)
4401 		intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
4402 		               vsyncshift);
4403 
4404 	intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
4405 		       (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
4406 	intel_de_write(dev_priv, HBLANK(cpu_transcoder),
4407 		       (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
4408 	intel_de_write(dev_priv, HSYNC(cpu_transcoder),
4409 		       (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
4410 
4411 	intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
4412 		       (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
4413 	intel_de_write(dev_priv, VBLANK(cpu_transcoder),
4414 		       (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
4415 	intel_de_write(dev_priv, VSYNC(cpu_transcoder),
4416 		       (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
4417 
4418 	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
4419 	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
4420 	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
4421 	 * bits. */
4422 	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
4423 	    (pipe == PIPE_B || pipe == PIPE_C))
4424 		intel_de_write(dev_priv, VTOTAL(pipe),
4425 		               intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
4427 }
4428 
4429 static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
4430 {
4431 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4432 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4433 	enum pipe pipe = crtc->pipe;
4434 
4435 	/* pipesrc controls the size that is scaled from, which should
4436 	 * always be the user's requested size.
4437 	 */
4438 	intel_de_write(dev_priv, PIPESRC(pipe),
4439 		       ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
4440 }
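/*
 * Register layout example (illustrative, not from the original source):
 * a 1920x1080 source size is written as ((1920 - 1) << 16) | (1080 - 1)
 * = 0x077f0437, i.e. width minus one in the high 16 bits and height
 * minus one in the low 16 bits.
 */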
4441 
4442 static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
4443 {
4444 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
4445 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
4446 
4447 	if (IS_DISPLAY_VER(dev_priv, 2))
4448 		return false;
4449 
4450 	if (DISPLAY_VER(dev_priv) >= 9 ||
4451 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
4452 		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
4453 	else
4454 		return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
4455 }
4456 
4457 static void intel_get_transcoder_timings(struct intel_crtc *crtc,
4458 					 struct intel_crtc_state *pipe_config)
4459 {
4460 	struct drm_device *dev = crtc->base.dev;
4461 	struct drm_i915_private *dev_priv = to_i915(dev);
4462 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
4463 	u32 tmp;
4464 
4465 	tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
4466 	pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
4467 	pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
4468 
4469 	if (!transcoder_is_dsi(cpu_transcoder)) {
4470 		tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
4471 		pipe_config->hw.adjusted_mode.crtc_hblank_start =
4472 							(tmp & 0xffff) + 1;
4473 		pipe_config->hw.adjusted_mode.crtc_hblank_end =
4474 						((tmp >> 16) & 0xffff) + 1;
4475 	}
4476 	tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
4477 	pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
4478 	pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
4479 
4480 	tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
4481 	pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
4482 	pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
4483 
4484 	if (!transcoder_is_dsi(cpu_transcoder)) {
4485 		tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
4486 		pipe_config->hw.adjusted_mode.crtc_vblank_start =
4487 							(tmp & 0xffff) + 1;
4488 		pipe_config->hw.adjusted_mode.crtc_vblank_end =
4489 						((tmp >> 16) & 0xffff) + 1;
4490 	}
4491 	tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
4492 	pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
4493 	pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
4494 
4495 	if (intel_pipe_is_interlaced(pipe_config)) {
4496 		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
4497 		pipe_config->hw.adjusted_mode.crtc_vtotal += 1;
4498 		pipe_config->hw.adjusted_mode.crtc_vblank_end += 1;
4499 	}
4500 }
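/*
 * The "+ 1" adjustments above undo the "- 1" bias applied when the
 * timing registers were written in intel_set_transcoder_timings(): each
 * register packs (active/start - 1) in the low 16 bits and
 * (total/end - 1) in the high 16 bits.
 */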
4501 
4502 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
4503 				    struct intel_crtc_state *pipe_config)
4504 {
4505 	struct drm_device *dev = crtc->base.dev;
4506 	struct drm_i915_private *dev_priv = to_i915(dev);
4507 	u32 tmp;
4508 
4509 	tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
4510 	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
4511 	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
4512 }
4513 
4514 static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
4515 {
4516 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4517 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4518 	u32 pipeconf;
4519 
4520 	pipeconf = 0;
4521 
4522 	/* we keep both pipes enabled on 830 */
4523 	if (IS_I830(dev_priv))
4524 		pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
4525 
4526 	if (crtc_state->double_wide)
4527 		pipeconf |= PIPECONF_DOUBLE_WIDE;
4528 
4529 	/* only g4x and later have fancy bpc/dither controls */
4530 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
4531 	    IS_CHERRYVIEW(dev_priv)) {
4532 		/* Bspec claims that we can't use dithering for 30bpp pipes. */
4533 		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
4534 			pipeconf |= PIPECONF_DITHER_EN |
4535 				    PIPECONF_DITHER_TYPE_SP;
4536 
4537 		switch (crtc_state->pipe_bpp) {
4538 		case 18:
4539 			pipeconf |= PIPECONF_6BPC;
4540 			break;
4541 		case 24:
4542 			pipeconf |= PIPECONF_8BPC;
4543 			break;
4544 		case 30:
4545 			pipeconf |= PIPECONF_10BPC;
4546 			break;
4547 		default:
4548 			/* Case prevented by intel_choose_pipe_bpp_dither. */
4549 			BUG();
4550 		}
4551 	}
4552 
4553 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
4554 		if (DISPLAY_VER(dev_priv) < 4 ||
4555 		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
4556 			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
4557 		else
4558 			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
4559 	} else {
4560 		pipeconf |= PIPECONF_PROGRESSIVE;
4561 	}
4562 
4563 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
4564 	     crtc_state->limited_color_range)
4565 		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
4566 
4567 	pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
4568 
4569 	pipeconf |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
4570 
4571 	intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
4572 	intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
4573 }
4574 
4575 static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
4576 {
4577 	if (IS_I830(dev_priv))
4578 		return false;
4579 
4580 	return DISPLAY_VER(dev_priv) >= 4 ||
4581 		IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
4582 }
4583 
4584 static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
4585 {
4586 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4587 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4588 	u32 tmp;
4589 
4590 	if (!i9xx_has_pfit(dev_priv))
4591 		return;
4592 
4593 	tmp = intel_de_read(dev_priv, PFIT_CONTROL);
4594 	if (!(tmp & PFIT_ENABLE))
4595 		return;
4596 
4597 	/* Check whether the pfit is attached to our pipe. */
4598 	if (DISPLAY_VER(dev_priv) < 4) {
4599 		if (crtc->pipe != PIPE_B)
4600 			return;
4601 	} else {
4602 		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
4603 			return;
4604 	}
4605 
4606 	crtc_state->gmch_pfit.control = tmp;
4607 	crtc_state->gmch_pfit.pgm_ratios =
4608 		intel_de_read(dev_priv, PFIT_PGM_RATIOS);
4609 }
4610 
4611 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
4612 			       struct intel_crtc_state *pipe_config)
4613 {
4614 	struct drm_device *dev = crtc->base.dev;
4615 	struct drm_i915_private *dev_priv = to_i915(dev);
4616 	enum pipe pipe = crtc->pipe;
4617 	struct dpll clock;
4618 	u32 mdiv;
4619 	int refclk = 100000;
4620 
4621 	/* In case of DSI, DPLL will not be used */
4622 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
4623 		return;
4624 
4625 	vlv_dpio_get(dev_priv);
4626 	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
4627 	vlv_dpio_put(dev_priv);
4628 
4629 	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
4630 	clock.m2 = mdiv & DPIO_M2DIV_MASK;
4631 	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
4632 	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
4633 	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
4634 
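	/*
	 * vlv_calc_dpll_params() reconstructs the port clock from these
	 * dividers against the fixed 100 MHz reference, essentially undoing
	 * the m/n/p programming performed at modeset time.
	 */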
4635 	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
4636 }
4637 
4638 static void chv_crtc_clock_get(struct intel_crtc *crtc,
4639 			       struct intel_crtc_state *pipe_config)
4640 {
4641 	struct drm_device *dev = crtc->base.dev;
4642 	struct drm_i915_private *dev_priv = to_i915(dev);
4643 	enum pipe pipe = crtc->pipe;
4644 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
4645 	struct dpll clock;
4646 	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
4647 	int refclk = 100000;
4648 
4649 	/* In case of DSI, DPLL will not be used */
4650 	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
4651 		return;
4652 
4653 	vlv_dpio_get(dev_priv);
4654 	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
4655 	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
4656 	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
4657 	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
4658 	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
4659 	vlv_dpio_put(dev_priv);
4660 
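	/*
	 * m2 is a fixed-point value: the integer part from PLL_DW0 sits
	 * above bit 22, and when fractional mode is enabled the 22-bit
	 * fraction from PLL_DW2 fills the low bits.
	 */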
4661 	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
4662 	clock.m2 = (pll_dw0 & 0xff) << 22;
4663 	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
4664 		clock.m2 |= pll_dw2 & 0x3fffff;
4665 	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
4666 	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
4667 	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
4668 
4669 	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
4670 }
4671 
4672 static enum intel_output_format
4673 bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
4674 {
4675 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4676 	u32 tmp;
4677 
4678 	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
4679 
4680 	if (tmp & PIPEMISC_YUV420_ENABLE) {
4681 		/* We support 4:2:0 in full blend mode only */
4682 		drm_WARN_ON(&dev_priv->drm,
4683 			    (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
4684 
4685 		return INTEL_OUTPUT_FORMAT_YCBCR420;
4686 	} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
4687 		return INTEL_OUTPUT_FORMAT_YCBCR444;
4688 	} else {
4689 		return INTEL_OUTPUT_FORMAT_RGB;
4690 	}
4691 }
4692 
4693 static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
4694 {
4695 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
4696 	struct intel_plane *plane = to_intel_plane(crtc->base.primary);
4697 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4698 	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
4699 	u32 tmp;
4700 
4701 	tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
4702 
4703 	if (tmp & DISPPLANE_GAMMA_ENABLE)
4704 		crtc_state->gamma_enable = true;
4705 
4706 	if (!HAS_GMCH(dev_priv) &&
4707 	    tmp & DISPPLANE_PIPE_CSC_ENABLE)
4708 		crtc_state->csc_enable = true;
4709 }
4710 
4711 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
4712 				 struct intel_crtc_state *pipe_config)
4713 {
4714 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4715 	enum intel_display_power_domain power_domain;
4716 	intel_wakeref_t wakeref;
4717 	u32 tmp;
4718 	bool ret;
4719 
4720 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
4721 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
4722 	if (!wakeref)
4723 		return false;
4724 
4725 	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
4726 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
4727 	pipe_config->shared_dpll = NULL;
4728 
4729 	ret = false;
4730 
4731 	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
4732 	if (!(tmp & PIPECONF_ENABLE))
4733 		goto out;
4734 
4735 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
4736 	    IS_CHERRYVIEW(dev_priv)) {
4737 		switch (tmp & PIPECONF_BPC_MASK) {
4738 		case PIPECONF_6BPC:
4739 			pipe_config->pipe_bpp = 18;
4740 			break;
4741 		case PIPECONF_8BPC:
4742 			pipe_config->pipe_bpp = 24;
4743 			break;
4744 		case PIPECONF_10BPC:
4745 			pipe_config->pipe_bpp = 30;
4746 			break;
4747 		default:
4748 			break;
4749 		}
4750 	}
4751 
4752 	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
4753 	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
4754 		pipe_config->limited_color_range = true;
4755 
4756 	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
4757 		PIPECONF_GAMMA_MODE_SHIFT;
4758 
4759 	if (IS_CHERRYVIEW(dev_priv))
4760 		pipe_config->cgm_mode = intel_de_read(dev_priv,
4761 						      CGM_PIPE_MODE(crtc->pipe));
4762 
4763 	i9xx_get_pipe_color_config(pipe_config);
4764 	intel_color_get_config(pipe_config);
4765 
4766 	if (DISPLAY_VER(dev_priv) < 4)
4767 		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
4768 
4769 	intel_get_transcoder_timings(crtc, pipe_config);
4770 	intel_get_pipe_src_size(crtc, pipe_config);
4771 
4772 	i9xx_get_pfit_config(pipe_config);
4773 
4774 	if (DISPLAY_VER(dev_priv) >= 4) {
4775 		/* No way to read it out on pipes B and C */
4776 		if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
4777 			tmp = dev_priv->chv_dpll_md[crtc->pipe];
4778 		else
4779 			tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
4780 		pipe_config->pixel_multiplier =
4781 			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
4782 			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
4783 		pipe_config->dpll_hw_state.dpll_md = tmp;
4784 	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
4785 		   IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
4786 		tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
4787 		pipe_config->pixel_multiplier =
4788 			((tmp & SDVO_MULTIPLIER_MASK)
4789 			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
4790 	} else {
4791 		/* Note that on i915G/GM the pixel multiplier is in the sdvo
4792 		 * port and will be fixed up in the encoder->get_config
4793 		 * function. */
4794 		pipe_config->pixel_multiplier = 1;
4795 	}
4796 	pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
4797 							DPLL(crtc->pipe));
4798 	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
4799 		pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
4800 							       FP0(crtc->pipe));
4801 		pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
4802 							       FP1(crtc->pipe));
4803 	} else {
4804 		/* Mask out read-only status bits. */
4805 		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
4806 						     DPLL_PORTC_READY_MASK |
4807 						     DPLL_PORTB_READY_MASK);
4808 	}
4809 
4810 	if (IS_CHERRYVIEW(dev_priv))
4811 		chv_crtc_clock_get(crtc, pipe_config);
4812 	else if (IS_VALLEYVIEW(dev_priv))
4813 		vlv_crtc_clock_get(crtc, pipe_config);
4814 	else
4815 		i9xx_crtc_clock_get(crtc, pipe_config);
4816 
4817 	/*
4818 	 * Normally the dotclock is filled in by the encoder .get_config()
4819 	 * but in case the pipe is enabled w/o any ports we need a sane
4820 	 * default.
4821 	 */
4822 	pipe_config->hw.adjusted_mode.crtc_clock =
4823 		pipe_config->port_clock / pipe_config->pixel_multiplier;
4824 
4825 	ret = true;
4826 
4827 out:
4828 	intel_display_power_put(dev_priv, power_domain, wakeref);
4829 
4830 	return ret;
4831 }
4832 
4833 static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
4834 {
4835 	struct intel_encoder *encoder;
4836 	int i;
4837 	u32 val, final;
4838 	bool has_lvds = false;
4839 	bool has_cpu_edp = false;
4840 	bool has_panel = false;
4841 	bool has_ck505 = false;
4842 	bool can_ssc = false;
4843 	bool using_ssc_source = false;
4844 
4845 	/* We need to take the global config into account */
4846 	for_each_intel_encoder(&dev_priv->drm, encoder) {
4847 		switch (encoder->type) {
4848 		case INTEL_OUTPUT_LVDS:
4849 			has_panel = true;
4850 			has_lvds = true;
4851 			break;
4852 		case INTEL_OUTPUT_EDP:
4853 			has_panel = true;
4854 			if (encoder->port == PORT_A)
4855 				has_cpu_edp = true;
4856 			break;
4857 		default:
4858 			break;
4859 		}
4860 	}
4861 
4862 	if (HAS_PCH_IBX(dev_priv)) {
4863 		has_ck505 = dev_priv->vbt.display_clock_mode;
4864 		can_ssc = has_ck505;
4865 	} else {
4866 		has_ck505 = false;
4867 		can_ssc = true;
4868 	}
4869 
4870 	/* Check if any DPLLs are using the SSC source */
4871 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
4872 		u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
4873 
4874 		if (!(temp & DPLL_VCO_ENABLE))
4875 			continue;
4876 
4877 		if ((temp & PLL_REF_INPUT_MASK) ==
4878 		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
4879 			using_ssc_source = true;
4880 			break;
4881 		}
4882 	}
4883 
4884 	drm_dbg_kms(&dev_priv->drm,
4885 		    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
4886 		    has_panel, has_lvds, has_ck505, using_ssc_source);
4887 
4888 	/* Ironlake: try to set up the display ref clock before DPLL
4889 	 * enabling. This is only under the driver's control after
4890 	 * PCH B stepping; previous chipset steppings should ignore
4891 	 * this setting.
4892 	 */
4893 	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
4894 
4895 	/* As we must carefully and slowly disable/enable each source in turn,
4896 	 * compute the final state we want first and check if we need to
4897 	 * make any changes at all.
4898 	 */
4899 	final = val;
4900 	final &= ~DREF_NONSPREAD_SOURCE_MASK;
4901 	if (has_ck505)
4902 		final |= DREF_NONSPREAD_CK505_ENABLE;
4903 	else
4904 		final |= DREF_NONSPREAD_SOURCE_ENABLE;
4905 
4906 	final &= ~DREF_SSC_SOURCE_MASK;
4907 	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4908 	final &= ~DREF_SSC1_ENABLE;
4909 
4910 	if (has_panel) {
4911 		final |= DREF_SSC_SOURCE_ENABLE;
4912 
4913 		if (intel_panel_use_ssc(dev_priv) && can_ssc)
4914 			final |= DREF_SSC1_ENABLE;
4915 
4916 		if (has_cpu_edp) {
4917 			if (intel_panel_use_ssc(dev_priv) && can_ssc)
4918 				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
4919 			else
4920 				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
4921 		} else
4922 			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4923 	} else if (using_ssc_source) {
4924 		final |= DREF_SSC_SOURCE_ENABLE;
4925 		final |= DREF_SSC1_ENABLE;
4926 	}
4927 
4928 	if (final == val)
4929 		return;
4930 
4931 	/* Always enable nonspread source */
4932 	val &= ~DREF_NONSPREAD_SOURCE_MASK;
4933 
4934 	if (has_ck505)
4935 		val |= DREF_NONSPREAD_CK505_ENABLE;
4936 	else
4937 		val |= DREF_NONSPREAD_SOURCE_ENABLE;
4938 
4939 	if (has_panel) {
4940 		val &= ~DREF_SSC_SOURCE_MASK;
4941 		val |= DREF_SSC_SOURCE_ENABLE;
4942 
4943 		/* SSC must be turned on before enabling the CPU output */
4944 		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
4945 			drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
4946 			val |= DREF_SSC1_ENABLE;
4947 		} else
4948 			val &= ~DREF_SSC1_ENABLE;
4949 
4950 		/* Get SSC going before enabling the outputs */
4951 		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
4952 		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
4953 		udelay(200);
4954 
4955 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4956 
4957 		/* Enable CPU source on CPU attached eDP */
4958 		if (has_cpu_edp) {
4959 			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
4960 				drm_dbg_kms(&dev_priv->drm,
4961 					    "Using SSC on eDP\n");
4962 				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
4963 			} else
4964 				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
4965 		} else
4966 			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4967 
4968 		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
4969 		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
4970 		udelay(200);
4971 	} else {
4972 		drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");
4973 
4974 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4975 
4976 		/* Turn off CPU output */
4977 		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4978 
4979 		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
4980 		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
4981 		udelay(200);
4982 
4983 		if (!using_ssc_source) {
4984 			drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");
4985 
4986 			/* Turn off the SSC source */
4987 			val &= ~DREF_SSC_SOURCE_MASK;
4988 			val |= DREF_SSC_SOURCE_DISABLE;
4989 
4990 			/* Turn off SSC1 */
4991 			val &= ~DREF_SSC1_ENABLE;
4992 
4993 			intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
4994 			intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
4995 			udelay(200);
4996 		}
4997 	}
4998 
4999 	BUG_ON(val != final);
5000 }
5001 
5002 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
5003 {
5004 	u32 tmp;
5005 
5006 	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
5007 	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
5008 	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
5009 
5010 	if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
5011 			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
5012 		drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
5013 
5014 	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
5015 	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
5016 	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
5017 
5018 	if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
5019 			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
5020 		drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
5021 }
5022 
5023 /* WaMPhyProgramming:hsw */
5024 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
5025 {
5026 	u32 tmp;
5027 
5028 	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
5029 	tmp &= ~(0xFF << 24);
5030 	tmp |= (0x12 << 24);
5031 	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
5032 
5033 	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
5034 	tmp |= (1 << 11);
5035 	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
5036 
5037 	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
5038 	tmp |= (1 << 11);
5039 	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
5040 
5041 	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
5042 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5043 	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
5044 
5045 	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
5046 	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5047 	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
5048 
5049 	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
5050 	tmp &= ~(7 << 13);
5051 	tmp |= (5 << 13);
5052 	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
5053 
5054 	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
5055 	tmp &= ~(7 << 13);
5056 	tmp |= (5 << 13);
5057 	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
5058 
5059 	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
5060 	tmp &= ~0xFF;
5061 	tmp |= 0x1C;
5062 	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
5063 
5064 	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
5065 	tmp &= ~0xFF;
5066 	tmp |= 0x1C;
5067 	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
5068 
5069 	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
5070 	tmp &= ~(0xFF << 16);
5071 	tmp |= (0x1C << 16);
5072 	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
5073 
5074 	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
5075 	tmp &= ~(0xFF << 16);
5076 	tmp |= (0x1C << 16);
5077 	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
5078 
5079 	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
5080 	tmp |= (1 << 27);
5081 	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
5082 
5083 	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
5084 	tmp |= (1 << 27);
5085 	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
5086 
5087 	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
5088 	tmp &= ~(0xF << 28);
5089 	tmp |= (4 << 28);
5090 	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
5091 
5092 	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
5093 	tmp &= ~(0xF << 28);
5094 	tmp |= (4 << 28);
5095 	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
5096 }
5097 
5098 /* Implements 3 different sequences from BSpec chapter "Display iCLK
5099  * Programming" based on the parameters passed:
5100  * - Sequence to enable CLKOUT_DP
5101  * - Sequence to enable CLKOUT_DP without spread
5102  * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
5103  */
5104 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
5105 				 bool with_spread, bool with_fdi)
5106 {
5107 	u32 reg, tmp;
5108 
5109 	if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
5110 		     "FDI requires downspread\n"))
5111 		with_spread = true;
5112 	if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
5113 		     with_fdi, "LP PCH doesn't have FDI\n"))
5114 		with_fdi = false;
5115 
5116 	mutex_lock(&dev_priv->sb_lock);
5117 
5118 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5119 	tmp &= ~SBI_SSCCTL_DISABLE;
5120 	tmp |= SBI_SSCCTL_PATHALT;
5121 	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5122 
5123 	udelay(24);
5124 
5125 	if (with_spread) {
5126 		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5127 		tmp &= ~SBI_SSCCTL_PATHALT;
5128 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5129 
5130 		if (with_fdi) {
5131 			lpt_reset_fdi_mphy(dev_priv);
5132 			lpt_program_fdi_mphy(dev_priv);
5133 		}
5134 	}
5135 
5136 	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
5137 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5138 	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5139 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5140 
5141 	mutex_unlock(&dev_priv->sb_lock);
5142 }
5143 
5144 /* Sequence to disable CLKOUT_DP */
5145 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
5146 {
5147 	u32 reg, tmp;
5148 
5149 	mutex_lock(&dev_priv->sb_lock);
5150 
5151 	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
5152 	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5153 	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5154 	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5155 
5156 	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5157 	if (!(tmp & SBI_SSCCTL_DISABLE)) {
5158 		if (!(tmp & SBI_SSCCTL_PATHALT)) {
5159 			tmp |= SBI_SSCCTL_PATHALT;
5160 			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5161 			udelay(32);
5162 		}
5163 		tmp |= SBI_SSCCTL_DISABLE;
5164 		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5165 	}
5166 
5167 	mutex_unlock(&dev_priv->sb_lock);
5168 }
5169 
5170 #define BEND_IDX(steps) ((50 + (steps)) / 5)
5171 
5172 static const u16 sscdivintphase[] = {
5173 	[BEND_IDX( 50)] = 0x3B23,
5174 	[BEND_IDX( 45)] = 0x3B23,
5175 	[BEND_IDX( 40)] = 0x3C23,
5176 	[BEND_IDX( 35)] = 0x3C23,
5177 	[BEND_IDX( 30)] = 0x3D23,
5178 	[BEND_IDX( 25)] = 0x3D23,
5179 	[BEND_IDX( 20)] = 0x3E23,
5180 	[BEND_IDX( 15)] = 0x3E23,
5181 	[BEND_IDX( 10)] = 0x3F23,
5182 	[BEND_IDX(  5)] = 0x3F23,
5183 	[BEND_IDX(  0)] = 0x0025,
5184 	[BEND_IDX( -5)] = 0x0025,
5185 	[BEND_IDX(-10)] = 0x0125,
5186 	[BEND_IDX(-15)] = 0x0125,
5187 	[BEND_IDX(-20)] = 0x0225,
5188 	[BEND_IDX(-25)] = 0x0225,
5189 	[BEND_IDX(-30)] = 0x0325,
5190 	[BEND_IDX(-35)] = 0x0325,
5191 	[BEND_IDX(-40)] = 0x0425,
5192 	[BEND_IDX(-45)] = 0x0425,
5193 	[BEND_IDX(-50)] = 0x0525,
5194 };
5195 
5196 /*
5197  * Bend CLKOUT_DP
5198  * steps -50 to 50 inclusive, in steps of 5
5199  * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
5200  * change in clock period = -(steps / 10) * 5.787 ps
5201  */
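/*
 * Worked example: steps = 25 maps to BEND_IDX(25) = (50 + 25) / 5 = 15,
 * i.e. sscdivintphase[15] = 0x3D23, and since 25 % 10 != 0 the dither
 * value 0xAAAAAAAB is programmed as well.
 */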
5202 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
5203 {
5204 	u32 tmp;
5205 	int idx = BEND_IDX(steps);
5206 
5207 	if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
5208 		return;
5209 
5210 	if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
5211 		return;
5212 
5213 	mutex_lock(&dev_priv->sb_lock);
5214 
5215 	if (steps % 10 != 0)
5216 		tmp = 0xAAAAAAAB;
5217 	else
5218 		tmp = 0x00000000;
5219 	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
5220 
5221 	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
5222 	tmp &= 0xffff0000;
5223 	tmp |= sscdivintphase[idx];
5224 	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
5225 
5226 	mutex_unlock(&dev_priv->sb_lock);
5227 }
5228 
5229 #undef BEND_IDX
5230 
5231 static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
5232 {
5233 	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
5234 	u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
5235 
5236 	if ((ctl & SPLL_PLL_ENABLE) == 0)
5237 		return false;
5238 
5239 	if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
5240 	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
5241 		return true;
5242 
5243 	if (IS_BROADWELL(dev_priv) &&
5244 	    (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
5245 		return true;
5246 
5247 	return false;
5248 }
5249 
5250 static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
5251 			       enum intel_dpll_id id)
5252 {
5253 	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
5254 	u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
5255 
5256 	if ((ctl & WRPLL_PLL_ENABLE) == 0)
5257 		return false;
5258 
5259 	if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
5260 		return true;
5261 
5262 	if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
5263 	    (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
5264 	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
5265 		return true;
5266 
5267 	return false;
5268 }
5269 
5270 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
5271 {
5272 	struct intel_encoder *encoder;
5273 	bool has_fdi = false;
5274 
5275 	for_each_intel_encoder(&dev_priv->drm, encoder) {
5276 		switch (encoder->type) {
5277 		case INTEL_OUTPUT_ANALOG:
5278 			has_fdi = true;
5279 			break;
5280 		default:
5281 			break;
5282 		}
5283 	}
5284 
5285 	/*
5286 	 * The BIOS may have decided to use the PCH SSC
5287 	 * reference so we must not disable it until the
5288 	 * relevant PLLs have stopped relying on it. We'll
5289 	 * just leave the PCH SSC reference enabled in case
5290 	 * any active PLL is using it. It will get disabled
5291 	 * after runtime suspend if we don't have FDI.
5292 	 *
5293 	 * TODO: Move the whole reference clock handling
5294 	 * to the modeset sequence proper so that we can
5295 	 * actually enable/disable/reconfigure these things
5296 	 * safely. To do that we need to introduce a real
5297 	 * clock hierarchy. That would also allow us to do
5298 	 * clock bending finally.
5299 	 */
5300 	dev_priv->pch_ssc_use = 0;
5301 
5302 	if (spll_uses_pch_ssc(dev_priv)) {
5303 		drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
5304 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
5305 	}
5306 
5307 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
5308 		drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
5309 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
5310 	}
5311 
5312 	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
5313 		drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
5314 		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
5315 	}
5316 
5317 	if (dev_priv->pch_ssc_use)
5318 		return;
5319 
5320 	if (has_fdi) {
5321 		lpt_bend_clkout_dp(dev_priv, 0);
5322 		lpt_enable_clkout_dp(dev_priv, true, true);
5323 	} else {
5324 		lpt_disable_clkout_dp(dev_priv);
5325 	}
5326 }
5327 
5328 /*
5329  * Initialize reference clocks when the driver loads
5330  */
5331 void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
5332 {
5333 	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
5334 		ilk_init_pch_refclk(dev_priv);
5335 	else if (HAS_PCH_LPT(dev_priv))
5336 		lpt_init_pch_refclk(dev_priv);
5337 }
5338 
5339 static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
5340 {
5341 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5342 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5343 	enum pipe pipe = crtc->pipe;
5344 	u32 val;
5345 
5346 	val = 0;
5347 
5348 	switch (crtc_state->pipe_bpp) {
5349 	case 18:
5350 		val |= PIPECONF_6BPC;
5351 		break;
5352 	case 24:
5353 		val |= PIPECONF_8BPC;
5354 		break;
5355 	case 30:
5356 		val |= PIPECONF_10BPC;
5357 		break;
5358 	case 36:
5359 		val |= PIPECONF_12BPC;
5360 		break;
5361 	default:
5362 		/* Case prevented by intel_choose_pipe_bpp_dither. */
5363 		BUG();
5364 	}
5365 
5366 	if (crtc_state->dither)
5367 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5368 
5369 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
5370 		val |= PIPECONF_INTERLACED_ILK;
5371 	else
5372 		val |= PIPECONF_PROGRESSIVE;
5373 
5374 	/*
5375 	 * This would end up with an odd purple hue over
5376 	 * the entire display. Make sure we don't do it.
5377 	 */
5378 	drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
5379 		    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
5380 
5381 	if (crtc_state->limited_color_range &&
5382 	    !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
5383 		val |= PIPECONF_COLOR_RANGE_SELECT;
5384 
5385 	if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
5386 		val |= PIPECONF_OUTPUT_COLORSPACE_YUV709;
5387 
5388 	val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
5389 
5390 	val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
5391 
5392 	intel_de_write(dev_priv, PIPECONF(pipe), val);
5393 	intel_de_posting_read(dev_priv, PIPECONF(pipe));
5394 }
5395 
5396 static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
5397 {
5398 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5399 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5400 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
5401 	u32 val = 0;
5402 
5403 	if (IS_HASWELL(dev_priv) && crtc_state->dither)
5404 		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5405 
5406 	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
5407 		val |= PIPECONF_INTERLACED_ILK;
5408 	else
5409 		val |= PIPECONF_PROGRESSIVE;
5410 
5411 	if (IS_HASWELL(dev_priv) &&
5412 	    crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
5413 		val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
5414 
5415 	intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
5416 	intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
5417 }
5418 
5419 static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
5420 {
5421 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5422 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5423 	u32 val = 0;
5424 
5425 	switch (crtc_state->pipe_bpp) {
5426 	case 18:
5427 		val |= PIPEMISC_DITHER_6_BPC;
5428 		break;
5429 	case 24:
5430 		val |= PIPEMISC_DITHER_8_BPC;
5431 		break;
5432 	case 30:
5433 		val |= PIPEMISC_DITHER_10_BPC;
5434 		break;
5435 	case 36:
5436 		val |= PIPEMISC_DITHER_12_BPC;
5437 		break;
5438 	default:
5439 		MISSING_CASE(crtc_state->pipe_bpp);
5440 		break;
5441 	}
5442 
5443 	if (crtc_state->dither)
5444 		val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
5445 
5446 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
5447 	    crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
5448 		val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
5449 
5450 	if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5451 		val |= PIPEMISC_YUV420_ENABLE |
5452 			PIPEMISC_YUV420_MODE_FULL_BLEND;
5453 
5454 	if (DISPLAY_VER(dev_priv) >= 11 &&
5455 	    (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
5456 					   BIT(PLANE_CURSOR))) == 0)
5457 		val |= PIPEMISC_HDR_MODE_PRECISION;
5458 
5459 	if (DISPLAY_VER(dev_priv) >= 12)
5460 		val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
5461 
5462 	intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
5463 }
5464 
5465 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
5466 {
5467 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5468 	u32 tmp;
5469 
5470 	tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
5471 
5472 	switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
5473 	case PIPEMISC_DITHER_6_BPC:
5474 		return 18;
5475 	case PIPEMISC_DITHER_8_BPC:
5476 		return 24;
5477 	case PIPEMISC_DITHER_10_BPC:
5478 		return 30;
5479 	case PIPEMISC_DITHER_12_BPC:
5480 		return 36;
5481 	default:
5482 		MISSING_CASE(tmp);
5483 		return 0;
5484 	}
5485 }
5486 
5487 int ilk_get_lanes_required(int target_clock, int link_bw, int bpp)
5488 {
5489 	/*
5490 	 * Account for spread spectrum to avoid
5491 	 * oversubscribing the link. Max center spread
5492 	 * is 2.5%; use 5% for safety's sake.
5493 	 */
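	/*
	 * Worked example: target_clock = 148500, bpp = 24, link_bw = 270000
	 * gives bps = 148500 * 24 * 21 / 20 = 3742200, and
	 * DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes.
	 */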
5494 	u32 bps = target_clock * bpp * 21 / 20;
5495 	return DIV_ROUND_UP(bps, link_bw * 8);
5496 }
5497 
5498 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
5499 					 struct intel_link_m_n *m_n)
5500 {
5501 	struct drm_device *dev = crtc->base.dev;
5502 	struct drm_i915_private *dev_priv = to_i915(dev);
5503 	enum pipe pipe = crtc->pipe;
5504 
5505 	m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
5506 	m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
5507 	m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
5508 		& ~TU_SIZE_MASK;
5509 	m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
5510 	m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
5511 		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5512 }
5513 
5514 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
5515 					 enum transcoder transcoder,
5516 					 struct intel_link_m_n *m_n,
5517 					 struct intel_link_m_n *m2_n2)
5518 {
5519 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5520 	enum pipe pipe = crtc->pipe;
5521 
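	/*
	 * The data (gmch) M/N pair encodes the stream-to-link bandwidth
	 * ratio and the link M/N pair the pixel-to-link-symbol clock ratio;
	 * the TU size shares the data M register, hence the masking below.
	 */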
5522 	if (DISPLAY_VER(dev_priv) >= 5) {
5523 		m_n->link_m = intel_de_read(dev_priv,
5524 					    PIPE_LINK_M1(transcoder));
5525 		m_n->link_n = intel_de_read(dev_priv,
5526 					    PIPE_LINK_N1(transcoder));
5527 		m_n->gmch_m = intel_de_read(dev_priv,
5528 					    PIPE_DATA_M1(transcoder))
5529 			& ~TU_SIZE_MASK;
5530 		m_n->gmch_n = intel_de_read(dev_priv,
5531 					    PIPE_DATA_N1(transcoder));
5532 		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
5533 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5534 
5535 		if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
5536 			m2_n2->link_m = intel_de_read(dev_priv,
5537 						      PIPE_LINK_M2(transcoder));
5538 			m2_n2->link_n = intel_de_read(dev_priv,
5539 						      PIPE_LINK_N2(transcoder));
5540 			m2_n2->gmch_m = intel_de_read(dev_priv,
5541 						      PIPE_DATA_M2(transcoder))
5542 					& ~TU_SIZE_MASK;
5543 			m2_n2->gmch_n = intel_de_read(dev_priv,
5544 						      PIPE_DATA_N2(transcoder));
5545 			m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
5546 					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5547 		}
5548 	} else {
5549 		m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
5550 		m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
5551 		m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
5552 			& ~TU_SIZE_MASK;
5553 		m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
5554 		m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
5555 			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
5556 	}
5557 }
5558 
5559 void intel_dp_get_m_n(struct intel_crtc *crtc,
5560 		      struct intel_crtc_state *pipe_config)
5561 {
5562 	if (pipe_config->has_pch_encoder)
5563 		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
5564 	else
5565 		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
5566 					     &pipe_config->dp_m_n,
5567 					     &pipe_config->dp_m2_n2);
5568 }
5569 
5570 static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
5571 				   struct intel_crtc_state *pipe_config)
5572 {
5573 	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
5574 				     &pipe_config->fdi_m_n, NULL);
5575 }
5576 
5577 static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
5578 				  u32 pos, u32 size)
5579 {
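	/*
	 * The pos/size registers pack x/width into the high word and
	 * y/height into the low word.
	 */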
5580 	drm_rect_init(&crtc_state->pch_pfit.dst,
5581 		      pos >> 16, pos & 0xffff,
5582 		      size >> 16, size & 0xffff);
5583 }
5584 
5585 static void skl_get_pfit_config(struct intel_crtc_state *crtc_state)
5586 {
5587 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5588 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5589 	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
5590 	int id = -1;
5591 	int i;
5592 
5593 	/* find scaler attached to this pipe */
5594 	for (i = 0; i < crtc->num_scalers; i++) {
5595 		u32 ctl, pos, size;
5596 
5597 		ctl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
5598 		if ((ctl & (PS_SCALER_EN | PS_PLANE_SEL_MASK)) != PS_SCALER_EN)
5599 			continue;
5600 
5601 		id = i;
5602 		crtc_state->pch_pfit.enabled = true;
5603 
5604 		pos = intel_de_read(dev_priv, SKL_PS_WIN_POS(crtc->pipe, i));
5605 		size = intel_de_read(dev_priv, SKL_PS_WIN_SZ(crtc->pipe, i));
5606 
5607 		ilk_get_pfit_pos_size(crtc_state, pos, size);
5608 
5609 		scaler_state->scalers[i].in_use = true;
5610 		break;
5611 	}
5612 
5613 	scaler_state->scaler_id = id;
5614 	if (id >= 0)
5615 		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
5616 	else
5617 		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
5618 }
5619 
5620 static void ilk_get_pfit_config(struct intel_crtc_state *crtc_state)
5621 {
5622 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
5623 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5624 	u32 ctl, pos, size;
5625 
5626 	ctl = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
5627 	if ((ctl & PF_ENABLE) == 0)
5628 		return;
5629 
5630 	crtc_state->pch_pfit.enabled = true;
5631 
5632 	pos = intel_de_read(dev_priv, PF_WIN_POS(crtc->pipe));
5633 	size = intel_de_read(dev_priv, PF_WIN_SZ(crtc->pipe));
5634 
5635 	ilk_get_pfit_pos_size(crtc_state, pos, size);
5636 
5637 	/*
5638 	 * We currently do not free assignments of panel fitters on
5639 	 * ivb/hsw (since we don't use the higher upscaling modes which
5640 	 * differentiate them), so just WARN about this case for now.
5641 	 */
5642 	drm_WARN_ON(&dev_priv->drm, IS_DISPLAY_VER(dev_priv, 7) &&
5643 		    (ctl & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe));
5644 }
5645 
5646 static bool ilk_get_pipe_config(struct intel_crtc *crtc,
5647 				struct intel_crtc_state *pipe_config)
5648 {
5649 	struct drm_device *dev = crtc->base.dev;
5650 	struct drm_i915_private *dev_priv = to_i915(dev);
5651 	enum intel_display_power_domain power_domain;
5652 	intel_wakeref_t wakeref;
5653 	u32 tmp;
5654 	bool ret;
5655 
5656 	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
5657 	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
5658 	if (!wakeref)
5659 		return false;
5660 
5661 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
5662 	pipe_config->shared_dpll = NULL;
5663 
5664 	ret = false;
5665 	tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
5666 	if (!(tmp & PIPECONF_ENABLE))
5667 		goto out;
5668 
5669 	switch (tmp & PIPECONF_BPC_MASK) {
5670 	case PIPECONF_6BPC:
5671 		pipe_config->pipe_bpp = 18;
5672 		break;
5673 	case PIPECONF_8BPC:
5674 		pipe_config->pipe_bpp = 24;
5675 		break;
5676 	case PIPECONF_10BPC:
5677 		pipe_config->pipe_bpp = 30;
5678 		break;
5679 	case PIPECONF_12BPC:
5680 		pipe_config->pipe_bpp = 36;
5681 		break;
5682 	default:
5683 		break;
5684 	}
5685 
5686 	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
5687 		pipe_config->limited_color_range = true;
5688 
5689 	switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) {
5690 	case PIPECONF_OUTPUT_COLORSPACE_YUV601:
5691 	case PIPECONF_OUTPUT_COLORSPACE_YUV709:
5692 		pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
5693 		break;
5694 	default:
5695 		pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
5696 		break;
5697 	}
5698 
5699 	pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
5700 		PIPECONF_GAMMA_MODE_SHIFT;
5701 
5702 	pipe_config->csc_mode = intel_de_read(dev_priv,
5703 					      PIPE_CSC_MODE(crtc->pipe));
5704 
5705 	i9xx_get_pipe_color_config(pipe_config);
5706 	intel_color_get_config(pipe_config);
5707 
5708 	if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
5709 		struct intel_shared_dpll *pll;
5710 		enum intel_dpll_id pll_id;
5711 		bool pll_active;
5712 
5713 		pipe_config->has_pch_encoder = true;
5714 
5715 		tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
5716 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
5717 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
5718 
5719 		ilk_get_fdi_m_n_config(crtc, pipe_config);
5720 
5721 		if (HAS_PCH_IBX(dev_priv)) {
5722 			/*
5723 			 * The pipe->pch transcoder and pch transcoder->pll
5724 			 * mapping is fixed.
5725 			 */
5726 			pll_id = (enum intel_dpll_id) crtc->pipe;
5727 		} else {
5728 			tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
5729 			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
5730 				pll_id = DPLL_ID_PCH_PLL_B;
5731 			else
5732 				pll_id = DPLL_ID_PCH_PLL_A;
5733 		}
5734 
5735 		pipe_config->shared_dpll =
5736 			intel_get_shared_dpll_by_id(dev_priv, pll_id);
5737 		pll = pipe_config->shared_dpll;
5738 
5739 		pll_active = intel_dpll_get_hw_state(dev_priv, pll,
5740 						     &pipe_config->dpll_hw_state);
5741 		drm_WARN_ON(dev, !pll_active);
5742 
5743 		tmp = pipe_config->dpll_hw_state.dpll;
5744 		pipe_config->pixel_multiplier =
5745 			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
5746 			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
5747 
5748 		ilk_pch_clock_get(crtc, pipe_config);
5749 	} else {
5750 		pipe_config->pixel_multiplier = 1;
5751 	}
5752 
5753 	intel_get_transcoder_timings(crtc, pipe_config);
5754 	intel_get_pipe_src_size(crtc, pipe_config);
5755 
5756 	ilk_get_pfit_config(pipe_config);
5757 
5758 	ret = true;
5759 
5760 out:
5761 	intel_display_power_put(dev_priv, power_domain, wakeref);
5762 
5763 	return ret;
5764 }
5765 
5766 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
5767 				     struct intel_crtc_state *pipe_config,
5768 				     struct intel_display_power_domain_set *power_domain_set)
5769 {
5770 	struct drm_device *dev = crtc->base.dev;
5771 	struct drm_i915_private *dev_priv = to_i915(dev);
5772 	unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
5773 	unsigned long enabled_panel_transcoders = 0;
5774 	enum transcoder panel_transcoder;
5775 	u32 tmp;
5776 
5777 	if (DISPLAY_VER(dev_priv) >= 11)
5778 		panel_transcoder_mask |=
5779 			BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
5780 
5781 	/*
5782 	 * The pipe->transcoder mapping is fixed with the exception of the eDP
5783 	 * and DSI transcoders handled below.
5784 	 */
5785 	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
5786 
5787 	/*
5788 	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
5789 	 * consistency and less surprising code; it's in an always-on power well).
5790 	 */
5791 	for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
5792 				       panel_transcoder_mask) {
5793 		bool force_thru = false;
5794 		enum pipe trans_pipe;
5795 
5796 		tmp = intel_de_read(dev_priv,
5797 				    TRANS_DDI_FUNC_CTL(panel_transcoder));
5798 		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
5799 			continue;
5800 
5801 		/*
5802 		 * Log all enabled ones; only use the first one.
5803 		 *
5804 		 * FIXME: This won't work for two separate DSI displays.
5805 		 */
5806 		enabled_panel_transcoders |= BIT(panel_transcoder);
5807 		if (enabled_panel_transcoders != BIT(panel_transcoder))
5808 			continue;
5809 
5810 		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
5811 		default:
5812 			drm_WARN(dev, 1,
5813 				 "unknown pipe linked to transcoder %s\n",
5814 				 transcoder_name(panel_transcoder));
5815 			fallthrough;
5816 		case TRANS_DDI_EDP_INPUT_A_ONOFF:
5817 			force_thru = true;
5818 			fallthrough;
5819 		case TRANS_DDI_EDP_INPUT_A_ON:
5820 			trans_pipe = PIPE_A;
5821 			break;
5822 		case TRANS_DDI_EDP_INPUT_B_ONOFF:
5823 			trans_pipe = PIPE_B;
5824 			break;
5825 		case TRANS_DDI_EDP_INPUT_C_ONOFF:
5826 			trans_pipe = PIPE_C;
5827 			break;
5828 		case TRANS_DDI_EDP_INPUT_D_ONOFF:
5829 			trans_pipe = PIPE_D;
5830 			break;
5831 		}
5832 
5833 		if (trans_pipe == crtc->pipe) {
5834 			pipe_config->cpu_transcoder = panel_transcoder;
5835 			pipe_config->pch_pfit.force_thru = force_thru;
5836 		}
5837 	}
5838 
5839 	/*
5840 	 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
5841 	 */
5842 	drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
5843 		    enabled_panel_transcoders != BIT(TRANSCODER_EDP));
5844 
5845 	if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
5846 						       POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
5847 		return false;
5848 
5849 	tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
5850 
5851 	return tmp & PIPECONF_ENABLE;
5852 }
5853 
5854 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
5855 					 struct intel_crtc_state *pipe_config,
5856 					 struct intel_display_power_domain_set *power_domain_set)
5857 {
5858 	struct drm_device *dev = crtc->base.dev;
5859 	struct drm_i915_private *dev_priv = to_i915(dev);
5860 	enum transcoder cpu_transcoder;
5861 	enum port port;
5862 	u32 tmp;
5863 
5864 	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
5865 		if (port == PORT_A)
5866 			cpu_transcoder = TRANSCODER_DSI_A;
5867 		else
5868 			cpu_transcoder = TRANSCODER_DSI_C;
5869 
5870 		if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
5871 							       POWER_DOMAIN_TRANSCODER(cpu_transcoder)))
5872 			continue;
5873 
5874 		/*
5875 		 * The PLL needs to be enabled with a valid divider
5876 		 * configuration, otherwise accessing DSI registers will hang
5877 		 * the machine. See BSpec North Display Engine
5878 		 * registers/MIPI[BXT]. We can break out here early, since we
5879 		 * need the same DSI PLL to be enabled for both DSI ports.
5880 		 */
5881 		if (!bxt_dsi_pll_is_enabled(dev_priv))
5882 			break;
5883 
5884 		/* XXX: this works for video mode only */
5885 		tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
5886 		if (!(tmp & DPI_ENABLE))
5887 			continue;
5888 
5889 		tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
5890 		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
5891 			continue;
5892 
5893 		pipe_config->cpu_transcoder = cpu_transcoder;
5894 		break;
5895 	}
5896 
5897 	return transcoder_is_dsi(pipe_config->cpu_transcoder);
5898 }
5899 
5900 static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
5901 				   struct intel_crtc_state *pipe_config)
5902 {
5903 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5904 	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
5905 	enum port port;
5906 	u32 tmp;
5907 
5908 	if (transcoder_is_dsi(cpu_transcoder)) {
5909 		port = (cpu_transcoder == TRANSCODER_DSI_A) ?
5910 						PORT_A : PORT_B;
5911 	} else {
5912 		tmp = intel_de_read(dev_priv,
5913 				    TRANS_DDI_FUNC_CTL(cpu_transcoder));
5914 		if (!(tmp & TRANS_DDI_FUNC_ENABLE))
5915 			return;
5916 		if (DISPLAY_VER(dev_priv) >= 12)
5917 			port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
5918 		else
5919 			port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
5920 	}
5921 
5922 	/*
5923 	 * Haswell has only FDI/PCH transcoder A, which is connected to
5924 	 * DDI E. So just check whether this pipe is wired to DDI E and whether
5925 	 * the PCH transcoder is on.
5926 	 */
5927 	if (DISPLAY_VER(dev_priv) < 9 &&
5928 	    (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
5929 		pipe_config->has_pch_encoder = true;
5930 
5931 		tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
5932 		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
5933 					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
5934 
5935 		ilk_get_fdi_m_n_config(crtc, pipe_config);
5936 	}
5937 }
5938 
5939 static bool hsw_get_pipe_config(struct intel_crtc *crtc,
5940 				struct intel_crtc_state *pipe_config)
5941 {
5942 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5943 	struct intel_display_power_domain_set power_domain_set = { };
5944 	bool active;
5945 	u32 tmp;
5946 
5947 	if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
5948 						       POWER_DOMAIN_PIPE(crtc->pipe)))
5949 		return false;
5950 
5951 	pipe_config->shared_dpll = NULL;
5952 
5953 	active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set);
5954 
5955 	if (IS_GEN9_LP(dev_priv) &&
5956 	    bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) {
5957 		drm_WARN_ON(&dev_priv->drm, active);
5958 		active = true;
5959 	}
5960 
5961 	intel_dsc_get_config(pipe_config);
5962 
5963 	if (!active) {
5964 		/* bigjoiner slave doesn't enable transcoder */
5965 		if (!pipe_config->bigjoiner_slave)
5966 			goto out;
5967 
5968 		active = true;
5969 		pipe_config->pixel_multiplier = 1;
5970 
5971 		/* we cannot read out most state, so don't bother. */
5972 		pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE;
5973 	} else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
5974 		   DISPLAY_VER(dev_priv) >= 11) {
5975 		hsw_get_ddi_port_state(crtc, pipe_config);
5976 		intel_get_transcoder_timings(crtc, pipe_config);
5977 	}
5978 
5979 	if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
5980 		intel_vrr_get_config(crtc, pipe_config);
5981 
5982 	intel_get_pipe_src_size(crtc, pipe_config);
5983 
5984 	if (IS_HASWELL(dev_priv)) {
5985 		u32 tmp = intel_de_read(dev_priv,
5986 					PIPECONF(pipe_config->cpu_transcoder));
5987 
5988 		if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
5989 			pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
5990 		else
5991 			pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
5992 	} else {
5993 		pipe_config->output_format =
5994 			bdw_get_pipemisc_output_format(crtc);
5995 	}
5996 
5997 	pipe_config->gamma_mode = intel_de_read(dev_priv,
5998 						GAMMA_MODE(crtc->pipe));
5999 
6000 	pipe_config->csc_mode = intel_de_read(dev_priv,
6001 					      PIPE_CSC_MODE(crtc->pipe));
6002 
6003 	if (DISPLAY_VER(dev_priv) >= 9) {
6004 		tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));
6005 
6006 		if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
6007 			pipe_config->gamma_enable = true;
6008 
6009 		if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
6010 			pipe_config->csc_enable = true;
6011 	} else {
6012 		i9xx_get_pipe_color_config(pipe_config);
6013 	}
6014 
6015 	intel_color_get_config(pipe_config);
6016 
6017 	tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
6018 	pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
6019 	if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
6020 		pipe_config->ips_linetime =
6021 			REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
6022 
6023 	if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set,
6024 						      POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) {
6025 		if (DISPLAY_VER(dev_priv) >= 9)
6026 			skl_get_pfit_config(pipe_config);
6027 		else
6028 			ilk_get_pfit_config(pipe_config);
6029 	}
6030 
6031 	if (hsw_crtc_supports_ips(crtc)) {
6032 		if (IS_HASWELL(dev_priv)) {
6033 			pipe_config->ips_enabled = intel_de_read(dev_priv,
6034 								 IPS_CTL) & IPS_ENABLE;
6035 		} else {
6036 			/*
6037 			 * We cannot read out the IPS state on Broadwell; set it
6038 			 * to true so we can force it to a defined state on the
6039 			 * first commit.
6040 			 */
6041 			pipe_config->ips_enabled = true;
6042 		}
6043 	}
6044 
6045 	if (pipe_config->bigjoiner_slave) {
6046 		/* Cannot be read out as a slave, set to 0. */
6047 		pipe_config->pixel_multiplier = 0;
6048 	} else if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
6049 		   !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
6050 		pipe_config->pixel_multiplier =
6051 			intel_de_read(dev_priv,
6052 				      PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
6053 	} else {
6054 		pipe_config->pixel_multiplier = 1;
6055 	}
6056 
6057 out:
6058 	intel_display_power_put_all_in_set(dev_priv, &power_domain_set);
6059 
6060 	return active;
6061 }
6062 
6063 static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
6064 {
6065 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6066 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
6067 
6068 	if (!i915->display.get_pipe_config(crtc, crtc_state))
6069 		return false;
6070 
6071 	crtc_state->hw.active = true;
6072 
6073 	intel_crtc_readout_derived_state(crtc_state);
6074 
6075 	return true;
6076 }
6077 
6078 /* VESA 640x480x72Hz mode to set on the pipe */
6079 static const struct drm_display_mode load_detect_mode = {
6080 	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
6081 		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
6082 };
6083 
6084 struct drm_framebuffer *
6085 intel_framebuffer_create(struct drm_i915_gem_object *obj,
6086 			 struct drm_mode_fb_cmd2 *mode_cmd)
6087 {
6088 	struct intel_framebuffer *intel_fb;
6089 	int ret;
6090 
6091 	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
6092 	if (!intel_fb)
6093 		return ERR_PTR(-ENOMEM);
6094 
6095 	ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
6096 	if (ret)
6097 		goto err;
6098 
6099 	return &intel_fb->base;
6100 
6101 err:
6102 	kfree(intel_fb);
6103 	return ERR_PTR(ret);
6104 }
6105 
6106 static int intel_modeset_disable_planes(struct drm_atomic_state *state,
6107 					struct drm_crtc *crtc)
6108 {
6109 	struct drm_plane *plane;
6110 	struct drm_plane_state *plane_state;
6111 	int ret, i;
6112 
6113 	ret = drm_atomic_add_affected_planes(state, crtc);
6114 	if (ret)
6115 		return ret;
6116 
6117 	for_each_new_plane_in_state(state, plane, plane_state, i) {
6118 		if (plane_state->crtc != crtc)
6119 			continue;
6120 
6121 		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
6122 		if (ret)
6123 			return ret;
6124 
6125 		drm_atomic_set_fb_for_plane(plane_state, NULL);
6126 	}
6127 
6128 	return 0;
6129 }
6130 
6131 int intel_get_load_detect_pipe(struct drm_connector *connector,
6132 			       struct intel_load_detect_pipe *old,
6133 			       struct drm_modeset_acquire_ctx *ctx)
6134 {
6135 	struct intel_crtc *intel_crtc;
6136 	struct intel_encoder *intel_encoder =
6137 		intel_attached_encoder(to_intel_connector(connector));
6138 	struct drm_crtc *possible_crtc;
6139 	struct drm_encoder *encoder = &intel_encoder->base;
6140 	struct drm_crtc *crtc = NULL;
6141 	struct drm_device *dev = encoder->dev;
6142 	struct drm_i915_private *dev_priv = to_i915(dev);
6143 	struct drm_mode_config *config = &dev->mode_config;
6144 	struct drm_atomic_state *state = NULL, *restore_state = NULL;
6145 	struct drm_connector_state *connector_state;
6146 	struct intel_crtc_state *crtc_state;
6147 	int ret, i = -1;
6148 
6149 	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6150 		    connector->base.id, connector->name,
6151 		    encoder->base.id, encoder->name);
6152 
6153 	old->restore_state = NULL;
6154 
6155 	drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
6156 
6157 	/*
6158 	 * Algorithm gets a little messy:
6159 	 *
6160 	 *   - if the connector already has an assigned crtc, use it (but make
6161 	 *     sure it's on first)
6162 	 *
6163 	 *   - try to find the first unused crtc that can drive this connector,
6164 	 *     and use that if we find one
6165 	 */
6166 
6167 	/* See if we already have a CRTC for this connector */
6168 	if (connector->state->crtc) {
6169 		crtc = connector->state->crtc;
6170 
6171 		ret = drm_modeset_lock(&crtc->mutex, ctx);
6172 		if (ret)
6173 			goto fail;
6174 
6175 		/* Make sure the crtc and connector are running */
6176 		goto found;
6177 	}
6178 
6179 	/* Find an unused one (if possible) */
6180 	for_each_crtc(dev, possible_crtc) {
6181 		i++;
6182 		if (!(encoder->possible_crtcs & (1 << i)))
6183 			continue;
6184 
6185 		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
6186 		if (ret)
6187 			goto fail;
6188 
6189 		if (possible_crtc->state->enable) {
6190 			drm_modeset_unlock(&possible_crtc->mutex);
6191 			continue;
6192 		}
6193 
6194 		crtc = possible_crtc;
6195 		break;
6196 	}
6197 
6198 	/*
6199 	 * If we didn't find an unused CRTC, don't use any.
6200 	 */
6201 	if (!crtc) {
6202 		drm_dbg_kms(&dev_priv->drm,
6203 			    "no pipe available for load-detect\n");
6204 		ret = -ENODEV;
6205 		goto fail;
6206 	}
6207 
6208 found:
6209 	intel_crtc = to_intel_crtc(crtc);
6210 
6211 	state = drm_atomic_state_alloc(dev);
6212 	restore_state = drm_atomic_state_alloc(dev);
6213 	if (!state || !restore_state) {
6214 		ret = -ENOMEM;
6215 		goto fail;
6216 	}
6217 
6218 	state->acquire_ctx = ctx;
6219 	restore_state->acquire_ctx = ctx;
6220 
6221 	connector_state = drm_atomic_get_connector_state(state, connector);
6222 	if (IS_ERR(connector_state)) {
6223 		ret = PTR_ERR(connector_state);
6224 		goto fail;
6225 	}
6226 
6227 	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
6228 	if (ret)
6229 		goto fail;
6230 
6231 	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
6232 	if (IS_ERR(crtc_state)) {
6233 		ret = PTR_ERR(crtc_state);
6234 		goto fail;
6235 	}
6236 
6237 	crtc_state->uapi.active = true;
6238 
6239 	ret = drm_atomic_set_mode_for_crtc(&crtc_state->uapi,
6240 					   &load_detect_mode);
6241 	if (ret)
6242 		goto fail;
6243 
6244 	ret = intel_modeset_disable_planes(state, crtc);
6245 	if (ret)
6246 		goto fail;
6247 
6248 	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
6249 	if (!ret)
6250 		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
6251 	if (!ret)
6252 		ret = drm_atomic_add_affected_planes(restore_state, crtc);
6253 	if (ret) {
6254 		drm_dbg_kms(&dev_priv->drm,
6255 			    "Failed to create a copy of old state to restore: %i\n",
6256 			    ret);
6257 		goto fail;
6258 	}
6259 
6260 	ret = drm_atomic_commit(state);
6261 	if (ret) {
6262 		drm_dbg_kms(&dev_priv->drm,
6263 			    "failed to set mode on load-detect pipe\n");
6264 		goto fail;
6265 	}
6266 
6267 	old->restore_state = restore_state;
6268 	drm_atomic_state_put(state);
6269 
6270 	/* let the connector get through one full cycle before testing */
6271 	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
6272 	return true;
6273 
6274 fail:
6275 	if (state) {
6276 		drm_atomic_state_put(state);
6277 		state = NULL;
6278 	}
6279 	if (restore_state) {
6280 		drm_atomic_state_put(restore_state);
6281 		restore_state = NULL;
6282 	}
6283 
6284 	if (ret == -EDEADLK)
6285 		return ret;
6286 
6287 	return false;
6288 }
6289 
6290 void intel_release_load_detect_pipe(struct drm_connector *connector,
6291 				    struct intel_load_detect_pipe *old,
6292 				    struct drm_modeset_acquire_ctx *ctx)
6293 {
6294 	struct intel_encoder *intel_encoder =
6295 		intel_attached_encoder(to_intel_connector(connector));
6296 	struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
6297 	struct drm_encoder *encoder = &intel_encoder->base;
6298 	struct drm_atomic_state *state = old->restore_state;
6299 	int ret;
6300 
6301 	drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6302 		    connector->base.id, connector->name,
6303 		    encoder->base.id, encoder->name);
6304 
6305 	if (!state)
6306 		return;
6307 
6308 	ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
6309 	if (ret)
6310 		drm_dbg_kms(&i915->drm,
6311 			    "Couldn't release load detect pipe: %i\n", ret);
6312 	drm_atomic_state_put(state);
6313 }
6314 
6315 static int i9xx_pll_refclk(struct drm_device *dev,
6316 			   const struct intel_crtc_state *pipe_config)
6317 {
6318 	struct drm_i915_private *dev_priv = to_i915(dev);
6319 	u32 dpll = pipe_config->dpll_hw_state.dpll;
6320 
6321 	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
6322 		return dev_priv->vbt.lvds_ssc_freq;
6323 	else if (HAS_PCH_SPLIT(dev_priv))
6324 		return 120000;
6325 	else if (!IS_DISPLAY_VER(dev_priv, 2))
6326 		return 96000;
6327 	else
6328 		return 48000;
6329 }
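
/*
 * All of the above are in kHz: the VBT-provided SSC reference for LVDS,
 * 120 MHz on PCH split platforms, 96 MHz on gen3+ and 48 MHz on gen2.
 */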
6330 
6331 /* Returns the clock of the currently programmed mode of the given pipe. */
6332 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
6333 				struct intel_crtc_state *pipe_config)
6334 {
6335 	struct drm_device *dev = crtc->base.dev;
6336 	struct drm_i915_private *dev_priv = to_i915(dev);
6337 	enum pipe pipe = crtc->pipe;
6338 	u32 dpll = pipe_config->dpll_hw_state.dpll;
6339 	u32 fp;
6340 	struct dpll clock;
6341 	int port_clock;
6342 	int refclk = i9xx_pll_refclk(dev, pipe_config);
6343 
6344 	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
6345 		fp = pipe_config->dpll_hw_state.fp0;
6346 	else
6347 		fp = pipe_config->dpll_hw_state.fp1;
6348 
6349 	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
6350 	if (IS_PINEVIEW(dev_priv)) {
6351 		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
6352 		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
6353 	} else {
6354 		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
6355 		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
6356 	}
6357 
6358 	if (!IS_DISPLAY_VER(dev_priv, 2)) {
6359 		if (IS_PINEVIEW(dev_priv))
6360 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
6361 				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
6362 		else
6363 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
6364 			       DPLL_FPA01_P1_POST_DIV_SHIFT);
6365 
6366 		switch (dpll & DPLL_MODE_MASK) {
6367 		case DPLLB_MODE_DAC_SERIAL:
6368 			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
6369 				5 : 10;
6370 			break;
6371 		case DPLLB_MODE_LVDS:
6372 			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
6373 				7 : 14;
6374 			break;
6375 		default:
6376 			drm_dbg_kms(&dev_priv->drm,
6377 				    "Unknown DPLL mode %08x in programmed "
6378 				    "mode\n", (int)(dpll & DPLL_MODE_MASK));
6379 			return;
6380 		}
6381 
6382 		if (IS_PINEVIEW(dev_priv))
6383 			port_clock = pnv_calc_dpll_params(refclk, &clock);
6384 		else
6385 			port_clock = i9xx_calc_dpll_params(refclk, &clock);
6386 	} else {
6387 		u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
6388 								 LVDS);
6389 		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
6390 
6391 		if (is_lvds) {
6392 			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
6393 				       DPLL_FPA01_P1_POST_DIV_SHIFT);
6394 
6395 			if (lvds & LVDS_CLKB_POWER_UP)
6396 				clock.p2 = 7;
6397 			else
6398 				clock.p2 = 14;
6399 		} else {
6400 			if (dpll & PLL_P1_DIVIDE_BY_TWO)
6401 				clock.p1 = 2;
6402 			else {
6403 				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
6404 					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
6405 			}
6406 			if (dpll & PLL_P2_DIVIDE_BY_4)
6407 				clock.p2 = 4;
6408 			else
6409 				clock.p2 = 2;
6410 		}
6411 
6412 		port_clock = i9xx_calc_dpll_params(refclk, &clock);
6413 	}
6414 
6415 	/*
6416 	 * This value includes pixel_multiplier. We will use
6417 	 * port_clock to compute adjusted_mode.crtc_clock in the
6418 	 * encoder's get_config() function.
6419 	 */
6420 	pipe_config->port_clock = port_clock;
6421 }
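
/*
 * Rough shape of the math done by the *_calc_dpll_params() helpers used
 * above (the exact M/N field encodings live in intel_dpll.c):
 * vco = refclk * M / N, dot = vco / (p1 * p2). Illustrative numbers only:
 * a 96000 kHz refclk with an effective M/N of 33 and p1 * p2 = 10 gives
 * a dotclock of about 316800 kHz.
 */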
6422 
6423 int intel_dotclock_calculate(int link_freq,
6424 			     const struct intel_link_m_n *m_n)
6425 {
6426 	/*
6427 	 * The calculation for the data clock is:
6428 	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
6429 	 * But we want to avoid losing precision if possible, so:
6430 	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
6431 	 *
6432 	 * and the link clock is simpler:
6433 	 * link_clock = (m * link_clock) / n
6434 	 */
6435 
6436 	if (!m_n->link_n)
6437 		return 0;
6438 
6439 	return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
6440 }
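
/*
 * Worked example with illustrative numbers: link_freq = 270000 kHz and
 * link_m/link_n = 2/3 give 270000 * 2 / 3 = 180000, i.e. a 180 MHz
 * dotclock.
 */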
6441 
6442 static void ilk_pch_clock_get(struct intel_crtc *crtc,
6443 			      struct intel_crtc_state *pipe_config)
6444 {
6445 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6446 
6447 	/* read out port_clock from the DPLL */
6448 	i9xx_crtc_clock_get(crtc, pipe_config);
6449 
6450 	/*
6451 	 * In case there is an active pipe without active ports,
6452 	 * we still need some estimate of the dotclock anyway.
6453 	 * Calculate one based on the FDI configuration.
6454 	 */
6455 	pipe_config->hw.adjusted_mode.crtc_clock =
6456 		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
6457 					 &pipe_config->fdi_m_n);
6458 }
6459 
6460 /* Returns the currently programmed mode of the given encoder. */
6461 struct drm_display_mode *
6462 intel_encoder_current_mode(struct intel_encoder *encoder)
6463 {
6464 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
6465 	struct intel_crtc_state *crtc_state;
6466 	struct drm_display_mode *mode;
6467 	struct intel_crtc *crtc;
6468 	enum pipe pipe;
6469 
6470 	if (!encoder->get_hw_state(encoder, &pipe))
6471 		return NULL;
6472 
6473 	crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
6474 
6475 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
6476 	if (!mode)
6477 		return NULL;
6478 
6479 	crtc_state = intel_crtc_state_alloc(crtc);
6480 	if (!crtc_state) {
6481 		kfree(mode);
6482 		return NULL;
6483 	}
6484 
6485 	if (!intel_crtc_get_pipe_config(crtc_state)) {
6486 		kfree(crtc_state);
6487 		kfree(mode);
6488 		return NULL;
6489 	}
6490 
6491 	intel_encoder_get_config(encoder, crtc_state);
6492 
6493 	intel_mode_from_crtc_timings(mode, &crtc_state->hw.adjusted_mode);
6494 
6495 	kfree(crtc_state);
6496 
6497 	return mode;
6498 }
6499 
6500 /**
6501  * intel_wm_need_update - Check whether watermarks need updating
6502  * @cur: current plane state
6503  * @new: new plane state
6504  *
6505  * Check current plane state versus the new one to determine whether
6506  * watermarks need to be recalculated.
6507  *
6508  * Returns: %true if the watermarks need to be recalculated, %false otherwise.
6509  */
6510 static bool intel_wm_need_update(const struct intel_plane_state *cur,
6511 				 struct intel_plane_state *new)
6512 {
6513 	/* Update watermarks on tiling or size changes. */
6514 	if (new->uapi.visible != cur->uapi.visible)
6515 		return true;
6516 
6517 	if (!cur->hw.fb || !new->hw.fb)
6518 		return false;
6519 
6520 	if (cur->hw.fb->modifier != new->hw.fb->modifier ||
6521 	    cur->hw.rotation != new->hw.rotation ||
6522 	    drm_rect_width(&new->uapi.src) != drm_rect_width(&cur->uapi.src) ||
6523 	    drm_rect_height(&new->uapi.src) != drm_rect_height(&cur->uapi.src) ||
6524 	    drm_rect_width(&new->uapi.dst) != drm_rect_width(&cur->uapi.dst) ||
6525 	    drm_rect_height(&new->uapi.dst) != drm_rect_height(&cur->uapi.dst))
6526 		return true;
6527 
6528 	return false;
6529 }
6530 
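/*
 * Note that uapi.src is in 16.16 fixed point while uapi.dst is in whole
 * pixels, hence the >> 16 below: a 1920-pixel-wide source is stored as
 * 1920 << 16.
 */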
6531 static bool needs_scaling(const struct intel_plane_state *state)
6532 {
6533 	int src_w = drm_rect_width(&state->uapi.src) >> 16;
6534 	int src_h = drm_rect_height(&state->uapi.src) >> 16;
6535 	int dst_w = drm_rect_width(&state->uapi.dst);
6536 	int dst_h = drm_rect_height(&state->uapi.dst);
6537 
6538 	return (src_w != dst_w || src_h != dst_h);
6539 }
6540 
6541 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
6542 				    struct intel_crtc_state *crtc_state,
6543 				    const struct intel_plane_state *old_plane_state,
6544 				    struct intel_plane_state *plane_state)
6545 {
6546 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6547 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
6548 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6549 	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
6550 	bool was_crtc_enabled = old_crtc_state->hw.active;
6551 	bool is_crtc_enabled = crtc_state->hw.active;
6552 	bool turn_off, turn_on, visible, was_visible;
6553 	int ret;
6554 
6555 	if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
6556 		ret = skl_update_scaler_plane(crtc_state, plane_state);
6557 		if (ret)
6558 			return ret;
6559 	}
6560 
6561 	was_visible = old_plane_state->uapi.visible;
6562 	visible = plane_state->uapi.visible;
6563 
6564 	if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
6565 		was_visible = false;
6566 
6567 	/*
6568 	 * Visibility is calculated as if the crtc was on, but
6569 	 * after scaler setup everything depends on it being off
6570 	 * when the crtc isn't active.
6571 	 *
6572 	 * FIXME this is wrong for watermarks. Watermarks should also
6573 	 * be computed as if the pipe would be active. Perhaps move
6574 	 * per-plane wm computation to the .check_plane() hook, and
6575 	 * only combine the results from all planes in the current place?
6576 	 */
6577 	if (!is_crtc_enabled) {
6578 		intel_plane_set_invisible(crtc_state, plane_state);
6579 		visible = false;
6580 	}
6581 
6582 	if (!was_visible && !visible)
6583 		return 0;
6584 
6585 	turn_off = was_visible && (!visible || mode_changed);
6586 	turn_on = visible && (!was_visible || mode_changed);
6587 
6588 	drm_dbg_atomic(&dev_priv->drm,
6589 		       "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
6590 		       crtc->base.base.id, crtc->base.name,
6591 		       plane->base.base.id, plane->base.name,
6592 		       was_visible, visible,
6593 		       turn_off, turn_on, mode_changed);
6594 
6595 	if (turn_on) {
6596 		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
6597 			crtc_state->update_wm_pre = true;
6598 
6599 		/* must disable cxsr around plane enable/disable */
6600 		if (plane->id != PLANE_CURSOR)
6601 			crtc_state->disable_cxsr = true;
6602 	} else if (turn_off) {
6603 		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
6604 			crtc_state->update_wm_post = true;
6605 
6606 		/* must disable cxsr around plane enable/disable */
6607 		if (plane->id != PLANE_CURSOR)
6608 			crtc_state->disable_cxsr = true;
6609 	} else if (intel_wm_need_update(old_plane_state, plane_state)) {
6610 		if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
6611 			/* FIXME bollocks */
6612 			crtc_state->update_wm_pre = true;
6613 			crtc_state->update_wm_post = true;
6614 		}
6615 	}
6616 
6617 	if (visible || was_visible)
6618 		crtc_state->fb_bits |= plane->frontbuffer_bit;
6619 
6620 	/*
6621 	 * ILK/SNB DVSACNTR/Sprite Enable
6622 	 * IVB SPR_CTL/Sprite Enable
6623 	 * "When in Self Refresh Big FIFO mode, a write to enable the
6624 	 *  plane will be internally buffered and delayed while Big FIFO
6625 	 *  mode is exiting."
6626 	 *
6627 	 * Which means that enabling the sprite can take an extra frame
6628 	 * when we start in big FIFO mode (LP1+). Thus we need to drop
6629 	 * down to LP0 and wait for vblank in order to make sure the
6630 	 * sprite gets enabled on the next vblank after the register write.
6631 	 * Doing otherwise would risk enabling the sprite one frame after
6632 	 * we've already signalled flip completion. We can resume LP1+
6633 	 * once the sprite has been enabled.
6634 	 *
6635 	 *
6636 	 * WaCxSRDisabledForSpriteScaling:ivb
6637 	 * IVB SPR_SCALE/Scaling Enable
6638 	 * "Low Power watermarks must be disabled for at least one
6639 	 *  frame before enabling sprite scaling, and kept disabled
6640 	 *  until sprite scaling is disabled."
6641 	 *
6642 	 * ILK/SNB DVSASCALE/Scaling Enable
6643 	 * "When in Self Refresh Big FIFO mode, scaling enable will be
6644 	 *  masked off while Big FIFO mode is exiting."
6645 	 *
6646 	 * Despite the w/a only being listed for IVB we assume that
6647 	 * the ILK/SNB note has similar ramifications, hence we apply
6648 	 * the w/a on all three platforms.
6649 	 *
6650 	 * Experimental results suggest this is needed for the primary
6651 	 * plane as well, not just the sprite plane.
6652 	 */
6653 	if (plane->id != PLANE_CURSOR &&
6654 	    (IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
6655 	     IS_IVYBRIDGE(dev_priv)) &&
6656 	    (turn_on || (!needs_scaling(old_plane_state) &&
6657 			 needs_scaling(plane_state))))
6658 		crtc_state->disable_lp_wm = true;
6659 
6660 	return 0;
6661 }
6662 
6663 static bool encoders_cloneable(const struct intel_encoder *a,
6664 			       const struct intel_encoder *b)
6665 {
6666 	/* masks could be asymmetric, so check both ways */
6667 	return a == b || (a->cloneable & (1 << b->type) &&
6668 			  b->cloneable & (1 << a->type));
6669 }
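
/*
 * cloneable is a bitmask of INTEL_OUTPUT_* types an encoder is willing
 * to share a pipe with; since each side lists its own partners, the
 * check above has to succeed in both directions.
 */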
6670 
6671 static bool check_single_encoder_cloning(struct intel_atomic_state *state,
6672 					 struct intel_crtc *crtc,
6673 					 struct intel_encoder *encoder)
6674 {
6675 	struct intel_encoder *source_encoder;
6676 	struct drm_connector *connector;
6677 	struct drm_connector_state *connector_state;
6678 	int i;
6679 
6680 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
6681 		if (connector_state->crtc != &crtc->base)
6682 			continue;
6683 
6684 		source_encoder =
6685 			to_intel_encoder(connector_state->best_encoder);
6686 		if (!encoders_cloneable(encoder, source_encoder))
6687 			return false;
6688 	}
6689 
6690 	return true;
6691 }
6692 
6693 static int icl_add_linked_planes(struct intel_atomic_state *state)
6694 {
6695 	struct intel_plane *plane, *linked;
6696 	struct intel_plane_state *plane_state, *linked_plane_state;
6697 	int i;
6698 
6699 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
6700 		linked = plane_state->planar_linked_plane;
6701 
6702 		if (!linked)
6703 			continue;
6704 
6705 		linked_plane_state = intel_atomic_get_plane_state(state, linked);
6706 		if (IS_ERR(linked_plane_state))
6707 			return PTR_ERR(linked_plane_state);
6708 
6709 		drm_WARN_ON(state->base.dev,
6710 			    linked_plane_state->planar_linked_plane != plane);
6711 		drm_WARN_ON(state->base.dev,
6712 			    linked_plane_state->planar_slave == plane_state->planar_slave);
6713 	}
6714 
6715 	return 0;
6716 }
6717 
6718 static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
6719 {
6720 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6721 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6722 	struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
6723 	struct intel_plane *plane, *linked;
6724 	struct intel_plane_state *plane_state;
6725 	int i;
6726 
6727 	if (DISPLAY_VER(dev_priv) < 11)
6728 		return 0;
6729 
6730 	/*
6731 	 * Destroy all old plane links and make the slave plane invisible
6732 	 * in the crtc_state->active_planes mask.
6733 	 */
6734 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
6735 		if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane)
6736 			continue;
6737 
6738 		plane_state->planar_linked_plane = NULL;
6739 		if (plane_state->planar_slave && !plane_state->uapi.visible) {
6740 			crtc_state->enabled_planes &= ~BIT(plane->id);
6741 			crtc_state->active_planes &= ~BIT(plane->id);
6742 			crtc_state->update_planes |= BIT(plane->id);
6743 		}
6744 
6745 		plane_state->planar_slave = false;
6746 	}
6747 
6748 	if (!crtc_state->nv12_planes)
6749 		return 0;
6750 
6751 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
6752 		struct intel_plane_state *linked_state = NULL;
6753 
6754 		if (plane->pipe != crtc->pipe ||
6755 		    !(crtc_state->nv12_planes & BIT(plane->id)))
6756 			continue;
6757 
6758 		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
6759 			if (!icl_is_nv12_y_plane(dev_priv, linked->id))
6760 				continue;
6761 
6762 			if (crtc_state->active_planes & BIT(linked->id))
6763 				continue;
6764 
6765 			linked_state = intel_atomic_get_plane_state(state, linked);
6766 			if (IS_ERR(linked_state))
6767 				return PTR_ERR(linked_state);
6768 
6769 			break;
6770 		}
6771 
6772 		if (!linked_state) {
6773 			drm_dbg_kms(&dev_priv->drm,
6774 				    "Need %d free Y planes for planar YUV\n",
6775 				    hweight8(crtc_state->nv12_planes));
6776 
6777 			return -EINVAL;
6778 		}
6779 
6780 		plane_state->planar_linked_plane = linked;
6781 
6782 		linked_state->planar_slave = true;
6783 		linked_state->planar_linked_plane = plane;
6784 		crtc_state->enabled_planes |= BIT(linked->id);
6785 		crtc_state->active_planes |= BIT(linked->id);
6786 		crtc_state->update_planes |= BIT(linked->id);
6787 		drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
6788 			    linked->base.name, plane->base.name);
6789 
6790 		/* Copy parameters to slave plane */
6791 		linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
6792 		linked_state->color_ctl = plane_state->color_ctl;
6793 		linked_state->view = plane_state->view;
6794 
6795 		intel_plane_copy_hw_state(linked_state, plane_state);
6796 		linked_state->uapi.src = plane_state->uapi.src;
6797 		linked_state->uapi.dst = plane_state->uapi.dst;
6798 
6799 		if (icl_is_hdr_plane(dev_priv, plane->id)) {
6800 			if (linked->id == PLANE_SPRITE5)
6801 				plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
6802 			else if (linked->id == PLANE_SPRITE4)
6803 				plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
6804 			else if (linked->id == PLANE_SPRITE3)
6805 				plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
6806 			else if (linked->id == PLANE_SPRITE2)
6807 				plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
6808 			else
6809 				MISSING_CASE(linked->id);
6810 		}
6811 	}
6812 
6813 	return 0;
6814 }
6815 
6816 static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
6817 {
6818 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
6819 	struct intel_atomic_state *state =
6820 		to_intel_atomic_state(new_crtc_state->uapi.state);
6821 	const struct intel_crtc_state *old_crtc_state =
6822 		intel_atomic_get_old_crtc_state(state, crtc);
6823 
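	/*
	 * The double negation turns each plane mask into a bool, so this
	 * fires only when C8 planes go from none present to some or back.
	 */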
6824 	return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
6825 }
6826 
6827 static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
6828 {
6829 	const struct drm_display_mode *pipe_mode =
6830 		&crtc_state->hw.pipe_mode;
6831 	int linetime_wm;
6832 
6833 	if (!crtc_state->hw.enable)
6834 		return 0;
6835 
6836 	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
6837 					pipe_mode->crtc_clock);
6838 
6839 	return min(linetime_wm, 0x1ff);
6840 }
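
/*
 * The linetime watermark is in 0.125 us units. Illustrative numbers:
 * htotal = 2200 at a 148500 kHz clock is a ~14.8 us line, encoded as
 * DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119.
 */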
6841 
6842 static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
6843 			       const struct intel_cdclk_state *cdclk_state)
6844 {
6845 	const struct drm_display_mode *pipe_mode =
6846 		&crtc_state->hw.pipe_mode;
6847 	int linetime_wm;
6848 
6849 	if (!crtc_state->hw.enable)
6850 		return 0;
6851 
6852 	linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8,
6853 					cdclk_state->logical.cdclk);
6854 
6855 	return min(linetime_wm, 0x1ff);
6856 }
6857 
6858 static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
6859 {
6860 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
6861 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6862 	const struct drm_display_mode *pipe_mode =
6863 		&crtc_state->hw.pipe_mode;
6864 	int linetime_wm;
6865 
6866 	if (!crtc_state->hw.enable)
6867 		return 0;
6868 
6869 	linetime_wm = DIV_ROUND_UP(pipe_mode->crtc_htotal * 1000 * 8,
6870 				   crtc_state->pixel_rate);
6871 
6872 	/* Display WA #1135: BXT:ALL GLK:ALL */
6873 	if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
6874 		linetime_wm /= 2;
6875 
6876 	return min(linetime_wm, 0x1ff);
6877 }
6878 
6879 static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
6880 				   struct intel_crtc *crtc)
6881 {
6882 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6883 	struct intel_crtc_state *crtc_state =
6884 		intel_atomic_get_new_crtc_state(state, crtc);
6885 	const struct intel_cdclk_state *cdclk_state;
6886 
6887 	if (DISPLAY_VER(dev_priv) >= 9)
6888 		crtc_state->linetime = skl_linetime_wm(crtc_state);
6889 	else
6890 		crtc_state->linetime = hsw_linetime_wm(crtc_state);
6891 
6892 	if (!hsw_crtc_supports_ips(crtc))
6893 		return 0;
6894 
6895 	cdclk_state = intel_atomic_get_cdclk_state(state);
6896 	if (IS_ERR(cdclk_state))
6897 		return PTR_ERR(cdclk_state);
6898 
6899 	crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
6900 						       cdclk_state);
6901 
6902 	return 0;
6903 }
6904 
6905 static int intel_crtc_atomic_check(struct intel_atomic_state *state,
6906 				   struct intel_crtc *crtc)
6907 {
6908 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6909 	struct intel_crtc_state *crtc_state =
6910 		intel_atomic_get_new_crtc_state(state, crtc);
6911 	bool mode_changed = intel_crtc_needs_modeset(crtc_state);
6912 	int ret;
6913 
6914 	if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv) &&
6915 	    mode_changed && !crtc_state->hw.active)
6916 		crtc_state->update_wm_post = true;
6917 
6918 	if (mode_changed && crtc_state->hw.enable &&
6919 	    dev_priv->display.crtc_compute_clock &&
6920 	    !crtc_state->bigjoiner_slave &&
6921 	    !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
6922 		ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
6923 		if (ret)
6924 			return ret;
6925 	}
6926 
6927 	/*
6928 	 * May need to update pipe gamma enable bits
6929 	 * when C8 planes are getting enabled/disabled.
6930 	 */
6931 	if (c8_planes_changed(crtc_state))
6932 		crtc_state->uapi.color_mgmt_changed = true;
6933 
6934 	if (mode_changed || crtc_state->update_pipe ||
6935 	    crtc_state->uapi.color_mgmt_changed) {
6936 		ret = intel_color_check(crtc_state);
6937 		if (ret)
6938 			return ret;
6939 	}
6940 
6941 	if (dev_priv->display.compute_pipe_wm) {
6942 		ret = dev_priv->display.compute_pipe_wm(crtc_state);
6943 		if (ret) {
6944 			drm_dbg_kms(&dev_priv->drm,
6945 				    "Target pipe watermarks are invalid\n");
6946 			return ret;
6947 		}
6948 	}
6949 
6950 	if (dev_priv->display.compute_intermediate_wm) {
6951 		if (drm_WARN_ON(&dev_priv->drm,
6952 				!dev_priv->display.compute_pipe_wm))
6953 			return 0;
6954 
6955 		/*
6956 		 * Calculate 'intermediate' watermarks that satisfy both the
6957 		 * old state and the new state.  We can program these
6958 		 * immediately.
6959 		 */
6960 		ret = dev_priv->display.compute_intermediate_wm(crtc_state);
6961 		if (ret) {
6962 			drm_dbg_kms(&dev_priv->drm,
6963 				    "No valid intermediate pipe watermarks are possible\n");
6964 			return ret;
6965 		}
6966 	}
6967 
6968 	if (DISPLAY_VER(dev_priv) >= 9) {
6969 		if (mode_changed || crtc_state->update_pipe) {
6970 			ret = skl_update_scaler_crtc(crtc_state);
6971 			if (ret)
6972 				return ret;
6973 		}
6974 
6975 		ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
6976 		if (ret)
6977 			return ret;
6978 	}
6979 
6980 	if (HAS_IPS(dev_priv)) {
6981 		ret = hsw_compute_ips_config(crtc_state);
6982 		if (ret)
6983 			return ret;
6984 	}
6985 
6986 	if (DISPLAY_VER(dev_priv) >= 9 ||
6987 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
6988 		ret = hsw_compute_linetime_wm(state, crtc);
6989 		if (ret)
6990 			return ret;
6991 
6992 	}
6993 
6994 	if (!mode_changed) {
6995 		ret = intel_psr2_sel_fetch_update(state, crtc);
6996 		if (ret)
6997 			return ret;
6998 	}
6999 
7000 	return 0;
7001 }
7002 
7003 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
7004 {
7005 	struct intel_connector *connector;
7006 	struct drm_connector_list_iter conn_iter;
7007 
7008 	drm_connector_list_iter_begin(dev, &conn_iter);
7009 	for_each_intel_connector_iter(connector, &conn_iter) {
7010 		struct drm_connector_state *conn_state = connector->base.state;
7011 		struct intel_encoder *encoder =
7012 			to_intel_encoder(connector->base.encoder);
7013 
7014 		if (conn_state->crtc)
7015 			drm_connector_put(&connector->base);
7016 
7017 		if (encoder) {
7018 			struct intel_crtc *crtc =
7019 				to_intel_crtc(encoder->base.crtc);
7020 			const struct intel_crtc_state *crtc_state =
7021 				to_intel_crtc_state(crtc->base.state);
7022 
7023 			conn_state->best_encoder = &encoder->base;
7024 			conn_state->crtc = &crtc->base;
7025 			conn_state->max_bpc = (crtc_state->pipe_bpp ?: 24) / 3;
7026 
7027 			drm_connector_get(&connector->base);
7028 		} else {
7029 			conn_state->best_encoder = NULL;
7030 			conn_state->crtc = NULL;
7031 		}
7032 	}
7033 	drm_connector_list_iter_end(&conn_iter);
7034 }
7035 
7036 static int
7037 compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
7038 		      struct intel_crtc_state *pipe_config)
7039 {
7040 	struct drm_connector *connector = conn_state->connector;
7041 	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
7042 	const struct drm_display_info *info = &connector->display_info;
7043 	int bpp;
7044 
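	/* pipe_bpp counts all three color channels, so e.g. 10 bpc -> 30 bpp */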
7045 	switch (conn_state->max_bpc) {
7046 	case 6 ... 7:
7047 		bpp = 6 * 3;
7048 		break;
7049 	case 8 ... 9:
7050 		bpp = 8 * 3;
7051 		break;
7052 	case 10 ... 11:
7053 		bpp = 10 * 3;
7054 		break;
7055 	case 12 ... 16:
7056 		bpp = 12 * 3;
7057 		break;
7058 	default:
7059 		MISSING_CASE(conn_state->max_bpc);
7060 		return -EINVAL;
7061 	}
7062 
7063 	if (bpp < pipe_config->pipe_bpp) {
7064 		drm_dbg_kms(&i915->drm,
7065 			    "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
7066 			    "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
7067 			    connector->base.id, connector->name,
7068 			    bpp, 3 * info->bpc,
7069 			    3 * conn_state->max_requested_bpc,
7070 			    pipe_config->pipe_bpp);
7071 
7072 		pipe_config->pipe_bpp = bpp;
7073 	}
7074 
7075 	return 0;
7076 }
7077 
7078 static int
7079 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
7080 			  struct intel_crtc_state *pipe_config)
7081 {
7082 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7083 	struct drm_atomic_state *state = pipe_config->uapi.state;
7084 	struct drm_connector *connector;
7085 	struct drm_connector_state *connector_state;
7086 	int bpp, i;
7087 
7088 	if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
7089 	    IS_CHERRYVIEW(dev_priv)))
7090 		bpp = 10*3;
7091 	else if (DISPLAY_VER(dev_priv) >= 5)
7092 		bpp = 12*3;
7093 	else
7094 		bpp = 8*3;
7095 
7096 	pipe_config->pipe_bpp = bpp;
7097 
7098 	/* Clamp display bpp to connector max bpp */
7099 	for_each_new_connector_in_state(state, connector, connector_state, i) {
7100 		int ret;
7101 
7102 		if (connector_state->crtc != &crtc->base)
7103 			continue;
7104 
7105 		ret = compute_sink_pipe_bpp(connector_state, pipe_config);
7106 		if (ret)
7107 			return ret;
7108 	}
7109 
7110 	return 0;
7111 }
7112 
7113 static void intel_dump_crtc_timings(struct drm_i915_private *i915,
7114 				    const struct drm_display_mode *mode)
7115 {
7116 	drm_dbg_kms(&i915->drm, "crtc timings: %d %d %d %d %d %d %d %d %d, "
7117 		    "type: 0x%x flags: 0x%x\n",
7118 		    mode->crtc_clock,
7119 		    mode->crtc_hdisplay, mode->crtc_hsync_start,
7120 		    mode->crtc_hsync_end, mode->crtc_htotal,
7121 		    mode->crtc_vdisplay, mode->crtc_vsync_start,
7122 		    mode->crtc_vsync_end, mode->crtc_vtotal,
7123 		    mode->type, mode->flags);
7124 }
7125 
7126 static void
7127 intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
7128 		      const char *id, unsigned int lane_count,
7129 		      const struct intel_link_m_n *m_n)
7130 {
7131 	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
7132 
7133 	drm_dbg_kms(&i915->drm,
7134 		    "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
7135 		    id, lane_count,
7136 		    m_n->gmch_m, m_n->gmch_n,
7137 		    m_n->link_m, m_n->link_n, m_n->tu);
7138 }
7139 
7140 static void
7141 intel_dump_infoframe(struct drm_i915_private *dev_priv,
7142 		     const union hdmi_infoframe *frame)
7143 {
7144 	if (!drm_debug_enabled(DRM_UT_KMS))
7145 		return;
7146 
7147 	hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
7148 }
7149 
7150 static void
7151 intel_dump_dp_vsc_sdp(struct drm_i915_private *dev_priv,
7152 		      const struct drm_dp_vsc_sdp *vsc)
7153 {
7154 	if (!drm_debug_enabled(DRM_UT_KMS))
7155 		return;
7156 
7157 	drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, vsc);
7158 }
7159 
7160 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
7161 
7162 static const char * const output_type_str[] = {
7163 	OUTPUT_TYPE(UNUSED),
7164 	OUTPUT_TYPE(ANALOG),
7165 	OUTPUT_TYPE(DVO),
7166 	OUTPUT_TYPE(SDVO),
7167 	OUTPUT_TYPE(LVDS),
7168 	OUTPUT_TYPE(TVOUT),
7169 	OUTPUT_TYPE(HDMI),
7170 	OUTPUT_TYPE(DP),
7171 	OUTPUT_TYPE(EDP),
7172 	OUTPUT_TYPE(DSI),
7173 	OUTPUT_TYPE(DDI),
7174 	OUTPUT_TYPE(DP_MST),
7175 };
7176 
7177 #undef OUTPUT_TYPE
7178 
7179 static void snprintf_output_types(char *buf, size_t len,
7180 				  unsigned int output_types)
7181 {
7182 	char *str = buf;
7183 	int i;
7184 
7185 	str[0] = '\0';
7186 
7187 	for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
7188 		int r;
7189 
7190 		if ((output_types & BIT(i)) == 0)
7191 			continue;
7192 
7193 		r = snprintf(str, len, "%s%s",
7194 			     str != buf ? "," : "", output_type_str[i]);
7195 		if (r >= len)
7196 			break;
7197 		str += r;
7198 		len -= r;
7199 
7200 		output_types &= ~BIT(i);
7201 	}
7202 
7203 	WARN_ON_ONCE(output_types != 0);
7204 }
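
/*
 * Example: output_types with the HDMI and DP bits set is rendered as
 * "HDMI,DP" (in bit order), and formatting stops early if the buffer
 * fills up.
 */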
7205 
7206 static const char * const output_format_str[] = {
7207 	[INTEL_OUTPUT_FORMAT_RGB] = "RGB",
7208 	[INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
7209 	[INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
7210 };
7211 
7212 static const char *output_formats(enum intel_output_format format)
7213 {
7214 	if (format >= ARRAY_SIZE(output_format_str))
7215 		return "invalid";
7216 	return output_format_str[format];
7217 }
7218 
7219 static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
7220 {
7221 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
7222 	struct drm_i915_private *i915 = to_i915(plane->base.dev);
7223 	const struct drm_framebuffer *fb = plane_state->hw.fb;
7224 
7225 	if (!fb) {
7226 		drm_dbg_kms(&i915->drm,
7227 			    "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
7228 			    plane->base.base.id, plane->base.name,
7229 			    yesno(plane_state->uapi.visible));
7230 		return;
7231 	}
7232 
7233 	drm_dbg_kms(&i915->drm,
7234 		    "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
7235 		    plane->base.base.id, plane->base.name,
7236 		    fb->base.id, fb->width, fb->height, &fb->format->format,
7237 		    fb->modifier, yesno(plane_state->uapi.visible));
7238 	drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
7239 		    plane_state->hw.rotation, plane_state->scaler_id);
7240 	if (plane_state->uapi.visible)
7241 		drm_dbg_kms(&i915->drm,
7242 			    "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
7243 			    DRM_RECT_FP_ARG(&plane_state->uapi.src),
7244 			    DRM_RECT_ARG(&plane_state->uapi.dst));
7245 }
7246 
7247 static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
7248 				   struct intel_atomic_state *state,
7249 				   const char *context)
7250 {
7251 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
7252 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7253 	const struct intel_plane_state *plane_state;
7254 	struct intel_plane *plane;
7255 	char buf[64];
7256 	int i;
7257 
7258 	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
7259 		    crtc->base.base.id, crtc->base.name,
7260 		    yesno(pipe_config->hw.enable), context);
7261 
7262 	if (!pipe_config->hw.enable)
7263 		goto dump_planes;
7264 
7265 	snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
7266 	drm_dbg_kms(&dev_priv->drm,
7267 		    "active: %s, output_types: %s (0x%x), output format: %s\n",
7268 		    yesno(pipe_config->hw.active),
7269 		    buf, pipe_config->output_types,
7270 		    output_formats(pipe_config->output_format));
7271 
7272 	drm_dbg_kms(&dev_priv->drm,
7273 		    "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
7274 		    transcoder_name(pipe_config->cpu_transcoder),
7275 		    pipe_config->pipe_bpp, pipe_config->dither);
7276 
7277 	drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
7278 		    transcoder_name(pipe_config->mst_master_transcoder));
7279 
7280 	drm_dbg_kms(&dev_priv->drm,
7281 		    "port sync: master transcoder: %s, slave transcoder bitmask = 0x%x\n",
7282 		    transcoder_name(pipe_config->master_transcoder),
7283 		    pipe_config->sync_mode_slaves_mask);
7284 
7285 	drm_dbg_kms(&dev_priv->drm, "bigjoiner: %s\n",
7286 		    pipe_config->bigjoiner_slave ? "slave" :
7287 		    pipe_config->bigjoiner ? "master" : "no");
7288 
7289 	drm_dbg_kms(&dev_priv->drm, "splitter: %s, link count %d, overlap %d\n",
7290 		    enableddisabled(pipe_config->splitter.enable),
7291 		    pipe_config->splitter.link_count,
7292 		    pipe_config->splitter.pixel_overlap);
7293 
7294 	if (pipe_config->has_pch_encoder)
7295 		intel_dump_m_n_config(pipe_config, "fdi",
7296 				      pipe_config->fdi_lanes,
7297 				      &pipe_config->fdi_m_n);
7298 
7299 	if (intel_crtc_has_dp_encoder(pipe_config)) {
7300 		intel_dump_m_n_config(pipe_config, "dp m_n",
7301 				pipe_config->lane_count, &pipe_config->dp_m_n);
7302 		if (pipe_config->has_drrs)
7303 			intel_dump_m_n_config(pipe_config, "dp m2_n2",
7304 					      pipe_config->lane_count,
7305 					      &pipe_config->dp_m2_n2);
7306 	}
7307 
7308 	drm_dbg_kms(&dev_priv->drm,
7309 		    "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
7310 		    pipe_config->has_audio, pipe_config->has_infoframe,
7311 		    pipe_config->infoframes.enable);
7312 
7313 	if (pipe_config->infoframes.enable &
7314 	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
7315 		drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
7316 			    pipe_config->infoframes.gcp);
7317 	if (pipe_config->infoframes.enable &
7318 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
7319 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
7320 	if (pipe_config->infoframes.enable &
7321 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
7322 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
7323 	if (pipe_config->infoframes.enable &
7324 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
7325 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
7326 	if (pipe_config->infoframes.enable &
7327 	    intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_DRM))
7328 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
7329 	if (pipe_config->infoframes.enable &
7330 	    intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
7331 		intel_dump_infoframe(dev_priv, &pipe_config->infoframes.drm);
7332 	if (pipe_config->infoframes.enable &
7333 	    intel_hdmi_infoframe_enable(DP_SDP_VSC))
7334 		intel_dump_dp_vsc_sdp(dev_priv, &pipe_config->infoframes.vsc);
7335 
7336 	drm_dbg_kms(&dev_priv->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
7337 		    yesno(pipe_config->vrr.enable),
7338 		    pipe_config->vrr.vmin, pipe_config->vrr.vmax,
7339 		    pipe_config->vrr.pipeline_full, pipe_config->vrr.flipline,
7340 		    intel_vrr_vmin_vblank_start(pipe_config),
7341 		    intel_vrr_vmax_vblank_start(pipe_config));
7342 
7343 	drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
7344 	drm_mode_debug_printmodeline(&pipe_config->hw.mode);
7345 	drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
7346 	drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
7347 	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.adjusted_mode);
7348 	drm_dbg_kms(&dev_priv->drm, "pipe mode:\n");
7349 	drm_mode_debug_printmodeline(&pipe_config->hw.pipe_mode);
7350 	intel_dump_crtc_timings(dev_priv, &pipe_config->hw.pipe_mode);
7351 	drm_dbg_kms(&dev_priv->drm,
7352 		    "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
7353 		    pipe_config->port_clock,
7354 		    pipe_config->pipe_src_w, pipe_config->pipe_src_h,
7355 		    pipe_config->pixel_rate);
7356 
7357 	drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
7358 		    pipe_config->linetime, pipe_config->ips_linetime);
7359 
7360 	if (DISPLAY_VER(dev_priv) >= 9)
7361 		drm_dbg_kms(&dev_priv->drm,
7362 			    "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
7363 			    crtc->num_scalers,
7364 			    pipe_config->scaler_state.scaler_users,
7365 			    pipe_config->scaler_state.scaler_id);
7366 
7367 	if (HAS_GMCH(dev_priv))
7368 		drm_dbg_kms(&dev_priv->drm,
7369 			    "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
7370 			    pipe_config->gmch_pfit.control,
7371 			    pipe_config->gmch_pfit.pgm_ratios,
7372 			    pipe_config->gmch_pfit.lvds_border_bits);
7373 	else
7374 		drm_dbg_kms(&dev_priv->drm,
7375 			    "pch pfit: " DRM_RECT_FMT ", %s, force thru: %s\n",
7376 			    DRM_RECT_ARG(&pipe_config->pch_pfit.dst),
7377 			    enableddisabled(pipe_config->pch_pfit.enabled),
7378 			    yesno(pipe_config->pch_pfit.force_thru));
7379 
7380 	drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
7381 		    pipe_config->ips_enabled, pipe_config->double_wide);
7382 
7383 	intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
7384 
7385 	if (IS_CHERRYVIEW(dev_priv))
7386 		drm_dbg_kms(&dev_priv->drm,
7387 			    "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
7388 			    pipe_config->cgm_mode, pipe_config->gamma_mode,
7389 			    pipe_config->gamma_enable, pipe_config->csc_enable);
7390 	else
7391 		drm_dbg_kms(&dev_priv->drm,
7392 			    "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
7393 			    pipe_config->csc_mode, pipe_config->gamma_mode,
7394 			    pipe_config->gamma_enable, pipe_config->csc_enable);
7395 
7396 	drm_dbg_kms(&dev_priv->drm, "degamma lut: %d entries, gamma lut: %d entries\n",
7397 		    pipe_config->hw.degamma_lut ?
7398 		    drm_color_lut_size(pipe_config->hw.degamma_lut) : 0,
7399 		    pipe_config->hw.gamma_lut ?
7400 		    drm_color_lut_size(pipe_config->hw.gamma_lut) : 0);
7401 
7402 dump_planes:
7403 	if (!state)
7404 		return;
7405 
7406 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
7407 		if (plane->pipe == crtc->pipe)
7408 			intel_dump_plane_state(plane_state);
7409 	}
7410 }
7411 
7412 static bool check_digital_port_conflicts(struct intel_atomic_state *state)
7413 {
7414 	struct drm_device *dev = state->base.dev;
7415 	struct drm_connector *connector;
7416 	struct drm_connector_list_iter conn_iter;
7417 	unsigned int used_ports = 0;
7418 	unsigned int used_mst_ports = 0;
7419 	bool ret = true;
7420 
7421 	/*
7422 	 * We're going to peek into connector->state,
7423 	 * hence connection_mutex must be held.
7424 	 */
7425 	drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex);
7426 
7427 	/*
7428 	 * Walk the connector list instead of the encoder
7429 	 * list to detect the problem on ddi platforms
7430 	 * where there's just one encoder per digital port.
7431 	 */
7432 	drm_connector_list_iter_begin(dev, &conn_iter);
7433 	drm_for_each_connector_iter(connector, &conn_iter) {
7434 		struct drm_connector_state *connector_state;
7435 		struct intel_encoder *encoder;
7436 
7437 		connector_state =
7438 			drm_atomic_get_new_connector_state(&state->base,
7439 							   connector);
7440 		if (!connector_state)
7441 			connector_state = connector->state;
7442 
7443 		if (!connector_state->best_encoder)
7444 			continue;
7445 
7446 		encoder = to_intel_encoder(connector_state->best_encoder);
7447 
7448 		drm_WARN_ON(dev, !connector_state->crtc);
7449 
7450 		switch (encoder->type) {
7451 		case INTEL_OUTPUT_DDI:
7452 			if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
7453 				break;
7454 			fallthrough;
7455 		case INTEL_OUTPUT_DP:
7456 		case INTEL_OUTPUT_HDMI:
7457 		case INTEL_OUTPUT_EDP:
7458 			/* the same port mustn't appear more than once */
7459 			if (used_ports & BIT(encoder->port))
7460 				ret = false;
7461 
7462 			used_ports |= BIT(encoder->port);
7463 			break;
7464 		case INTEL_OUTPUT_DP_MST:
7465 			used_mst_ports |=
7466 				1 << encoder->port;
7467 			break;
7468 		default:
7469 			break;
7470 		}
7471 	}
7472 	drm_connector_list_iter_end(&conn_iter);
7473 
7474 	/* can't mix MST and SST/HDMI on the same port */
7475 	if (used_ports & used_mst_ports)
7476 		return false;
7477 
7478 	return ret;
7479 }
7480 
7481 static void
7482 intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
7483 					   struct intel_crtc_state *crtc_state)
7484 {
7485 	const struct intel_crtc_state *from_crtc_state = crtc_state;
7486 
7487 	if (crtc_state->bigjoiner_slave) {
7488 		from_crtc_state = intel_atomic_get_new_crtc_state(state,
7489 								  crtc_state->bigjoiner_linked_crtc);
7490 
7491 		/* No need to copy state if the master state is unchanged */
7492 		if (!from_crtc_state)
7493 			return;
7494 	}
7495 
7496 	intel_crtc_copy_color_blobs(crtc_state, from_crtc_state);
7497 }
7498 
7499 static void
7500 intel_crtc_copy_uapi_to_hw_state(struct intel_atomic_state *state,
7501 				 struct intel_crtc_state *crtc_state)
7502 {
7503 	crtc_state->hw.enable = crtc_state->uapi.enable;
7504 	crtc_state->hw.active = crtc_state->uapi.active;
7505 	crtc_state->hw.mode = crtc_state->uapi.mode;
7506 	crtc_state->hw.adjusted_mode = crtc_state->uapi.adjusted_mode;
7507 	crtc_state->hw.scaling_filter = crtc_state->uapi.scaling_filter;
7508 
7509 	intel_crtc_copy_uapi_to_hw_state_nomodeset(state, crtc_state);
7510 }
7511 
7512 static void intel_crtc_copy_hw_to_uapi_state(struct intel_crtc_state *crtc_state)
7513 {
7514 	if (crtc_state->bigjoiner_slave)
7515 		return;
7516 
7517 	crtc_state->uapi.enable = crtc_state->hw.enable;
7518 	crtc_state->uapi.active = crtc_state->hw.active;
7519 	drm_WARN_ON(crtc_state->uapi.crtc->dev,
7520 		    drm_atomic_set_mode_for_crtc(&crtc_state->uapi, &crtc_state->hw.mode) < 0);
7521 
7522 	crtc_state->uapi.adjusted_mode = crtc_state->hw.adjusted_mode;
7523 	crtc_state->uapi.scaling_filter = crtc_state->hw.scaling_filter;
7524 
7525 	/* copy color blobs to uapi */
7526 	drm_property_replace_blob(&crtc_state->uapi.degamma_lut,
7527 				  crtc_state->hw.degamma_lut);
7528 	drm_property_replace_blob(&crtc_state->uapi.gamma_lut,
7529 				  crtc_state->hw.gamma_lut);
7530 	drm_property_replace_blob(&crtc_state->uapi.ctm,
7531 				  crtc_state->hw.ctm);
7532 }
7533 
7534 static int
7535 copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state,
7536 			  const struct intel_crtc_state *from_crtc_state)
7537 {
7538 	struct intel_crtc_state *saved_state;
7539 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7540 
7541 	saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
7542 	if (!saved_state)
7543 		return -ENOMEM;
7544 
7545 	saved_state->uapi = crtc_state->uapi;
7546 	saved_state->scaler_state = crtc_state->scaler_state;
7547 	saved_state->shared_dpll = crtc_state->shared_dpll;
7548 	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
7549 	saved_state->crc_enabled = crtc_state->crc_enabled;
7550 
7551 	intel_crtc_free_hw_state(crtc_state);
7552 	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
7553 	kfree(saved_state);
7554 
7555 	/* Re-init hw state */
7556 	memset(&crtc_state->hw, 0, sizeof(saved_state->hw));
7557 	crtc_state->hw.enable = from_crtc_state->hw.enable;
7558 	crtc_state->hw.active = from_crtc_state->hw.active;
7559 	crtc_state->hw.pipe_mode = from_crtc_state->hw.pipe_mode;
7560 	crtc_state->hw.adjusted_mode = from_crtc_state->hw.adjusted_mode;
7561 
7562 	/* Some fixups */
7563 	crtc_state->uapi.mode_changed = from_crtc_state->uapi.mode_changed;
7564 	crtc_state->uapi.connectors_changed = from_crtc_state->uapi.connectors_changed;
7565 	crtc_state->uapi.active_changed = from_crtc_state->uapi.active_changed;
7566 	crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
7567 	crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
7568 	crtc_state->bigjoiner_slave = true;
7569 	crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe;
7570 	crtc_state->has_audio = false;
7571 
7572 	return 0;
7573 }
7574 
7575 static int
7576 intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
7577 				 struct intel_crtc_state *crtc_state)
7578 {
7579 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7580 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7581 	struct intel_crtc_state *saved_state;
7582 
7583 	saved_state = intel_crtc_state_alloc(crtc);
7584 	if (!saved_state)
7585 		return -ENOMEM;
7586 
7587 	/* free the old crtc_state->hw members */
7588 	intel_crtc_free_hw_state(crtc_state);
7589 
7590 	/* FIXME: before the switch to atomic started, a new pipe_config was
7591 	 * kzalloc'd. Code that depends on any field being zero should be
7592 	 * fixed, so that the crtc_state can be safely duplicated. For now,
7593 	 * only fields that are known to not cause problems are preserved. */
7594 
7595 	saved_state->uapi = crtc_state->uapi;
7596 	saved_state->scaler_state = crtc_state->scaler_state;
7597 	saved_state->shared_dpll = crtc_state->shared_dpll;
7598 	saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
7599 	memcpy(saved_state->icl_port_dplls, crtc_state->icl_port_dplls,
7600 	       sizeof(saved_state->icl_port_dplls));
7601 	saved_state->crc_enabled = crtc_state->crc_enabled;
7602 	if (IS_G4X(dev_priv) ||
7603 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
7604 		saved_state->wm = crtc_state->wm;
7605 
7606 	memcpy(crtc_state, saved_state, sizeof(*crtc_state));
7607 	kfree(saved_state);
7608 
7609 	intel_crtc_copy_uapi_to_hw_state(state, crtc_state);
7610 
7611 	return 0;
7612 }
7613 
7614 static int
7615 intel_modeset_pipe_config(struct intel_atomic_state *state,
7616 			  struct intel_crtc_state *pipe_config)
7617 {
7618 	struct drm_crtc *crtc = pipe_config->uapi.crtc;
7619 	struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
7620 	struct drm_connector *connector;
7621 	struct drm_connector_state *connector_state;
7622 	int base_bpp, ret, i;
7623 	bool retry = true;
7624 
7625 	pipe_config->cpu_transcoder =
7626 		(enum transcoder) to_intel_crtc(crtc)->pipe;
7627 
7628 	/*
7629 	 * Sanitize sync polarity flags based on requested ones. If neither
7630 	 * positive or negative polarity is requested, treat this as meaning
7631 	 * negative polarity.
7632 	 */
7633 	if (!(pipe_config->hw.adjusted_mode.flags &
7634 	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
7635 		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
7636 
7637 	if (!(pipe_config->hw.adjusted_mode.flags &
7638 	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
7639 		pipe_config->hw.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
7640 
7641 	ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
7642 					pipe_config);
7643 	if (ret)
7644 		return ret;
7645 
7646 	base_bpp = pipe_config->pipe_bpp;
7647 
7648 	/*
7649 	 * Determine the real pipe dimensions. Note that stereo modes can
7650 	 * increase the actual pipe size due to the frame doubling and
7651 	 * insertion of additional space for blanks between the frame. This
7652 	 * is stored in the crtc timings. We use the requested mode to do this
7653 	 * computation to clearly distinguish it from the adjusted mode, which
7654 	 * can be changed by the connectors in the below retry loop.
7655 	 */
7656 	drm_mode_get_hv_timing(&pipe_config->hw.mode,
7657 			       &pipe_config->pipe_src_w,
7658 			       &pipe_config->pipe_src_h);
7659 
7660 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
7661 		struct intel_encoder *encoder =
7662 			to_intel_encoder(connector_state->best_encoder);
7663 
7664 		if (connector_state->crtc != crtc)
7665 			continue;
7666 
7667 		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
7668 			drm_dbg_kms(&i915->drm,
7669 				    "rejecting invalid cloning configuration\n");
7670 			return -EINVAL;
7671 		}
7672 
7673 		/*
7674 		 * Determine output_types before calling the .compute_config()
7675 		 * hooks so that the hooks can use this information safely.
7676 		 */
7677 		if (encoder->compute_output_type)
7678 			pipe_config->output_types |=
7679 				BIT(encoder->compute_output_type(encoder, pipe_config,
7680 								 connector_state));
7681 		else
7682 			pipe_config->output_types |= BIT(encoder->type);
7683 	}
7684 
7685 encoder_retry:
7686 	/* Ensure the port clock defaults are reset when retrying. */
7687 	pipe_config->port_clock = 0;
7688 	pipe_config->pixel_multiplier = 1;
7689 
7690 	/* Fill in default crtc timings, allow encoders to overwrite them. */
7691 	drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
7692 			      CRTC_STEREO_DOUBLE);
7693 
7694 	/* Pass our mode to the connectors and the CRTC to give them a chance to
7695 	 * adjust it according to limitations or connector properties, and also
7696 	 * a chance to reject the mode entirely.
7697 	 */
7698 	for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
7699 		struct intel_encoder *encoder =
7700 			to_intel_encoder(connector_state->best_encoder);
7701 
7702 		if (connector_state->crtc != crtc)
7703 			continue;
7704 
7705 		ret = encoder->compute_config(encoder, pipe_config,
7706 					      connector_state);
7707 		if (ret < 0) {
7708 			if (ret != -EDEADLK)
7709 				drm_dbg_kms(&i915->drm,
7710 					    "Encoder config failure: %d\n",
7711 					    ret);
7712 			return ret;
7713 		}
7714 	}
7715 
7716 	/* Set default port clock if not overwritten by the encoder. Needs to be
7717 	 * done afterwards in case the encoder adjusts the mode. */
7718 	if (!pipe_config->port_clock)
7719 		pipe_config->port_clock = pipe_config->hw.adjusted_mode.crtc_clock
7720 			* pipe_config->pixel_multiplier;
7721 
7722 	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
7723 	if (ret == -EDEADLK)
7724 		return ret;
7725 	if (ret < 0) {
7726 		drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
7727 		return ret;
7728 	}
7729 
7730 	if (ret == I915_DISPLAY_CONFIG_RETRY) {
7731 		if (drm_WARN(&i915->drm, !retry,
7732 			     "loop in pipe configuration computation\n"))
7733 			return -EINVAL;
7734 
7735 		drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
7736 		retry = false;
7737 		goto encoder_retry;
7738 	}
7739 
7740 	/* Dithering seems not to pass bits through correctly when it should, so
7741 	 * only enable it on 6bpc panels and when it's not a compliance
7742 	 * test requesting a 6bpc video pattern.
7743 	 */
7744 	pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
7745 		!pipe_config->dither_force_disable;
7746 	drm_dbg_kms(&i915->drm,
7747 		    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
7748 		    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
7749 
7750 	return 0;
7751 }
7752 
7753 static int
7754 intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
7755 {
7756 	struct intel_atomic_state *state =
7757 		to_intel_atomic_state(crtc_state->uapi.state);
7758 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
7759 	struct drm_connector_state *conn_state;
7760 	struct drm_connector *connector;
7761 	int i;
7762 
7763 	for_each_new_connector_in_state(&state->base, connector,
7764 					conn_state, i) {
7765 		struct intel_encoder *encoder =
7766 			to_intel_encoder(conn_state->best_encoder);
7767 		int ret;
7768 
7769 		if (conn_state->crtc != &crtc->base ||
7770 		    !encoder->compute_config_late)
7771 			continue;
7772 
7773 		ret = encoder->compute_config_late(encoder, crtc_state,
7774 						   conn_state);
7775 		if (ret)
7776 			return ret;
7777 	}
7778 
7779 	return 0;
7780 }
7781 
7782 bool intel_fuzzy_clock_check(int clock1, int clock2)
7783 {
7784 	int diff;
7785 
7786 	if (clock1 == clock2)
7787 		return true;
7788 
7789 	if (!clock1 || !clock2)
7790 		return false;
7791 
7792 	diff = abs(clock1 - clock2);
7793 
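	/*
	 * Treat the clocks as equal when their difference is below ~5%
	 * of their sum, i.e. within roughly 10% of the average clock.
	 */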
7794 	if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
7795 		return true;
7796 
7797 	return false;
7798 }
7799 
7800 static bool
7801 intel_compare_m_n(unsigned int m, unsigned int n,
7802 		  unsigned int m2, unsigned int n2,
7803 		  bool exact)
7804 {
7805 	if (m == m2 && n == n2)
7806 		return true;
7807 
7808 	if (exact || !m || !n || !m2 || !n2)
7809 		return false;
7810 
7811 	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
7812 
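	/*
	 * Bring both ratios to a common denominator by doubling the
	 * smaller n (and its m) until the n values match, then
	 * fuzzy-compare the numerators.
	 */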
7813 	if (n > n2) {
7814 		while (n > n2) {
7815 			m2 <<= 1;
7816 			n2 <<= 1;
7817 		}
7818 	} else if (n < n2) {
7819 		while (n < n2) {
7820 			m <<= 1;
7821 			n <<= 1;
7822 		}
7823 	}
7824 
7825 	if (n != n2)
7826 		return false;
7827 
7828 	return intel_fuzzy_clock_check(m, m2);
7829 }
7830 
7831 static bool
7832 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
7833 		       const struct intel_link_m_n *m2_n2,
7834 		       bool exact)
7835 {
7836 	return m_n->tu == m2_n2->tu &&
7837 		intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
7838 				  m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
7839 		intel_compare_m_n(m_n->link_m, m_n->link_n,
7840 				  m2_n2->link_m, m2_n2->link_n, exact);
7841 }
7842 
7843 static bool
7844 intel_compare_infoframe(const union hdmi_infoframe *a,
7845 			const union hdmi_infoframe *b)
7846 {
7847 	return memcmp(a, b, sizeof(*a)) == 0;
7848 }
7849 
7850 static bool
7851 intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
7852 			 const struct drm_dp_vsc_sdp *b)
7853 {
7854 	return memcmp(a, b, sizeof(*a)) == 0;
7855 }
7856 
7857 static void
7858 pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
7859 			       bool fastset, const char *name,
7860 			       const union hdmi_infoframe *a,
7861 			       const union hdmi_infoframe *b)
7862 {
7863 	if (fastset) {
7864 		if (!drm_debug_enabled(DRM_UT_KMS))
7865 			return;
7866 
7867 		drm_dbg_kms(&dev_priv->drm,
7868 			    "fastset mismatch in %s infoframe\n", name);
7869 		drm_dbg_kms(&dev_priv->drm, "expected:\n");
7870 		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
7871 		drm_dbg_kms(&dev_priv->drm, "found:\n");
7872 		hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
7873 	} else {
7874 		drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
7875 		drm_err(&dev_priv->drm, "expected:\n");
7876 		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
7877 		drm_err(&dev_priv->drm, "found:\n");
7878 		hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
7879 	}
7880 }
7881 
7882 static void
7883 pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
7884 				bool fastset, const char *name,
7885 				const struct drm_dp_vsc_sdp *a,
7886 				const struct drm_dp_vsc_sdp *b)
7887 {
7888 	if (fastset) {
7889 		if (!drm_debug_enabled(DRM_UT_KMS))
7890 			return;
7891 
7892 		drm_dbg_kms(&dev_priv->drm,
7893 			    "fastset mismatch in %s dp sdp\n", name);
7894 		drm_dbg_kms(&dev_priv->drm, "expected:\n");
7895 		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, a);
7896 		drm_dbg_kms(&dev_priv->drm, "found:\n");
7897 		drm_dp_vsc_sdp_log(KERN_DEBUG, dev_priv->drm.dev, b);
7898 	} else {
7899 		drm_err(&dev_priv->drm, "mismatch in %s dp sdp\n", name);
7900 		drm_err(&dev_priv->drm, "expected:\n");
7901 		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, a);
7902 		drm_err(&dev_priv->drm, "found:\n");
7903 		drm_dp_vsc_sdp_log(KERN_ERR, dev_priv->drm.dev, b);
7904 	}
7905 }
7906 
7907 static void __printf(4, 5)
7908 pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
7909 		     const char *name, const char *format, ...)
7910 {
7911 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
7912 	struct va_format vaf;
7913 	va_list args;
7914 
7915 	va_start(args, format);
7916 	vaf.fmt = format;
7917 	vaf.va = &args;
7918 
7919 	if (fastset)
7920 		drm_dbg_kms(&i915->drm,
7921 			    "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
7922 			    crtc->base.base.id, crtc->base.name, name, &vaf);
7923 	else
7924 		drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
7925 			crtc->base.base.id, crtc->base.name, name, &vaf);
7926 
7927 	va_end(args);
7928 }
7929 
7930 static bool fastboot_enabled(struct drm_i915_private *dev_priv)
7931 {
7932 	if (dev_priv->params.fastboot != -1)
7933 		return dev_priv->params.fastboot;
7934 
7935 	/* Enable fastboot by default on Skylake and newer */
7936 	if (DISPLAY_VER(dev_priv) >= 9)
7937 		return true;
7938 
7939 	/* Enable fastboot by default on VLV and CHV */
7940 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
7941 		return true;
7942 
7943 	/* Disabled by default on all others */
7944 	return false;
7945 }
7946 
7947 static bool
7948 intel_pipe_config_compare(const struct intel_crtc_state *current_config,
7949 			  const struct intel_crtc_state *pipe_config,
7950 			  bool fastset)
7951 {
7952 	struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
7953 	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
7954 	bool ret = true;
7955 	u32 bp_gamma = 0;
7956 	bool fixup_inherited = fastset &&
7957 		current_config->inherited && !pipe_config->inherited;
7958 
7959 	if (fixup_inherited && !fastboot_enabled(dev_priv)) {
7960 		drm_dbg_kms(&dev_priv->drm,
7961 			    "initial modeset and fastboot not set\n");
7962 		ret = false;
7963 	}
7964 
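/*
 * The PIPE_CONF_CHECK_* macros below each compare one field of the two
 * crtc states, logging any mismatch via pipe_config_mismatch() with a
 * format appropriate to the field type (hex, int, bool, pointer, ...)
 * and marking the configs as mismatched (ret = false).
 */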
7965 #define PIPE_CONF_CHECK_X(name) do { \
7966 	if (current_config->name != pipe_config->name) { \
7967 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
7968 				     "(expected 0x%08x, found 0x%08x)", \
7969 				     current_config->name, \
7970 				     pipe_config->name); \
7971 		ret = false; \
7972 	} \
7973 } while (0)
7974 
7975 #define PIPE_CONF_CHECK_I(name) do { \
7976 	if (current_config->name != pipe_config->name) { \
7977 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
7978 				     "(expected %i, found %i)", \
7979 				     current_config->name, \
7980 				     pipe_config->name); \
7981 		ret = false; \
7982 	} \
7983 } while (0)
7984 
7985 #define PIPE_CONF_CHECK_BOOL(name) do { \
7986 	if (current_config->name != pipe_config->name) { \
7987 		pipe_config_mismatch(fastset, crtc,  __stringify(name), \
7988 				     "(expected %s, found %s)", \
7989 				     yesno(current_config->name), \
7990 				     yesno(pipe_config->name)); \
7991 		ret = false; \
7992 	} \
7993 } while (0)
7994 
7995 /*
7996  * Checks state where we only read out whether it is enabled, but not the
7997  * entire state itself (like full infoframes or the ELD for audio). Such
7998  * state requires a full modeset on bootup to be fixed up.
7999  */
8000 #define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
8001 	if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
8002 		PIPE_CONF_CHECK_BOOL(name); \
8003 	} else { \
8004 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8005 				     "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
8006 				     yesno(current_config->name), \
8007 				     yesno(pipe_config->name)); \
8008 		ret = false; \
8009 	} \
8010 } while (0)
8011 
8012 #define PIPE_CONF_CHECK_P(name) do { \
8013 	if (current_config->name != pipe_config->name) { \
8014 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8015 				     "(expected %p, found %p)", \
8016 				     current_config->name, \
8017 				     pipe_config->name); \
8018 		ret = false; \
8019 	} \
8020 } while (0)
8021 
8022 #define PIPE_CONF_CHECK_M_N(name) do { \
8023 	if (!intel_compare_link_m_n(&current_config->name, \
8024 				    &pipe_config->name,\
8025 				    !fastset)) { \
8026 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8027 				     "(expected tu %i gmch %i/%i link %i/%i, " \
8028 				     "found tu %i, gmch %i/%i link %i/%i)", \
8029 				     current_config->name.tu, \
8030 				     current_config->name.gmch_m, \
8031 				     current_config->name.gmch_n, \
8032 				     current_config->name.link_m, \
8033 				     current_config->name.link_n, \
8034 				     pipe_config->name.tu, \
8035 				     pipe_config->name.gmch_m, \
8036 				     pipe_config->name.gmch_n, \
8037 				     pipe_config->name.link_m, \
8038 				     pipe_config->name.link_n); \
8039 		ret = false; \
8040 	} \
8041 } while (0)
8042 
8043 /* This is required for BDW+ where there is only one set of registers for
8044  * switching between high and low RR.
8045  * This macro can be used whenever a comparison has to be made between one
8046  * hw state and multiple sw state variables.
8047  */
8048 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
8049 	if (!intel_compare_link_m_n(&current_config->name, \
8050 				    &pipe_config->name, !fastset) && \
8051 	    !intel_compare_link_m_n(&current_config->alt_name, \
8052 				    &pipe_config->name, !fastset)) { \
8053 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8054 				     "(expected tu %i gmch %i/%i link %i/%i, " \
8055 				     "or tu %i gmch %i/%i link %i/%i, " \
8056 				     "found tu %i, gmch %i/%i link %i/%i)", \
8057 				     current_config->name.tu, \
8058 				     current_config->name.gmch_m, \
8059 				     current_config->name.gmch_n, \
8060 				     current_config->name.link_m, \
8061 				     current_config->name.link_n, \
8062 				     current_config->alt_name.tu, \
8063 				     current_config->alt_name.gmch_m, \
8064 				     current_config->alt_name.gmch_n, \
8065 				     current_config->alt_name.link_m, \
8066 				     current_config->alt_name.link_n, \
8067 				     pipe_config->name.tu, \
8068 				     pipe_config->name.gmch_m, \
8069 				     pipe_config->name.gmch_n, \
8070 				     pipe_config->name.link_m, \
8071 				     pipe_config->name.link_n); \
8072 		ret = false; \
8073 	} \
8074 } while (0)
8075 
8076 #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
8077 	if ((current_config->name ^ pipe_config->name) & (mask)) { \
8078 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8079 				     "(%x) (expected %i, found %i)", \
8080 				     (mask), \
8081 				     current_config->name & (mask), \
8082 				     pipe_config->name & (mask)); \
8083 		ret = false; \
8084 	} \
8085 } while (0)
8086 
8087 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
8088 	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
8089 		pipe_config_mismatch(fastset, crtc, __stringify(name), \
8090 				     "(expected %i, found %i)", \
8091 				     current_config->name, \
8092 				     pipe_config->name); \
8093 		ret = false; \
8094 	} \
8095 } while (0)
8096 
8097 #define PIPE_CONF_CHECK_INFOFRAME(name) do { \
8098 	if (!intel_compare_infoframe(&current_config->infoframes.name, \
8099 				     &pipe_config->infoframes.name)) { \
8100 		pipe_config_infoframe_mismatch(dev_priv, fastset, __stringify(name), \
8101 					       &current_config->infoframes.name, \
8102 					       &pipe_config->infoframes.name); \
8103 		ret = false; \
8104 	} \
8105 } while (0)
8106 
8107 #define PIPE_CONF_CHECK_DP_VSC_SDP(name) do { \
8108 	if (!current_config->has_psr && !pipe_config->has_psr && \
8109 	    !intel_compare_dp_vsc_sdp(&current_config->infoframes.name, \
8110 				      &pipe_config->infoframes.name)) { \
8111 		pipe_config_dp_vsc_sdp_mismatch(dev_priv, fastset, __stringify(name), \
8112 						&current_config->infoframes.name, \
8113 						&pipe_config->infoframes.name); \
8114 		ret = false; \
8115 	} \
8116 } while (0)
8117 
8118 #define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
8119 	if (current_config->name1 != pipe_config->name1) { \
8120 		pipe_config_mismatch(fastset, crtc, __stringify(name1), \
8121 				"(expected %i, found %i, won't compare lut values)", \
8122 				current_config->name1, \
8123 				pipe_config->name1); \
8124 		ret = false;\
8125 	} else { \
8126 		if (!intel_color_lut_equal(current_config->name2, \
8127 					pipe_config->name2, pipe_config->name1, \
8128 					bit_precision)) { \
8129 			pipe_config_mismatch(fastset, crtc, __stringify(name2), \
8130 					"hw_state doesn't match sw_state"); \
8131 			ret = false; \
8132 		} \
8133 	} \
8134 } while (0)
8135 
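/* Evaluates to true when either of the two states carries the given quirk. */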
8136 #define PIPE_CONF_QUIRK(quirk) \
8137 	((current_config->quirks | pipe_config->quirks) & (quirk))
8138 
8139 	PIPE_CONF_CHECK_I(cpu_transcoder);
8140 
8141 	PIPE_CONF_CHECK_BOOL(has_pch_encoder);
8142 	PIPE_CONF_CHECK_I(fdi_lanes);
8143 	PIPE_CONF_CHECK_M_N(fdi_m_n);
8144 
8145 	PIPE_CONF_CHECK_I(lane_count);
8146 	PIPE_CONF_CHECK_X(lane_lat_optim_mask);
8147 
8148 	if (DISPLAY_VER(dev_priv) < 8) {
8149 		PIPE_CONF_CHECK_M_N(dp_m_n);
8150 
8151 		if (current_config->has_drrs)
8152 			PIPE_CONF_CHECK_M_N(dp_m2_n2);
8153 	} else
8154 		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
8155 
8156 	PIPE_CONF_CHECK_X(output_types);
8157 
8158 	/* FIXME do the readout properly and get rid of this quirk */
8159 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8160 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
8161 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
8162 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
8163 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
8164 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
8165 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
8166 
8167 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
8168 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
8169 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
8170 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
8171 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
8172 		PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
8173 
8174 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
8175 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
8176 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
8177 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
8178 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
8179 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
8180 
8181 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
8182 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
8183 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
8184 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
8185 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
8186 		PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
8187 
8188 		PIPE_CONF_CHECK_I(pixel_multiplier);
8189 
8190 		PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8191 				      DRM_MODE_FLAG_INTERLACE);
8192 
8193 		if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
8194 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8195 					      DRM_MODE_FLAG_PHSYNC);
8196 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8197 					      DRM_MODE_FLAG_NHSYNC);
8198 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8199 					      DRM_MODE_FLAG_PVSYNC);
8200 			PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
8201 					      DRM_MODE_FLAG_NVSYNC);
8202 		}
8203 	}
8204 
8205 	PIPE_CONF_CHECK_I(output_format);
8206 	PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
8207 	if ((DISPLAY_VER(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
8208 	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
8209 		PIPE_CONF_CHECK_BOOL(limited_color_range);
8210 
8211 	PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
8212 	PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
8213 	PIPE_CONF_CHECK_BOOL(has_infoframe);
8214 	/* FIXME do the readout properly and get rid of this quirk */
8215 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
8216 		PIPE_CONF_CHECK_BOOL(fec_enable);
8217 
8218 	PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
8219 
8220 	PIPE_CONF_CHECK_X(gmch_pfit.control);
8221 	/* pfit ratios are autocomputed by the hw on gen4+ */
8222 	if (DISPLAY_VER(dev_priv) < 4)
8223 		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
8224 	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
8225 
8226 	/*
8227 	 * Changing the EDP transcoder input mux
8228 	 * (A_ONOFF vs. A_ON) requires a full modeset.
8229 	 */
8230 	PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
8231 
8232 	if (!fastset) {
8233 		PIPE_CONF_CHECK_I(pipe_src_w);
8234 		PIPE_CONF_CHECK_I(pipe_src_h);
8235 
8236 		PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
8237 		if (current_config->pch_pfit.enabled) {
8238 			PIPE_CONF_CHECK_I(pch_pfit.dst.x1);
8239 			PIPE_CONF_CHECK_I(pch_pfit.dst.y1);
8240 			PIPE_CONF_CHECK_I(pch_pfit.dst.x2);
8241 			PIPE_CONF_CHECK_I(pch_pfit.dst.y2);
8242 		}
8243 
8244 		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
8245 		/* FIXME do the readout properly and get rid of this quirk */
8246 		if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
8247 			PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
8248 
8249 		PIPE_CONF_CHECK_X(gamma_mode);
8250 		if (IS_CHERRYVIEW(dev_priv))
8251 			PIPE_CONF_CHECK_X(cgm_mode);
8252 		else
8253 			PIPE_CONF_CHECK_X(csc_mode);
8254 		PIPE_CONF_CHECK_BOOL(gamma_enable);
8255 		PIPE_CONF_CHECK_BOOL(csc_enable);
8256 
8257 		PIPE_CONF_CHECK_I(linetime);
8258 		PIPE_CONF_CHECK_I(ips_linetime);
8259 
8260 		bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
8261 		if (bp_gamma)
8262 			PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
8263 	}
8264 
8265 	PIPE_CONF_CHECK_BOOL(double_wide);
8266 
8267 	PIPE_CONF_CHECK_P(shared_dpll);
8268 
8269 	/* FIXME do the readout properly and get rid of this quirk */
8270 	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
8271 		PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
8272 		PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
8273 		PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
8274 		PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
8275 		PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
8276 		PIPE_CONF_CHECK_X(dpll_hw_state.spll);
8277 		PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
8278 		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
8279 		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
8280 		PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
8281 		PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
8282 		PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
8283 		PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
8284 		PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
8285 		PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
8286 		PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
8287 		PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
8288 		PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
8289 		PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
8290 		PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
8291 		PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
8292 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
8293 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
8294 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
8295 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
8296 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
8297 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
8298 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
8299 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
8300 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
8301 		PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
8302 
8303 		PIPE_CONF_CHECK_X(dsi_pll.ctrl);
8304 		PIPE_CONF_CHECK_X(dsi_pll.div);
8305 
8306 		if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
8307 			PIPE_CONF_CHECK_I(pipe_bpp);
8308 
8309 		PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
8310 		PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
8311 		PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
8312 
8313 		PIPE_CONF_CHECK_I(min_voltage_level);
8314 	}
8315 
8316 	PIPE_CONF_CHECK_X(infoframes.enable);
8317 	PIPE_CONF_CHECK_X(infoframes.gcp);
8318 	PIPE_CONF_CHECK_INFOFRAME(avi);
8319 	PIPE_CONF_CHECK_INFOFRAME(spd);
8320 	PIPE_CONF_CHECK_INFOFRAME(hdmi);
8321 	PIPE_CONF_CHECK_INFOFRAME(drm);
8322 	PIPE_CONF_CHECK_DP_VSC_SDP(vsc);
8323 
8324 	PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
8325 	PIPE_CONF_CHECK_I(master_transcoder);
8326 	PIPE_CONF_CHECK_BOOL(bigjoiner);
8327 	PIPE_CONF_CHECK_BOOL(bigjoiner_slave);
8328 	PIPE_CONF_CHECK_P(bigjoiner_linked_crtc);
8329 
8330 	PIPE_CONF_CHECK_I(dsc.compression_enable);
8331 	PIPE_CONF_CHECK_I(dsc.dsc_split);
8332 	PIPE_CONF_CHECK_I(dsc.compressed_bpp);
8333 
8334 	PIPE_CONF_CHECK_BOOL(splitter.enable);
8335 	PIPE_CONF_CHECK_I(splitter.link_count);
8336 	PIPE_CONF_CHECK_I(splitter.pixel_overlap);
8337 
8338 	PIPE_CONF_CHECK_I(mst_master_transcoder);
8339 
8340 	PIPE_CONF_CHECK_BOOL(vrr.enable);
8341 	PIPE_CONF_CHECK_I(vrr.vmin);
8342 	PIPE_CONF_CHECK_I(vrr.vmax);
8343 	PIPE_CONF_CHECK_I(vrr.flipline);
8344 	PIPE_CONF_CHECK_I(vrr.pipeline_full);
8345 
8346 #undef PIPE_CONF_CHECK_X
8347 #undef PIPE_CONF_CHECK_I
8348 #undef PIPE_CONF_CHECK_BOOL
8349 #undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
8350 #undef PIPE_CONF_CHECK_P
8351 #undef PIPE_CONF_CHECK_FLAGS
8352 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
8353 #undef PIPE_CONF_CHECK_COLOR_LUT
8354 #undef PIPE_CONF_QUIRK
8355 
8356 	return ret;
8357 }
8358 
8359 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
8360 					   const struct intel_crtc_state *pipe_config)
8361 {
8362 	if (pipe_config->has_pch_encoder) {
8363 		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
8364 							    &pipe_config->fdi_m_n);
8365 		int dotclock = pipe_config->hw.adjusted_mode.crtc_clock;
8366 
8367 		/*
8368 		 * FDI already provided one idea for the dotclock.
8369 		 * Yell if the encoder disagrees.
8370 		 */
8371 		drm_WARN(&dev_priv->drm,
8372 			 !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
8373 			 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
8374 			 fdi_dotclock, dotclock);
8375 	}
8376 }
8377 
8378 static void verify_wm_state(struct intel_crtc *crtc,
8379 			    struct intel_crtc_state *new_crtc_state)
8380 {
8381 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8382 	struct skl_hw_state {
8383 		struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
8384 		struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
8385 		struct skl_pipe_wm wm;
8386 	} *hw;
8387 	const struct skl_pipe_wm *sw_wm = &new_crtc_state->wm.skl.optimal;
8388 	int level, max_level = ilk_wm_max_level(dev_priv);
8389 	struct intel_plane *plane;
8390 	u8 hw_enabled_slices;
8391 
8392 	if (DISPLAY_VER(dev_priv) < 9 || !new_crtc_state->hw.active)
8393 		return;
8394 
8395 	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
8396 	if (!hw)
8397 		return;
8398 
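	/* Read back the wm/ddb hw state and compare it against the sw state below. */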
8399 	skl_pipe_wm_get_hw_state(crtc, &hw->wm);
8400 
8401 	skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
8402 
8403 	hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
8404 
8405 	if (DISPLAY_VER(dev_priv) >= 11 &&
8406 	    hw_enabled_slices != dev_priv->dbuf.enabled_slices)
8407 		drm_err(&dev_priv->drm,
8408 			"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
8409 			dev_priv->dbuf.enabled_slices,
8410 			hw_enabled_slices);
8411 
8412 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
8413 		const struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
8414 		const struct skl_wm_level *hw_wm_level, *sw_wm_level;
8415 
8416 		/* Watermarks */
8417 		for (level = 0; level <= max_level; level++) {
8418 			hw_wm_level = &hw->wm.planes[plane->id].wm[level];
8419 			sw_wm_level = skl_plane_wm_level(sw_wm, plane->id, level);
8420 
8421 			if (skl_wm_level_equals(hw_wm_level, sw_wm_level))
8422 				continue;
8423 
8424 			drm_err(&dev_priv->drm,
8425 				"[PLANE:%d:%s] mismatch in WM%d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
8426 				plane->base.base.id, plane->base.name, level,
8427 				sw_wm_level->enable,
8428 				sw_wm_level->blocks,
8429 				sw_wm_level->lines,
8430 				hw_wm_level->enable,
8431 				hw_wm_level->blocks,
8432 				hw_wm_level->lines);
8433 		}
8434 
8435 		hw_wm_level = &hw->wm.planes[plane->id].trans_wm;
8436 		sw_wm_level = skl_plane_trans_wm(sw_wm, plane->id);
8437 
8438 		if (!skl_wm_level_equals(hw_wm_level, sw_wm_level)) {
8439 			drm_err(&dev_priv->drm,
8440 				"[PLANE:%d:%s] mismatch in trans WM (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
8441 				plane->base.base.id, plane->base.name,
8442 				sw_wm_level->enable,
8443 				sw_wm_level->blocks,
8444 				sw_wm_level->lines,
8445 				hw_wm_level->enable,
8446 				hw_wm_level->blocks,
8447 				hw_wm_level->lines);
8448 		}
8449 
8450 		/* DDB */
8451 		hw_ddb_entry = &hw->ddb_y[plane->id];
8452 		sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane->id];
8453 
8454 		if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
8455 			drm_err(&dev_priv->drm,
8456 				"[PLANE:%d:%s] mismatch in DDB (expected (%u,%u), found (%u,%u))\n",
8457 				plane->base.base.id, plane->base.name,
8458 				sw_ddb_entry->start, sw_ddb_entry->end,
8459 				hw_ddb_entry->start, hw_ddb_entry->end);
8460 		}
8461 	}
8462 
8463 	kfree(hw);
8464 }
8465 
8466 static void
8467 verify_connector_state(struct intel_atomic_state *state,
8468 		       struct intel_crtc *crtc)
8469 {
8470 	struct drm_connector *connector;
8471 	struct drm_connector_state *new_conn_state;
8472 	int i;
8473 
8474 	for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) {
8475 		struct drm_encoder *encoder = connector->encoder;
8476 		struct intel_crtc_state *crtc_state = NULL;
8477 
8478 		if (new_conn_state->crtc != &crtc->base)
8479 			continue;
8480 
8481 		if (crtc)
8482 			crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
8483 
8484 		intel_connector_verify_state(crtc_state, new_conn_state);
8485 
8486 		I915_STATE_WARN(new_conn_state->best_encoder != encoder,
8487 		     "connector's atomic encoder doesn't match legacy encoder\n");
8488 	}
8489 }
8490 
8491 static void
8492 verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_state *state)
8493 {
8494 	struct intel_encoder *encoder;
8495 	struct drm_connector *connector;
8496 	struct drm_connector_state *old_conn_state, *new_conn_state;
8497 	int i;
8498 
8499 	for_each_intel_encoder(&dev_priv->drm, encoder) {
8500 		bool enabled = false, found = false;
8501 		enum pipe pipe;
8502 
8503 		drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
8504 			    encoder->base.base.id,
8505 			    encoder->base.name);
8506 
8507 		for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
8508 						   new_conn_state, i) {
8509 			if (old_conn_state->best_encoder == &encoder->base)
8510 				found = true;
8511 
8512 			if (new_conn_state->best_encoder != &encoder->base)
8513 				continue;
8514 			found = enabled = true;
8515 
8516 			I915_STATE_WARN(new_conn_state->crtc !=
8517 					encoder->base.crtc,
8518 			     "connector's crtc doesn't match encoder crtc\n");
8519 		}
8520 
8521 		if (!found)
8522 			continue;
8523 
8524 		I915_STATE_WARN(!!encoder->base.crtc != enabled,
8525 		     "encoder's enabled state mismatch "
8526 		     "(expected %i, found %i)\n",
8527 		     !!encoder->base.crtc, enabled);
8528 
8529 		if (!encoder->base.crtc) {
8530 			bool active;
8531 
8532 			active = encoder->get_hw_state(encoder, &pipe);
8533 			I915_STATE_WARN(active,
8534 			     "encoder detached but still enabled on pipe %c.\n",
8535 			     pipe_name(pipe));
8536 		}
8537 	}
8538 }
8539 
8540 static void
8541 verify_crtc_state(struct intel_crtc *crtc,
8542 		  struct intel_crtc_state *old_crtc_state,
8543 		  struct intel_crtc_state *new_crtc_state)
8544 {
8545 	struct drm_device *dev = crtc->base.dev;
8546 	struct drm_i915_private *dev_priv = to_i915(dev);
8547 	struct intel_encoder *encoder;
8548 	struct intel_crtc_state *pipe_config = old_crtc_state;
8549 	struct drm_atomic_state *state = old_crtc_state->uapi.state;
8550 	struct intel_crtc *master = crtc;
8551 
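	/*
	 * The old state is no longer needed, so recycle old_crtc_state as
	 * scratch space for the hw state readout below.
	 */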
8552 	__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
8553 	intel_crtc_free_hw_state(old_crtc_state);
8554 	intel_crtc_state_reset(old_crtc_state, crtc);
8555 	old_crtc_state->uapi.state = state;
8556 
8557 	drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
8558 		    crtc->base.name);
8559 
8560 	pipe_config->hw.enable = new_crtc_state->hw.enable;
8561 
8562 	intel_crtc_get_pipe_config(pipe_config);
8563 
8564 	/* we keep both pipes enabled on 830 */
8565 	if (IS_I830(dev_priv) && pipe_config->hw.active)
8566 		pipe_config->hw.active = new_crtc_state->hw.active;
8567 
8568 	I915_STATE_WARN(new_crtc_state->hw.active != pipe_config->hw.active,
8569 			"crtc active state doesn't match with hw state "
8570 			"(expected %i, found %i)\n",
8571 			new_crtc_state->hw.active, pipe_config->hw.active);
8572 
8573 	I915_STATE_WARN(crtc->active != new_crtc_state->hw.active,
8574 			"transitional active state does not match atomic hw state "
8575 			"(expected %i, found %i)\n",
8576 			new_crtc_state->hw.active, crtc->active);
8577 
8578 	if (new_crtc_state->bigjoiner_slave)
8579 		master = new_crtc_state->bigjoiner_linked_crtc;
8580 
8581 	for_each_encoder_on_crtc(dev, &master->base, encoder) {
8582 		enum pipe pipe;
8583 		bool active;
8584 
8585 		active = encoder->get_hw_state(encoder, &pipe);
8586 		I915_STATE_WARN(active != new_crtc_state->hw.active,
8587 				"[ENCODER:%i] active %i with crtc active %i\n",
8588 				encoder->base.base.id, active,
8589 				new_crtc_state->hw.active);
8590 
8591 		I915_STATE_WARN(active && master->pipe != pipe,
8592 				"Encoder connected to wrong pipe %c\n",
8593 				pipe_name(pipe));
8594 
8595 		if (active)
8596 			intel_encoder_get_config(encoder, pipe_config);
8597 	}
8598 
8599 	if (!new_crtc_state->hw.active)
8600 		return;
8601 
8602 	intel_pipe_config_sanity_check(dev_priv, pipe_config);
8603 
8604 	if (!intel_pipe_config_compare(new_crtc_state,
8605 				       pipe_config, false)) {
8606 		I915_STATE_WARN(1, "pipe state doesn't match!\n");
8607 		intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
8608 		intel_dump_pipe_config(new_crtc_state, NULL, "[sw state]");
8609 	}
8610 }
8611 
8612 static void
8613 intel_verify_planes(struct intel_atomic_state *state)
8614 {
8615 	struct intel_plane *plane;
8616 	const struct intel_plane_state *plane_state;
8617 	int i;
8618 
8619 	for_each_new_intel_plane_in_state(state, plane,
8620 					  plane_state, i)
8621 		assert_plane(plane, plane_state->planar_slave ||
8622 			     plane_state->uapi.visible);
8623 }
8624 
8625 static void
8626 verify_single_dpll_state(struct drm_i915_private *dev_priv,
8627 			 struct intel_shared_dpll *pll,
8628 			 struct intel_crtc *crtc,
8629 			 struct intel_crtc_state *new_crtc_state)
8630 {
8631 	struct intel_dpll_hw_state dpll_hw_state;
8632 	u8 pipe_mask;
8633 	bool active;
8634 
8635 	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
8636 
8637 	drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);
8638 
8639 	active = intel_dpll_get_hw_state(dev_priv, pll, &dpll_hw_state);
8640 
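	/* Cross-check the sw pll tracking (on/active_mask) against the hw state. */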
8641 	if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
8642 		I915_STATE_WARN(!pll->on && pll->active_mask,
8643 		     "pll in active use but not on in sw tracking\n");
8644 		I915_STATE_WARN(pll->on && !pll->active_mask,
8645 		     "pll is on but not used by any active pipe\n");
8646 		I915_STATE_WARN(pll->on != active,
8647 		     "pll on state mismatch (expected %i, found %i)\n",
8648 		     pll->on, active);
8649 	}
8650 
8651 	if (!crtc) {
8652 		I915_STATE_WARN(pll->active_mask & ~pll->state.pipe_mask,
8653 				"more active pll users than references: 0x%x vs 0x%x\n",
8654 				pll->active_mask, pll->state.pipe_mask);
8655 
8656 		return;
8657 	}
8658 
8659 	pipe_mask = BIT(crtc->pipe);
8660 
8661 	if (new_crtc_state->hw.active)
8662 		I915_STATE_WARN(!(pll->active_mask & pipe_mask),
8663 				"pll active mismatch (expected pipe %c in active mask 0x%x)\n",
8664 				pipe_name(crtc->pipe), pll->active_mask);
8665 	else
8666 		I915_STATE_WARN(pll->active_mask & pipe_mask,
8667 				"pll active mismatch (didn't expect pipe %c in active mask 0x%x)\n",
8668 				pipe_name(crtc->pipe), pll->active_mask);
8669 
8670 	I915_STATE_WARN(!(pll->state.pipe_mask & pipe_mask),
8671 			"pll enabled crtcs mismatch (expected 0x%x in 0x%x)\n",
8672 			pipe_mask, pll->state.pipe_mask);
8673 
8674 	I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
8675 					  &dpll_hw_state,
8676 					  sizeof(dpll_hw_state)),
8677 			"pll hw state mismatch\n");
8678 }
8679 
8680 static void
8681 verify_shared_dpll_state(struct intel_crtc *crtc,
8682 			 struct intel_crtc_state *old_crtc_state,
8683 			 struct intel_crtc_state *new_crtc_state)
8684 {
8685 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8686 
8687 	if (new_crtc_state->shared_dpll)
8688 		verify_single_dpll_state(dev_priv, new_crtc_state->shared_dpll, crtc, new_crtc_state);
8689 
8690 	if (old_crtc_state->shared_dpll &&
8691 	    old_crtc_state->shared_dpll != new_crtc_state->shared_dpll) {
8692 		u8 pipe_mask = BIT(crtc->pipe);
8693 		struct intel_shared_dpll *pll = old_crtc_state->shared_dpll;
8694 
8695 		I915_STATE_WARN(pll->active_mask & pipe_mask,
8696 				"pll active mismatch (didn't expect pipe %c in active mask (0x%x))\n",
8697 				pipe_name(crtc->pipe), pll->active_mask);
8698 		I915_STATE_WARN(pll->state.pipe_mask & pipe_mask,
8699 				"pll enabled crtcs mismatch (found %x in enabled mask (0x%x))\n",
8700 				pipe_name(crtc->pipe), pll->state.pipe_mask);
8701 	}
8702 }
8703 
8704 static void
8705 intel_modeset_verify_crtc(struct intel_crtc *crtc,
8706 			  struct intel_atomic_state *state,
8707 			  struct intel_crtc_state *old_crtc_state,
8708 			  struct intel_crtc_state *new_crtc_state)
8709 {
8710 	if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe)
8711 		return;
8712 
8713 	verify_wm_state(crtc, new_crtc_state);
8714 	verify_connector_state(state, crtc);
8715 	verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
8716 	verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
8717 }
8718 
8719 static void
8720 verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
8721 {
8722 	int i;
8723 
8724 	for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
8725 		verify_single_dpll_state(dev_priv,
8726 					 &dev_priv->dpll.shared_dplls[i],
8727 					 NULL, NULL);
8728 }
8729 
8730 static void
8731 intel_modeset_verify_disabled(struct drm_i915_private *dev_priv,
8732 			      struct intel_atomic_state *state)
8733 {
8734 	verify_encoder_state(dev_priv, state);
8735 	verify_connector_state(state, NULL);
8736 	verify_disabled_dpll_state(dev_priv);
8737 }
8738 
8739 static void
8740 intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
8741 {
8742 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8743 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8744 	struct drm_display_mode adjusted_mode =
8745 		crtc_state->hw.adjusted_mode;
8746 
8747 	if (crtc_state->vrr.enable) {
8748 		adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
8749 		adjusted_mode.crtc_vblank_end = crtc_state->vrr.vmax;
8750 		adjusted_mode.crtc_vblank_start = intel_vrr_vmin_vblank_start(crtc_state);
8751 		crtc->vmax_vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
8752 	}
8753 
8754 	drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
8755 
8756 	crtc->mode_flags = crtc_state->mode_flags;
8757 
8758 	/*
8759 	 * The scanline counter increments at the leading edge of hsync.
8760 	 *
8761 	 * On most platforms it starts counting from vtotal-1 on the
8762 	 * first active line. That means the scanline counter value is
8763 	 * always one less than what we would expect. Ie. just after
8764 	 * start of vblank, which also occurs at start of hsync (on the
8765 	 * last active line), the scanline counter will read vblank_start-1.
8766 	 *
8767 	 * On gen2 the scanline counter starts counting from 1 instead
8768 	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
8769 	 * to keep the value positive), instead of adding one.
8770 	 *
8771 	 * On HSW+ the behaviour of the scanline counter depends on the output
8772 	 * type. For DP ports it behaves like most other platforms, but on HDMI
8773 	 * there's an extra 1 line difference. So we need to add two instead of
8774 	 * one to the value.
8775 	 *
8776 	 * On VLV/CHV DSI the scanline counter would appear to increment
8777 	 * approx. 1/3 of a scanline before start of vblank. Unfortunately
8778 	 * that means we can't tell whether we're in vblank or not while
8779 	 * we're on that particular line. We must still set scanline_offset
8780 	 * to 1 so that the vblank timestamps come out correct when we query
8781 	 * the scanline counter from within the vblank interrupt handler.
8782 	 * However if queried just before the start of vblank we'll get an
8783 	 * answer that's slightly in the future.
8784 	 */
8785 	if (IS_DISPLAY_VER(dev_priv, 2)) {
8786 		int vtotal;
8787 
8788 		vtotal = adjusted_mode.crtc_vtotal;
8789 		if (adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8790 			vtotal /= 2;
8791 
8792 		crtc->scanline_offset = vtotal - 1;
8793 	} else if (HAS_DDI(dev_priv) &&
8794 		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
8795 		crtc->scanline_offset = 2;
8796 	} else {
8797 		crtc->scanline_offset = 1;
8798 	}
8799 }
8800 
8801 static void intel_modeset_clear_plls(struct intel_atomic_state *state)
8802 {
8803 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8804 	struct intel_crtc_state *new_crtc_state;
8805 	struct intel_crtc *crtc;
8806 	int i;
8807 
8808 	if (!dev_priv->display.crtc_compute_clock)
8809 		return;
8810 
8811 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
8812 		if (!intel_crtc_needs_modeset(new_crtc_state))
8813 			continue;
8814 
8815 		intel_release_shared_dplls(state, crtc);
8816 	}
8817 }
8818 
8819 /*
8820  * This implements the workaround described in the "notes" section of the mode
8821  * set sequence documentation. When going from no pipes or single pipe to
8822  * multiple pipes, and planes are enabled after the pipe, we need to wait at
8823  * least 2 vblanks on the first pipe before enabling planes on the second pipe.
8824  */
8825 static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
8826 {
8827 	struct intel_crtc_state *crtc_state;
8828 	struct intel_crtc *crtc;
8829 	struct intel_crtc_state *first_crtc_state = NULL;
8830 	struct intel_crtc_state *other_crtc_state = NULL;
8831 	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
8832 	int i;
8833 
8834 	/* look at all crtcs that are going to be enabled during the modeset */
8835 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
8836 		if (!crtc_state->hw.active ||
8837 		    !intel_crtc_needs_modeset(crtc_state))
8838 			continue;
8839 
8840 		if (first_crtc_state) {
8841 			other_crtc_state = crtc_state;
8842 			break;
8843 		} else {
8844 			first_crtc_state = crtc_state;
8845 			first_pipe = crtc->pipe;
8846 		}
8847 	}
8848 
8849 	/* No workaround needed? */
8850 	if (!first_crtc_state)
8851 		return 0;
8852 
8853 	/* w/a possibly needed, check how many crtcs are already enabled. */
8854 	for_each_intel_crtc(state->base.dev, crtc) {
8855 		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
8856 		if (IS_ERR(crtc_state))
8857 			return PTR_ERR(crtc_state);
8858 
8859 		crtc_state->hsw_workaround_pipe = INVALID_PIPE;
8860 
8861 		if (!crtc_state->hw.active ||
8862 		    intel_crtc_needs_modeset(crtc_state))
8863 			continue;
8864 
8865 		/* 2 or more enabled crtcs means no need for w/a */
8866 		if (enabled_pipe != INVALID_PIPE)
8867 			return 0;
8868 
8869 		enabled_pipe = crtc->pipe;
8870 	}
8871 
8872 	if (enabled_pipe != INVALID_PIPE)
8873 		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
8874 	else if (other_crtc_state)
8875 		other_crtc_state->hsw_workaround_pipe = first_pipe;
8876 
8877 	return 0;
8878 }
8879 
8880 u8 intel_calc_active_pipes(struct intel_atomic_state *state,
8881 			   u8 active_pipes)
8882 {
8883 	const struct intel_crtc_state *crtc_state;
8884 	struct intel_crtc *crtc;
8885 	int i;
8886 
8887 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
8888 		if (crtc_state->hw.active)
8889 			active_pipes |= BIT(crtc->pipe);
8890 		else
8891 			active_pipes &= ~BIT(crtc->pipe);
8892 	}
8893 
8894 	return active_pipes;
8895 }
8896 
8897 static int intel_modeset_checks(struct intel_atomic_state *state)
8898 {
8899 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8900 
8901 	state->modeset = true;
8902 
8903 	if (IS_HASWELL(dev_priv))
8904 		return hsw_mode_set_planes_workaround(state);
8905 
8906 	return 0;
8907 }
8908 
8909 /*
8910  * Handle calculation of various watermark data at the end of the atomic check
8911  * phase.  The code here should be run after the per-crtc and per-plane 'check'
8912  * handlers to ensure that all derived state has been updated.
8913  */
8914 static int calc_watermark_data(struct intel_atomic_state *state)
8915 {
8916 	struct drm_device *dev = state->base.dev;
8917 	struct drm_i915_private *dev_priv = to_i915(dev);
8918 
8919 	/* Is there platform-specific watermark information to calculate? */
8920 	if (dev_priv->display.compute_global_watermarks)
8921 		return dev_priv->display.compute_global_watermarks(state);
8922 
8923 	return 0;
8924 }
8925 
8926 static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
8927 				     struct intel_crtc_state *new_crtc_state)
8928 {
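	/*
	 * If the old and new states compare equal under the relaxed fastset
	 * rules, downgrade the full modeset to a mere pipe update.
	 */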
8929 	if (!intel_pipe_config_compare(old_crtc_state, new_crtc_state, true))
8930 		return;
8931 
8932 	new_crtc_state->uapi.mode_changed = false;
8933 	new_crtc_state->update_pipe = true;
8934 }
8935 
8936 static void intel_crtc_copy_fastset(const struct intel_crtc_state *old_crtc_state,
8937 				    struct intel_crtc_state *new_crtc_state)
8938 {
8939 	/*
8940 	 * If we're not doing the full modeset we want to
8941 	 * keep the current M/N values as they may be
8942 	 * sufficiently different to the computed values
8943 	 * to cause problems.
8944 	 *
8945 	 * FIXME: should really copy more fuzzy state here
8946 	 */
8947 	new_crtc_state->fdi_m_n = old_crtc_state->fdi_m_n;
8948 	new_crtc_state->dp_m_n = old_crtc_state->dp_m_n;
8949 	new_crtc_state->dp_m2_n2 = old_crtc_state->dp_m2_n2;
8950 	new_crtc_state->has_drrs = old_crtc_state->has_drrs;
8951 }
8952 
8953 static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state,
8954 					  struct intel_crtc *crtc,
8955 					  u8 plane_ids_mask)
8956 {
8957 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
8958 	struct intel_plane *plane;
8959 
8960 	for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
8961 		struct intel_plane_state *plane_state;
8962 
8963 		if ((plane_ids_mask & BIT(plane->id)) == 0)
8964 			continue;
8965 
8966 		plane_state = intel_atomic_get_plane_state(state, plane);
8967 		if (IS_ERR(plane_state))
8968 			return PTR_ERR(plane_state);
8969 	}
8970 
8971 	return 0;
8972 }
8973 
8974 int intel_atomic_add_affected_planes(struct intel_atomic_state *state,
8975 				     struct intel_crtc *crtc)
8976 {
8977 	const struct intel_crtc_state *old_crtc_state =
8978 		intel_atomic_get_old_crtc_state(state, crtc);
8979 	const struct intel_crtc_state *new_crtc_state =
8980 		intel_atomic_get_new_crtc_state(state, crtc);
8981 
8982 	return intel_crtc_add_planes_to_state(state, crtc,
8983 					      old_crtc_state->enabled_planes |
8984 					      new_crtc_state->enabled_planes);
8985 }
8986 
8987 static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
8988 {
8989 	/* See {hsw,vlv,ivb}_plane_ratio() */
8990 	return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) ||
8991 		IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8992 		IS_IVYBRIDGE(dev_priv);
8993 }
8994 
8995 static int intel_crtc_add_bigjoiner_planes(struct intel_atomic_state *state,
8996 					   struct intel_crtc *crtc,
8997 					   struct intel_crtc *other)
8998 {
8999 	const struct intel_plane_state *plane_state;
9000 	struct intel_plane *plane;
9001 	u8 plane_ids = 0;
9002 	int i;
9003 
9004 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9005 		if (plane->pipe == crtc->pipe)
9006 			plane_ids |= BIT(plane->id);
9007 	}
9008 
9009 	return intel_crtc_add_planes_to_state(state, other, plane_ids);
9010 }
9011 
9012 static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
9013 {
9014 	const struct intel_crtc_state *crtc_state;
9015 	struct intel_crtc *crtc;
9016 	int i;
9017 
9018 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9019 		int ret;
9020 
9021 		if (!crtc_state->bigjoiner)
9022 			continue;
9023 
9024 		ret = intel_crtc_add_bigjoiner_planes(state, crtc,
9025 						      crtc_state->bigjoiner_linked_crtc);
9026 		if (ret)
9027 			return ret;
9028 	}
9029 
9030 	return 0;
9031 }
9032 
9033 static int intel_atomic_check_planes(struct intel_atomic_state *state)
9034 {
9035 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9036 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
9037 	struct intel_plane_state *plane_state;
9038 	struct intel_plane *plane;
9039 	struct intel_crtc *crtc;
9040 	int i, ret;
9041 
9042 	ret = icl_add_linked_planes(state);
9043 	if (ret)
9044 		return ret;
9045 
9046 	ret = intel_bigjoiner_add_affected_planes(state);
9047 	if (ret)
9048 		return ret;
9049 
9050 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9051 		ret = intel_plane_atomic_check(state, plane);
9052 		if (ret) {
9053 			drm_dbg_atomic(&dev_priv->drm,
9054 				       "[PLANE:%d:%s] atomic driver check failed\n",
9055 				       plane->base.base.id, plane->base.name);
9056 			return ret;
9057 		}
9058 	}
9059 
9060 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9061 					    new_crtc_state, i) {
9062 		u8 old_active_planes, new_active_planes;
9063 
9064 		ret = icl_check_nv12_planes(new_crtc_state);
9065 		if (ret)
9066 			return ret;
9067 
9068 		/*
9069 		 * On some platforms the number of active planes affects
9070 		 * the planes' minimum cdclk calculation. Add such planes
9071 		 * to the state before we compute the minimum cdclk.
9072 		 */
9073 		if (!active_planes_affects_min_cdclk(dev_priv))
9074 			continue;
9075 
9076 		old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
9077 		new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR);
9078 
9079 		if (hweight8(old_active_planes) == hweight8(new_active_planes))
9080 			continue;
9081 
9082 		ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes);
9083 		if (ret)
9084 			return ret;
9085 	}
9086 
9087 	return 0;
9088 }
9089 
9090 static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
9091 				    bool *need_cdclk_calc)
9092 {
9093 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9094 	const struct intel_cdclk_state *old_cdclk_state;
9095 	const struct intel_cdclk_state *new_cdclk_state;
9096 	struct intel_plane_state *plane_state;
9097 	struct intel_bw_state *new_bw_state;
9098 	struct intel_plane *plane;
9099 	int min_cdclk = 0;
9100 	enum pipe pipe;
9101 	int ret;
9102 	int i;
9103 	/*
9104 	 * active_planes bitmask has been updated, and potentially
9105 	 * affected planes are part of the state. We can now
9106 	 * compute the minimum cdclk for each plane.
9107 	 */
9108 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
9109 		ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
9110 		if (ret)
9111 			return ret;
9112 	}
9113 
9114 	old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
9115 	new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
9116 
9117 	if (new_cdclk_state &&
9118 	    old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
9119 		*need_cdclk_calc = true;
9120 
9121 	ret = dev_priv->display.bw_calc_min_cdclk(state);
9122 	if (ret)
9123 		return ret;
9124 
9125 	new_bw_state = intel_atomic_get_new_bw_state(state);
9126 
9127 	if (!new_cdclk_state || !new_bw_state)
9128 		return 0;
9129 
9130 	for_each_pipe(dev_priv, pipe) {
9131 		min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);
9132 
9133 		/*
9134 		 * Currently only force a cdclk recomputation when it needs to increase.
9135 		 */
9136 		if (new_bw_state->min_cdclk > min_cdclk)
9137 			*need_cdclk_calc = true;
9138 	}
9139 
9140 	return 0;
9141 }
9142 
9143 static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
9144 {
9145 	struct intel_crtc_state *crtc_state;
9146 	struct intel_crtc *crtc;
9147 	int i;
9148 
9149 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9150 		struct drm_i915_private *i915 = to_i915(crtc->base.dev);
9151 		int ret;
9152 
9153 		ret = intel_crtc_atomic_check(state, crtc);
9154 		if (ret) {
9155 			drm_dbg_atomic(&i915->drm,
9156 				       "[CRTC:%d:%s] atomic driver check failed\n",
9157 				       crtc->base.base.id, crtc->base.name);
9158 			return ret;
9159 		}
9160 	}
9161 
9162 	return 0;
9163 }
9164 
9165 static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
9166 					       u8 transcoders)
9167 {
9168 	const struct intel_crtc_state *new_crtc_state;
9169 	struct intel_crtc *crtc;
9170 	int i;
9171 
9172 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9173 		if (new_crtc_state->hw.enable &&
9174 		    transcoders & BIT(new_crtc_state->cpu_transcoder) &&
9175 		    intel_crtc_needs_modeset(new_crtc_state))
9176 			return true;
9177 	}
9178 
9179 	return false;
9180 }
9181 
9182 static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state,
9183 					struct intel_crtc *crtc,
9184 					struct intel_crtc_state *old_crtc_state,
9185 					struct intel_crtc_state *new_crtc_state)
9186 {
9187 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9188 	struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
9189 	struct intel_crtc *slave, *master;
9190 
9191 	/* slave being enabled, is the master still claiming this crtc? */
9192 	if (old_crtc_state->bigjoiner_slave) {
9193 		slave = crtc;
9194 		master = old_crtc_state->bigjoiner_linked_crtc;
9195 		master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
9196 		if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
9197 			goto claimed;
9198 	}
9199 
9200 	if (!new_crtc_state->bigjoiner)
9201 		return 0;
9202 
9203 	if (1 + crtc->pipe >= INTEL_NUM_PIPES(dev_priv)) {
9204 		DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
9205 			      "CRTC + 1 to be used, doesn't exist\n",
9206 			      crtc->base.base.id, crtc->base.name);
9207 		return -EINVAL;
9208 	}
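	/* The bigjoiner slave is always the pipe immediately following the master. */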
9209 
9210 	slave = new_crtc_state->bigjoiner_linked_crtc =
9211 		intel_get_crtc_for_pipe(dev_priv, crtc->pipe + 1);
9212 	slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave);
9213 	master = crtc;
9214 	if (IS_ERR(slave_crtc_state))
9215 		return PTR_ERR(slave_crtc_state);
9216 
9217 	/* master being enabled, slave was already configured? */
9218 	if (slave_crtc_state->uapi.enable)
9219 		goto claimed;
9220 
9221 	DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
9222 		      slave->base.base.id, slave->base.name);
9223 
9224 	return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);
9225 
9226 claimed:
9227 	DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
9228 		      "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
9229 		      slave->base.base.id, slave->base.name,
9230 		      master->base.base.id, master->base.name);
9231 	return -EINVAL;
9232 }
9233 
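/*
 * Tear down the bigjoiner master/slave link and restore the slave's
 * hw state from its uapi state.
 */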
9234 static void kill_bigjoiner_slave(struct intel_atomic_state *state,
9235 				 struct intel_crtc_state *master_crtc_state)
9236 {
9237 	struct intel_crtc_state *slave_crtc_state =
9238 		intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc);
9239 
9240 	slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false;
9241 	slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false;
9242 	slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL;
9243 	intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state);
9244 }
9245 
9246 /**
9247  * DOC: asynchronous flip implementation
9248  *
9249  * Asynchronous page flip is the implementation for the DRM_MODE_PAGE_FLIP_ASYNC
9250  * flag. Currently async flip is only supported via the drmModePageFlip IOCTL.
9251  * Correspondingly, support is currently added for the primary plane only.
9252  *
9253  * Async flip can only change the plane surface address, so anything else
9254  * changing is rejected from the intel_atomic_check_async() function.
9255  * Once this check is cleared, flip done interrupt is enabled using
9256  * the intel_crtc_enable_flip_done() function.
9257  *
9258  * As soon as the surface address register is written, flip done interrupt is
9259  * generated and the requested events are sent to userspace in the interrupt
9260  * handler itself. The timestamp and sequence sent during the flip done event
9261  * correspond to the last vblank and have no relation to the actual time when
9262  * the flip done event was sent.
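 *
 * For reference, userspace requests an async flip through the legacy page
 * flip IOCTL roughly as follows (illustrative sketch using the libdrm
 * wrapper):
 *
 *	drmModePageFlip(fd, crtc_id, fb_id,
 *			DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_PAGE_FLIP_ASYNC,
 *			user_data);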
9263  */
9264 static int intel_atomic_check_async(struct intel_atomic_state *state)
9265 {
9266 	struct drm_i915_private *i915 = to_i915(state->base.dev);
9267 	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
9268 	const struct intel_plane_state *new_plane_state, *old_plane_state;
9269 	struct intel_crtc *crtc;
9270 	struct intel_plane *plane;
9271 	int i;
9272 
9273 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9274 					    new_crtc_state, i) {
9275 		if (intel_crtc_needs_modeset(new_crtc_state)) {
9276 			drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
9277 			return -EINVAL;
9278 		}
9279 
9280 		if (!new_crtc_state->hw.active) {
9281 			drm_dbg_kms(&i915->drm, "CRTC inactive\n");
9282 			return -EINVAL;
9283 		}
9284 		if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
9285 			drm_dbg_kms(&i915->drm,
9286 				    "Active planes cannot be changed during async flip\n");
9287 			return -EINVAL;
9288 		}
9289 	}
9290 
9291 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
9292 					     new_plane_state, i) {
9293 		/*
9294 		 * TODO: Async flip is only supported through the page flip IOCTL
9295 		 * as of now, so support is currently added for the primary plane only.
9296 		 * Support for other planes on platforms that support
9297 		 * this (vlv/chv and icl+) should be added when async flip is
9298 		 * enabled in the atomic IOCTL path.
9299 		 */
9300 		if (!plane->async_flip)
9301 			return -EINVAL;
9302 
9303 		/*
9304 		 * FIXME: This check is kept generic for all platforms.
9305 		 * Need to verify this for all gen9 and gen10 platforms to enable
9306 		 * this selectively if required.
9307 		 */
9308 		switch (new_plane_state->hw.fb->modifier) {
9309 		case I915_FORMAT_MOD_X_TILED:
9310 		case I915_FORMAT_MOD_Y_TILED:
9311 		case I915_FORMAT_MOD_Yf_TILED:
9312 			break;
9313 		default:
9314 			drm_dbg_kms(&i915->drm,
9315 				    "Linear memory/CCS does not support async flips\n");
9316 			return -EINVAL;
9317 		}
9318 
9319 		if (old_plane_state->view.color_plane[0].stride !=
9320 		    new_plane_state->view.color_plane[0].stride) {
9321 			drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
9322 			return -EINVAL;
9323 		}
9324 
9325 		if (old_plane_state->hw.fb->modifier !=
9326 		    new_plane_state->hw.fb->modifier) {
9327 			drm_dbg_kms(&i915->drm,
9328 				    "Framebuffer modifiers cannot be changed in async flip\n");
9329 			return -EINVAL;
9330 		}
9331 
9332 		if (old_plane_state->hw.fb->format !=
9333 		    new_plane_state->hw.fb->format) {
9334 			drm_dbg_kms(&i915->drm,
9335 				    "Framebuffer format cannot be changed in async flip\n");
9336 			return -EINVAL;
9337 		}
9338 
9339 		if (old_plane_state->hw.rotation !=
9340 		    new_plane_state->hw.rotation) {
9341 			drm_dbg_kms(&i915->drm, "Rotation cannot be changed in async flip\n");
9342 			return -EINVAL;
9343 		}
9344 
9345 		if (!drm_rect_equals(&old_plane_state->uapi.src, &new_plane_state->uapi.src) ||
9346 		    !drm_rect_equals(&old_plane_state->uapi.dst, &new_plane_state->uapi.dst)) {
9347 			drm_dbg_kms(&i915->drm,
9348 				    "Plane size/co-ordinates cannot be changed in async flip\n");
9349 			return -EINVAL;
9350 		}
9351 
9352 		if (old_plane_state->hw.alpha != new_plane_state->hw.alpha) {
9353 			drm_dbg_kms(&i915->drm, "Alpha value cannot be changed in async flip\n");
9354 			return -EINVAL;
9355 		}
9356 
9357 		if (old_plane_state->hw.pixel_blend_mode !=
9358 		    new_plane_state->hw.pixel_blend_mode) {
9359 			drm_dbg_kms(&i915->drm,
9360 				    "Pixel blend mode cannot be changed in async flip\n");
9361 			return -EINVAL;
9362 		}
9363 
9364 		if (old_plane_state->hw.color_encoding != new_plane_state->hw.color_encoding) {
9365 			drm_dbg_kms(&i915->drm,
9366 				    "Color encoding cannot be changed in async flip\n");
9367 			return -EINVAL;
9368 		}
9369 
9370 		if (old_plane_state->hw.color_range != new_plane_state->hw.color_range) {
9371 			drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
9372 			return -EINVAL;
9373 		}
9374 	}
9375 
9376 	return 0;
9377 }
9378 
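/*
 * Pull the CRTCs linked via bigjoiner into the state and mark them for
 * modeset where needed, then tear down any stale bigjoiner links so they
 * can be re-established during the atomic check.
 */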
9379 static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state)
9380 {
9381 	struct intel_crtc_state *crtc_state;
9382 	struct intel_crtc *crtc;
9383 	int i;
9384 
9385 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9386 		struct intel_crtc_state *linked_crtc_state;
9387 		struct intel_crtc *linked_crtc;
9388 		int ret;
9389 
9390 		if (!crtc_state->bigjoiner)
9391 			continue;
9392 
9393 		linked_crtc = crtc_state->bigjoiner_linked_crtc;
9394 		linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc);
9395 		if (IS_ERR(linked_crtc_state))
9396 			return PTR_ERR(linked_crtc_state);
9397 
9398 		if (!intel_crtc_needs_modeset(crtc_state))
9399 			continue;
9400 
9401 		linked_crtc_state->uapi.mode_changed = true;
9402 
9403 		ret = drm_atomic_add_affected_connectors(&state->base,
9404 							 &linked_crtc->base);
9405 		if (ret)
9406 			return ret;
9407 
9408 		ret = intel_atomic_add_affected_planes(state, linked_crtc);
9409 		if (ret)
9410 			return ret;
9411 	}
9412 
9413 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9414 		/* Kill old bigjoiner link, we may re-establish afterwards */
9415 		if (intel_crtc_needs_modeset(crtc_state) &&
9416 		    crtc_state->bigjoiner && !crtc_state->bigjoiner_slave)
9417 			kill_bigjoiner_slave(state, crtc_state);
9418 	}
9419 
9420 	return 0;
9421 }
9422 
9423 /**
9424  * intel_atomic_check - validate state object
9425  * @dev: drm device
9426  * @_state: state to validate
9427  */
9428 static int intel_atomic_check(struct drm_device *dev,
9429 			      struct drm_atomic_state *_state)
9430 {
9431 	struct drm_i915_private *dev_priv = to_i915(dev);
9432 	struct intel_atomic_state *state = to_intel_atomic_state(_state);
9433 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
9434 	struct intel_crtc *crtc;
9435 	int ret, i;
9436 	bool any_ms = false;
9437 
9438 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9439 					    new_crtc_state, i) {
9440 		if (new_crtc_state->inherited != old_crtc_state->inherited)
9441 			new_crtc_state->uapi.mode_changed = true;
9442 	}
9443 
9444 	intel_vrr_check_modeset(state);
9445 
9446 	ret = drm_atomic_helper_check_modeset(dev, &state->base);
9447 	if (ret)
9448 		goto fail;
9449 
9450 	ret = intel_bigjoiner_add_affected_crtcs(state);
9451 	if (ret)
9452 		goto fail;
9453 
9454 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9455 					    new_crtc_state, i) {
9456 		if (!intel_crtc_needs_modeset(new_crtc_state)) {
9457 			/* Light copy */
9458 			intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state);
9459 
9460 			continue;
9461 		}
9462 
9463 		if (!new_crtc_state->uapi.enable) {
9464 			if (!new_crtc_state->bigjoiner_slave) {
9465 				intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state);
9466 				any_ms = true;
9467 			}
9468 			continue;
9469 		}
9470 
9471 		ret = intel_crtc_prepare_cleared_state(state, new_crtc_state);
9472 		if (ret)
9473 			goto fail;
9474 
9475 		ret = intel_modeset_pipe_config(state, new_crtc_state);
9476 		if (ret)
9477 			goto fail;
9478 
9479 		ret = intel_atomic_check_bigjoiner(state, crtc, old_crtc_state,
9480 						   new_crtc_state);
9481 		if (ret)
9482 			goto fail;
9483 	}
9484 
9485 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9486 					    new_crtc_state, i) {
9487 		if (!intel_crtc_needs_modeset(new_crtc_state))
9488 			continue;
9489 
9490 		ret = intel_modeset_pipe_config_late(new_crtc_state);
9491 		if (ret)
9492 			goto fail;
9493 
9494 		intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
9495 	}
9496 
9497 	/*
9498 	 * Check if fastset is allowed by external dependencies like other
9499 	 * pipes and transcoders.
9500 	 *
9501 	 * Right now it only forces a fullmodeset when the MST master
9502 	 * transcoder did not change but the pipe of the master transcoder
9503 	 * needs a fullmodeset, in which case all slaves also need to do a
9504 	 * fullmodeset. Likewise, in case of port synced crtcs, if one of
9505 	 * the synced crtcs needs a full modeset, all other synced crtcs
9506 	 * are forced to do a full modeset as well.
9507 	 */
9508 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9509 		if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
9510 			continue;
9511 
9512 		if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
9513 			enum transcoder master = new_crtc_state->mst_master_transcoder;
9514 
9515 			if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
9516 				new_crtc_state->uapi.mode_changed = true;
9517 				new_crtc_state->update_pipe = false;
9518 			}
9519 		}
9520 
9521 		if (is_trans_port_sync_mode(new_crtc_state)) {
9522 			u8 trans = new_crtc_state->sync_mode_slaves_mask;
9523 
9524 			if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
9525 				trans |= BIT(new_crtc_state->master_transcoder);
9526 
9527 			if (intel_cpu_transcoders_need_modeset(state, trans)) {
9528 				new_crtc_state->uapi.mode_changed = true;
9529 				new_crtc_state->update_pipe = false;
9530 			}
9531 		}
9532 
9533 		if (new_crtc_state->bigjoiner) {
9534 			struct intel_crtc_state *linked_crtc_state =
9535 				intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc);
9536 
9537 			if (intel_crtc_needs_modeset(linked_crtc_state)) {
9538 				new_crtc_state->uapi.mode_changed = true;
9539 				new_crtc_state->update_pipe = false;
9540 			}
9541 		}
9542 	}
9543 
9544 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9545 					    new_crtc_state, i) {
9546 		if (intel_crtc_needs_modeset(new_crtc_state)) {
9547 			any_ms = true;
9548 			continue;
9549 		}
9550 
9551 		if (!new_crtc_state->update_pipe)
9552 			continue;
9553 
9554 		intel_crtc_copy_fastset(old_crtc_state, new_crtc_state);
9555 	}
9556 
9557 	if (any_ms && !check_digital_port_conflicts(state)) {
9558 		drm_dbg_kms(&dev_priv->drm,
9559 			    "rejecting conflicting digital port configuration\n");
9560 		ret = -EINVAL;
9561 		goto fail;
9562 	}
9563 
9564 	ret = drm_dp_mst_atomic_check(&state->base);
9565 	if (ret)
9566 		goto fail;
9567 
9568 	ret = intel_atomic_check_planes(state);
9569 	if (ret)
9570 		goto fail;
9571 
9572 	intel_fbc_choose_crtc(dev_priv, state);
9573 	ret = calc_watermark_data(state);
9574 	if (ret)
9575 		goto fail;
9576 
9577 	ret = intel_bw_atomic_check(state);
9578 	if (ret)
9579 		goto fail;
9580 
9581 	ret = intel_atomic_check_cdclk(state, &any_ms);
9582 	if (ret)
9583 		goto fail;
9584 
9585 	if (any_ms) {
9586 		ret = intel_modeset_checks(state);
9587 		if (ret)
9588 			goto fail;
9589 
9590 		ret = intel_modeset_calc_cdclk(state);
9591 		if (ret)
9592 			return ret;
9593 
9594 		intel_modeset_clear_plls(state);
9595 	}
9596 
9597 	ret = intel_atomic_check_crtcs(state);
9598 	if (ret)
9599 		goto fail;
9600 
9601 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9602 					    new_crtc_state, i) {
9603 		if (new_crtc_state->uapi.async_flip) {
9604 			ret = intel_atomic_check_async(state);
9605 			if (ret)
9606 				goto fail;
9607 		}
9608 
9609 		if (!intel_crtc_needs_modeset(new_crtc_state) &&
9610 		    !new_crtc_state->update_pipe)
9611 			continue;
9612 
9613 		intel_dump_pipe_config(new_crtc_state, state,
9614 				       intel_crtc_needs_modeset(new_crtc_state) ?
9615 				       "[modeset]" : "[fastset]");
9616 	}
9617 
9618 	return 0;
9619 
9620  fail:
9621 	if (ret == -EDEADLK)
9622 		return ret;
9623 
9624 	/*
9625 	 * FIXME would probably be nice to know which crtc specifically
9626 	 * caused the failure, in cases where we can pinpoint it.
9627 	 */
9628 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9629 					    new_crtc_state, i)
9630 		intel_dump_pipe_config(new_crtc_state, state, "[failed]");
9631 
9632 	return ret;
9633 }
9634 
9635 static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
9636 {
9637 	struct intel_crtc_state *crtc_state;
9638 	struct intel_crtc *crtc;
9639 	int i, ret;
9640 
9641 	ret = drm_atomic_helper_prepare_planes(state->base.dev, &state->base);
9642 	if (ret < 0)
9643 		return ret;
9644 
9645 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
9646 		bool mode_changed = intel_crtc_needs_modeset(crtc_state);
9647 
9648 		if (mode_changed || crtc_state->update_pipe ||
9649 		    crtc_state->uapi.color_mgmt_changed) {
9650 			intel_dsb_prepare(crtc_state);
9651 		}
9652 	}
9653 
9654 	return 0;
9655 }
9656 
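/*
 * Enable CPU (and, where applicable, PCH) FIFO underrun reporting for
 * @crtc. On gen2 this is skipped while all planes are disabled, since
 * those platforms report underruns whenever no planes are active.
 */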
9657 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
9658 				  struct intel_crtc_state *crtc_state)
9659 {
9660 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9661 
9662 	if (!IS_DISPLAY_VER(dev_priv, 2) || crtc_state->active_planes)
9663 		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
9664 
9665 	if (crtc_state->has_pch_encoder) {
9666 		enum pipe pch_transcoder =
9667 			intel_crtc_pch_transcoder(crtc);
9668 
9669 		intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
9670 	}
9671 }
9672 
9673 static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
9674 			       const struct intel_crtc_state *new_crtc_state)
9675 {
9676 	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
9677 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9678 
9679 	/*
9680 	 * Update pipe size and adjust fitter if needed: the reason for this is
9681 	 * that in compute_mode_changes we check the native mode (not the pfit
9682 	 * mode) to see if we can flip rather than do a full mode set. In the
9683 	 * fastboot case, we'll flip, but if we don't update the pipesrc and
9684 	 * pfit state, we'll end up with a big fb scanned out into the wrong
9685 	 * sized surface.
9686 	 */
9687 	intel_set_pipe_src_size(new_crtc_state);
9688 
9689 	/* on skylake this is done by detaching scalers */
9690 	if (DISPLAY_VER(dev_priv) >= 9) {
9691 		skl_detach_scalers(new_crtc_state);
9692 
9693 		if (new_crtc_state->pch_pfit.enabled)
9694 			skl_pfit_enable(new_crtc_state);
9695 	} else if (HAS_PCH_SPLIT(dev_priv)) {
9696 		if (new_crtc_state->pch_pfit.enabled)
9697 			ilk_pfit_enable(new_crtc_state);
9698 		else if (old_crtc_state->pch_pfit.enabled)
9699 			ilk_pfit_disable(old_crtc_state);
9700 	}
9701 
9702 	/*
9703 	 * The register is supposedly single buffered so perhaps
9704 	 * this based on the adjusted pixel rate so pfit changes do
9705 	 * this based on the adjust pixel rate so pfit changes do
9706 	 * affect it and so it must be updated for fastsets.
9707 	 * HSW/BDW only really need this here for fastboot, after
9708 	 * that the value should not change without a full modeset.
9709 	 */
9710 	if (DISPLAY_VER(dev_priv) >= 9 ||
9711 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
9712 		hsw_set_linetime_wm(new_crtc_state);
9713 
9714 	if (DISPLAY_VER(dev_priv) >= 11)
9715 		icl_set_pipe_chicken(crtc);
9716 }
9717 
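/*
 * Commit the per-pipe configuration (color management, scalers, pfit,
 * etc.) for a fastset, and update the watermarks. During full modesets
 * the pipe was already programmed when the CRTC was enabled.
 */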
9718 static void commit_pipe_config(struct intel_atomic_state *state,
9719 			       struct intel_crtc *crtc)
9720 {
9721 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9722 	const struct intel_crtc_state *old_crtc_state =
9723 		intel_atomic_get_old_crtc_state(state, crtc);
9724 	const struct intel_crtc_state *new_crtc_state =
9725 		intel_atomic_get_new_crtc_state(state, crtc);
9726 	bool modeset = intel_crtc_needs_modeset(new_crtc_state);
9727 
9728 	/*
9729 	 * During modesets the pipe configuration was programmed as the
9730 	 * CRTC was enabled.
9731 	 */
9732 	if (!modeset) {
9733 		if (new_crtc_state->uapi.color_mgmt_changed ||
9734 		    new_crtc_state->update_pipe)
9735 			intel_color_commit(new_crtc_state);
9736 
9737 		if (DISPLAY_VER(dev_priv) >= 9)
9738 			skl_detach_scalers(new_crtc_state);
9739 
9740 		if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
9741 			bdw_set_pipemisc(new_crtc_state);
9742 
9743 		if (new_crtc_state->update_pipe)
9744 			intel_pipe_fastset(old_crtc_state, new_crtc_state);
9745 
9746 		intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
9747 	}
9748 
9749 	if (dev_priv->display.atomic_update_watermarks)
9750 		dev_priv->display.atomic_update_watermarks(state, crtc);
9751 }
9752 
9753 static void intel_enable_crtc(struct intel_atomic_state *state,
9754 			      struct intel_crtc *crtc)
9755 {
9756 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9757 	const struct intel_crtc_state *new_crtc_state =
9758 		intel_atomic_get_new_crtc_state(state, crtc);
9759 
9760 	if (!intel_crtc_needs_modeset(new_crtc_state))
9761 		return;
9762 
9763 	intel_crtc_update_active_timings(new_crtc_state);
9764 
9765 	dev_priv->display.crtc_enable(state, crtc);
9766 
9767 	if (new_crtc_state->bigjoiner_slave)
9768 		return;
9769 
9770 	/* vblanks work again, re-enable pipe CRC. */
9771 	intel_crtc_enable_pipe_crc(crtc);
9772 }
9773 
9774 static void intel_update_crtc(struct intel_atomic_state *state,
9775 			      struct intel_crtc *crtc)
9776 {
9777 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9778 	const struct intel_crtc_state *old_crtc_state =
9779 		intel_atomic_get_old_crtc_state(state, crtc);
9780 	struct intel_crtc_state *new_crtc_state =
9781 		intel_atomic_get_new_crtc_state(state, crtc);
9782 	bool modeset = intel_crtc_needs_modeset(new_crtc_state);
9783 
9784 	if (!modeset) {
9785 		if (new_crtc_state->preload_luts &&
9786 		    (new_crtc_state->uapi.color_mgmt_changed ||
9787 		     new_crtc_state->update_pipe))
9788 			intel_color_load_luts(new_crtc_state);
9789 
9790 		intel_pre_plane_update(state, crtc);
9791 
9792 		if (new_crtc_state->update_pipe)
9793 			intel_encoders_update_pipe(state, crtc);
9794 	}
9795 
9796 	if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
9797 		intel_fbc_disable(crtc);
9798 	else
9799 		intel_fbc_enable(state, crtc);
9800 
9801 	/* Perform vblank evasion around commit operation */
9802 	intel_pipe_update_start(new_crtc_state);
9803 
9804 	commit_pipe_config(state, crtc);
9805 
9806 	if (DISPLAY_VER(dev_priv) >= 9)
9807 		skl_update_planes_on_crtc(state, crtc);
9808 	else
9809 		i9xx_update_planes_on_crtc(state, crtc);
9810 
9811 	intel_pipe_update_end(new_crtc_state);
9812 
9813 	/*
9814 	 * We usually enable FIFO underrun interrupts as part of the
9815 	 * CRTC enable sequence during modesets.  But when we inherit a
9816 	 * valid pipe configuration from the BIOS we need to take care
9817 	 * of enabling them on the CRTC's first fastset.
9818 	 */
9819 	if (new_crtc_state->update_pipe && !modeset &&
9820 	    old_crtc_state->inherited)
9821 		intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
9822 }
9823 
9824 static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
9825 					  struct intel_crtc_state *old_crtc_state,
9826 					  struct intel_crtc_state *new_crtc_state,
9827 					  struct intel_crtc *crtc)
9828 {
9829 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9830 
9831 	drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave);
9832 
9833 	intel_crtc_disable_planes(state, crtc);
9834 
9835 	/*
9836 	 * We still need special handling for disabling bigjoiner master
9837 	 * and slaves since the slave has no encoder or PLLs of its own,
9838 	 * so we don't need to disable those.
9839 	 */
9840 	if (old_crtc_state->bigjoiner) {
9841 		intel_crtc_disable_planes(state,
9842 					  old_crtc_state->bigjoiner_linked_crtc);
9843 		old_crtc_state->bigjoiner_linked_crtc->active = false;
9844 	}
9845 
9846 	/*
9847 	 * We need to disable pipe CRC before disabling the pipe,
9848 	 * or we race against vblank off.
9849 	 */
9850 	intel_crtc_disable_pipe_crc(crtc);
9851 
9852 	dev_priv->display.crtc_disable(state, crtc);
9853 	crtc->active = false;
9854 	intel_fbc_disable(crtc);
9855 	intel_disable_shared_dpll(old_crtc_state);
9856 
9857 	/* FIXME unify this for all platforms */
9858 	if (!new_crtc_state->hw.active &&
9859 	    !HAS_GMCH(dev_priv) &&
9860 	    dev_priv->display.initial_watermarks)
9861 		dev_priv->display.initial_watermarks(state, crtc);
9862 }
9863 
9864 static void intel_commit_modeset_disables(struct intel_atomic_state *state)
9865 {
9866 	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
9867 	struct intel_crtc *crtc;
9868 	u32 handled = 0;
9869 	int i;
9870 
9871 	/* Only disable port sync and MST slaves */
9872 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9873 					    new_crtc_state, i) {
9874 		if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
9875 			continue;
9876 
9877 		if (!old_crtc_state->hw.active)
9878 			continue;
9879 
9880 		/* In case of Transcoder Port Sync, master/slave CRTCs can be
9881 		 * assigned in any order and we need to make sure that
9882 		 * slave CRTCs are disabled first and then the master CRTC, since
9883 		 * slave vblanks are masked until the master's vblank.
9884 		 */
9885 		if (!is_trans_port_sync_slave(old_crtc_state) &&
9886 		    !intel_dp_mst_is_slave_trans(old_crtc_state))
9887 			continue;
9888 
9889 		intel_pre_plane_update(state, crtc);
9890 		intel_old_crtc_state_disables(state, old_crtc_state,
9891 					      new_crtc_state, crtc);
9892 		handled |= BIT(crtc->pipe);
9893 	}
9894 
9895 	/* Disable everything else left on */
9896 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9897 					    new_crtc_state, i) {
9898 		if (!intel_crtc_needs_modeset(new_crtc_state) ||
9899 		    (handled & BIT(crtc->pipe)) ||
9900 		    old_crtc_state->bigjoiner_slave)
9901 			continue;
9902 
9903 		intel_pre_plane_update(state, crtc);
9904 		if (old_crtc_state->bigjoiner) {
9905 			struct intel_crtc *slave =
9906 				old_crtc_state->bigjoiner_linked_crtc;
9907 
9908 			intel_pre_plane_update(state, slave);
9909 		}
9910 
9911 		if (old_crtc_state->hw.active)
9912 			intel_old_crtc_state_disables(state, old_crtc_state,
9913 						      new_crtc_state, crtc);
9914 	}
9915 }
9916 
9917 static void intel_commit_modeset_enables(struct intel_atomic_state *state)
9918 {
9919 	struct intel_crtc_state *new_crtc_state;
9920 	struct intel_crtc *crtc;
9921 	int i;
9922 
9923 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
9924 		if (!new_crtc_state->hw.active)
9925 			continue;
9926 
9927 		intel_enable_crtc(state, crtc);
9928 		intel_update_crtc(state, crtc);
9929 	}
9930 }
9931 
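/*
 * SKL+ variant of the modeset enable path: order the CRTC updates so
 * that the DDB allocations of the pipes never overlap at any point in
 * time, to avoid pipe underruns (see the comments below for details).
 */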
9932 static void skl_commit_modeset_enables(struct intel_atomic_state *state)
9933 {
9934 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
9935 	struct intel_crtc *crtc;
9936 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
9937 	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
9938 	u8 update_pipes = 0, modeset_pipes = 0;
9939 	int i;
9940 
9941 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9942 		enum pipe pipe = crtc->pipe;
9943 
9944 		if (!new_crtc_state->hw.active)
9945 			continue;
9946 
9947 		/* ignore allocations for crtcs that have been turned off. */
9948 		if (!intel_crtc_needs_modeset(new_crtc_state)) {
9949 			entries[pipe] = old_crtc_state->wm.skl.ddb;
9950 			update_pipes |= BIT(pipe);
9951 		} else {
9952 			modeset_pipes |= BIT(pipe);
9953 		}
9954 	}
9955 
9956 	/*
9957 	 * Whenever the number of active pipes changes, we need to make sure we
9958 	 * update the pipes in the right order so that their ddb allocations
9959 	 * never overlap with each other between CRTC updates. Otherwise we'll
9960 	 * cause pipe underruns and other bad stuff.
9961 	 *
9962 	 * So first let's enable all pipes that do not need a fullmodeset as
9963 	 * those don't have any external dependency.
9964 	 */
9965 	while (update_pipes) {
9966 		for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
9967 						    new_crtc_state, i) {
9968 			enum pipe pipe = crtc->pipe;
9969 
9970 			if ((update_pipes & BIT(pipe)) == 0)
9971 				continue;
9972 
9973 			if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
9974 							entries, I915_MAX_PIPES, pipe))
9975 				continue;
9976 
9977 			entries[pipe] = new_crtc_state->wm.skl.ddb;
9978 			update_pipes &= ~BIT(pipe);
9979 
9980 			intel_update_crtc(state, crtc);
9981 
9982 			/*
9983 			 * If this is an already active pipe, its DDB changed,
9984 			 * and this isn't the last pipe that needs updating,
9985 			 * then we need to wait for a vblank to pass for the
9986 			 * new ddb allocation to take effect.
9987 			 */
9988 			if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
9989 						 &old_crtc_state->wm.skl.ddb) &&
9990 			    (update_pipes | modeset_pipes))
9991 				intel_wait_for_vblank(dev_priv, pipe);
9992 		}
9993 	}
9994 
9995 	update_pipes = modeset_pipes;
9996 
9997 	/*
9998 	 * Enable all pipes that need a modeset and do not depend on other
9999 	 * pipes.
10000 	 */
10001 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10002 		enum pipe pipe = crtc->pipe;
10003 
10004 		if ((modeset_pipes & BIT(pipe)) == 0)
10005 			continue;
10006 
10007 		if (intel_dp_mst_is_slave_trans(new_crtc_state) ||
10008 		    is_trans_port_sync_master(new_crtc_state) ||
10009 		    (new_crtc_state->bigjoiner && !new_crtc_state->bigjoiner_slave))
10010 			continue;
10011 
10012 		modeset_pipes &= ~BIT(pipe);
10013 
10014 		intel_enable_crtc(state, crtc);
10015 	}
10016 
10017 	/*
10018 	 * Then we enable all remaining pipes that depend on other
10019 	 * pipes: MST slaves and port sync masters, big joiner masters.
10020 	 */
10021 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10022 		enum pipe pipe = crtc->pipe;
10023 
10024 		if ((modeset_pipes & BIT(pipe)) == 0)
10025 			continue;
10026 
10027 		modeset_pipes &= ~BIT(pipe);
10028 
10029 		intel_enable_crtc(state, crtc);
10030 	}
10031 
10032 	/*
10033 	 * Finally we do the plane updates/etc. for all pipes that got enabled.
10034 	 */
10035 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10036 		enum pipe pipe = crtc->pipe;
10037 
10038 		if ((update_pipes & BIT(pipe)) == 0)
10039 			continue;
10040 
10041 		drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
10042 									entries, I915_MAX_PIPES, pipe));
10043 
10044 		entries[pipe] = new_crtc_state->wm.skl.ddb;
10045 		update_pipes &= ~BIT(pipe);
10046 
10047 		intel_update_crtc(state, crtc);
10048 	}
10049 
10050 	drm_WARN_ON(&dev_priv->drm, modeset_pipes);
10051 	drm_WARN_ON(&dev_priv->drm, update_pipes);
10052 }
10053 
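/* Free all atomic states queued up on the deferred free list. */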
10054 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
10055 {
10056 	struct intel_atomic_state *state, *next;
10057 	struct llist_node *freed;
10058 
10059 	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
10060 	llist_for_each_entry_safe(state, next, freed, freed)
10061 		drm_atomic_state_put(&state->base);
10062 }
10063 
10064 static void intel_atomic_helper_free_state_worker(struct work_struct *work)
10065 {
10066 	struct drm_i915_private *dev_priv =
10067 		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
10068 
10069 	intel_atomic_helper_free_state(dev_priv);
10070 }
10071 
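/*
 * Wait for the commit_ready fence to signal, or for a pending GPU reset
 * requiring a modeset, whichever happens first.
 */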
10072 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
10073 {
10074 	struct wait_queue_entry wait_fence, wait_reset;
10075 	struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
10076 
10077 	init_wait_entry(&wait_fence, 0);
10078 	init_wait_entry(&wait_reset, 0);
10079 	for (;;) {
10080 		prepare_to_wait(&intel_state->commit_ready.wait,
10081 				&wait_fence, TASK_UNINTERRUPTIBLE);
10082 		prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
10083 					      I915_RESET_MODESET),
10084 				&wait_reset, TASK_UNINTERRUPTIBLE);
10085 
10087 		if (i915_sw_fence_done(&intel_state->commit_ready) ||
10088 		    test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
10089 			break;
10090 
10091 		schedule();
10092 	}
10093 	finish_wait(&intel_state->commit_ready.wait, &wait_fence);
10094 	finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
10095 				  I915_RESET_MODESET),
10096 		    &wait_reset);
10097 }
10098 
10099 static void intel_cleanup_dsbs(struct intel_atomic_state *state)
10100 {
10101 	struct intel_crtc_state *old_crtc_state, *new_crtc_state;
10102 	struct intel_crtc *crtc;
10103 	int i;
10104 
10105 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10106 					    new_crtc_state, i)
10107 		intel_dsb_cleanup(old_crtc_state);
10108 }
10109 
10110 static void intel_atomic_cleanup_work(struct work_struct *work)
10111 {
10112 	struct intel_atomic_state *state =
10113 		container_of(work, struct intel_atomic_state, base.commit_work);
10114 	struct drm_i915_private *i915 = to_i915(state->base.dev);
10115 
10116 	intel_cleanup_dsbs(state);
10117 	drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
10118 	drm_atomic_helper_commit_cleanup_done(&state->base);
10119 	drm_atomic_state_put(&state->base);
10120 
10121 	intel_atomic_helper_free_state(i915);
10122 }
10123 
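/*
 * Read back the fast clear color value from the framebuffer of each
 * plane using the CCS clear color modifier and cache it in the plane
 * state, so it can be programmed to the hardware later.
 */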
10124 static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
10125 {
10126 	struct drm_i915_private *i915 = to_i915(state->base.dev);
10127 	struct intel_plane *plane;
10128 	struct intel_plane_state *plane_state;
10129 	int i;
10130 
10131 	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
10132 		struct drm_framebuffer *fb = plane_state->hw.fb;
10133 		int ret;
10134 
10135 		if (!fb ||
10136 		    fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
10137 			continue;
10138 
10139 		/*
10140 		 * The layout of the fast clear color value expected by HW
10141 		 * (the DRM ABI requires this value to be located in the fb at offset 0 of plane #2):
10142 		 * - 4 x 4 bytes per-channel value
10143 		 *   (in surface type specific float/int format provided by the fb user)
10144 		 * - 8 bytes native color value used by the display
10145 		 *   (converted/written by GPU during a fast clear operation using the
10146 		 *    above per-channel values)
10147 		 *
10148 		 * The commit's FB prepare hook already ensured that FB obj is pinned and the
10149 		 * caller made sure that the object is synced wrt. the related color clear value
10150 		 * GPU write on it.
10151 		 */
10152 		ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
10153 						     fb->offsets[2] + 16,
10154 						     &plane_state->ccval,
10155 						     sizeof(plane_state->ccval));
10156 		/* The above could only fail if the FB obj has an unexpected backing store type. */
10157 		drm_WARN_ON(&i915->drm, ret);
10158 	}
10159 }
10160 
10161 static void intel_atomic_commit_tail(struct intel_atomic_state *state)
10162 {
10163 	struct drm_device *dev = state->base.dev;
10164 	struct drm_i915_private *dev_priv = to_i915(dev);
10165 	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
10166 	struct intel_crtc *crtc;
10167 	u64 put_domains[I915_MAX_PIPES] = {};
10168 	intel_wakeref_t wakeref = 0;
10169 	int i;
10170 
10171 	intel_atomic_commit_fence_wait(state);
10172 
10173 	drm_atomic_helper_wait_for_dependencies(&state->base);
10174 
10175 	if (state->modeset)
10176 		wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
10177 
10178 	intel_atomic_prepare_plane_clear_colors(state);
10179 
10180 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10181 					    new_crtc_state, i) {
10182 		if (intel_crtc_needs_modeset(new_crtc_state) ||
10183 		    new_crtc_state->update_pipe) {
10185 			put_domains[crtc->pipe] =
10186 				modeset_get_crtc_power_domains(new_crtc_state);
10187 		}
10188 	}
10189 
10190 	intel_commit_modeset_disables(state);
10191 
10192 	/* FIXME: Eventually get rid of our crtc->config pointer */
10193 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
10194 		crtc->config = new_crtc_state;
10195 
10196 	if (state->modeset) {
10197 		drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
10198 
10199 		intel_set_cdclk_pre_plane_update(state);
10200 
10201 		intel_modeset_verify_disabled(dev_priv, state);
10202 	}
10203 
10204 	intel_sagv_pre_plane_update(state);
10205 
10206 	/* Complete the events for pipes that have now been disabled */
10207 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10208 		bool modeset = intel_crtc_needs_modeset(new_crtc_state);
10209 
10210 		/* Complete events for the now disabled pipes here. */
10211 		if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) {
10212 			spin_lock_irq(&dev->event_lock);
10213 			drm_crtc_send_vblank_event(&crtc->base,
10214 						   new_crtc_state->uapi.event);
10215 			spin_unlock_irq(&dev->event_lock);
10216 
10217 			new_crtc_state->uapi.event = NULL;
10218 		}
10219 	}
10220 
10221 	if (state->modeset)
10222 		intel_encoders_update_prepare(state);
10223 
10224 	intel_dbuf_pre_plane_update(state);
10225 
10226 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10227 		if (new_crtc_state->uapi.async_flip)
10228 			intel_crtc_enable_flip_done(state, crtc);
10229 	}
10230 
10231 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
10232 	dev_priv->display.commit_modeset_enables(state);
10233 
10234 	if (state->modeset) {
10235 		intel_encoders_update_complete(state);
10236 
10237 		intel_set_cdclk_post_plane_update(state);
10238 	}
10239 
10240 	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
10241 	 * already, but still need the state for the delayed optimization. To
10242 	 * fix this:
10243 	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
10244 	 * - schedule that vblank worker _before_ calling hw_done
10245 	 * - at the start of commit_tail, cancel it _synchronously_
10246 	 * - switch over to the vblank wait helper in the core after that since
10247 	 *   we don't need our special handling any more.
10248 	 */
10249 	drm_atomic_helper_wait_for_flip_done(dev, &state->base);
10250 
10251 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
10252 		if (new_crtc_state->uapi.async_flip)
10253 			intel_crtc_disable_flip_done(state, crtc);
10254 
10255 		if (new_crtc_state->hw.active &&
10256 		    !intel_crtc_needs_modeset(new_crtc_state) &&
10257 		    !new_crtc_state->preload_luts &&
10258 		    (new_crtc_state->uapi.color_mgmt_changed ||
10259 		     new_crtc_state->update_pipe))
10260 			intel_color_load_luts(new_crtc_state);
10261 	}
10262 
10263 	/*
10264 	 * Now that the vblank has passed, we can go ahead and program the
10265 	 * optimal watermarks on platforms that need two-step watermark
10266 	 * programming.
10267 	 *
10268 	 * TODO: Move this (and other cleanup) to an async worker eventually.
10269 	 */
10270 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
10271 					    new_crtc_state, i) {
10272 		/*
10273 		 * Gen2 reports pipe underruns whenever all planes are disabled.
10274 		 * So re-enable underrun reporting after some planes get enabled.
10275 		 *
10276 		 * We do this before .optimize_watermarks() so that we have a
10277 		 * chance of catching underruns with the intermediate watermarks
10278 		 * vs. the new plane configuration.
10279 		 */
10280 		if (IS_DISPLAY_VER(dev_priv, 2) && planes_enabling(old_crtc_state, new_crtc_state))
10281 			intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
10282 
10283 		if (dev_priv->display.optimize_watermarks)
10284 			dev_priv->display.optimize_watermarks(state, crtc);
10285 	}
10286 
10287 	intel_dbuf_post_plane_update(state);
10288 
10289 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10290 		intel_post_plane_update(state, crtc);
10291 
10292 		modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
10293 
10294 		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
10295 
10296 		/*
10297 		 * DSB cleanup is done in cleanup_work, aligning with framebuffer
10298 		 * cleanup. So copy and reset the dsb structure to sync with
10299 		 * commit_done, and do the dsb cleanup later in cleanup_work.
10300 		 */
10301 		old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
10302 	}
10303 
10304 	/* Underruns don't always raise interrupts, so check manually */
10305 	intel_check_cpu_fifo_underruns(dev_priv);
10306 	intel_check_pch_fifo_underruns(dev_priv);
10307 
10308 	if (state->modeset)
10309 		intel_verify_planes(state);
10310 
10311 	intel_sagv_post_plane_update(state);
10312 
10313 	drm_atomic_helper_commit_hw_done(&state->base);
10314 
10315 	if (state->modeset) {
10316 		/* As one of the primary mmio accessors, KMS has a high
10317 		 * likelihood of triggering bugs in unclaimed access. After we
10318 		 * finish modesetting, see if an error has been flagged, and if
10319 		 * so enable debugging for the next modeset - and hope we catch
10320 		 * the culprit.
10321 		 */
10322 		intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
10323 		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
10324 	}
10325 	intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
10326 
10327 	/*
10328 	 * Defer the cleanup of the old state to a separate worker to not
10329 	 * impede the current task (userspace for blocking modesets) that
10330 	 * is executed inline. For out-of-line asynchronous modesets/flips,
10331 	 * deferring to a new worker seems overkill, but we would place a
10332 	 * schedule point (cond_resched()) here anyway to keep latencies
10333 	 * down.
10334 	 */
10335 	INIT_WORK(&state->base.commit_work, intel_atomic_cleanup_work);
10336 	queue_work(system_highpri_wq, &state->base.commit_work);
10337 }
10338 
10339 static void intel_atomic_commit_work(struct work_struct *work)
10340 {
10341 	struct intel_atomic_state *state =
10342 		container_of(work, struct intel_atomic_state, base.commit_work);
10343 
10344 	intel_atomic_commit_tail(state);
10345 }
10346 
10347 static int __i915_sw_fence_call
10348 intel_atomic_commit_ready(struct i915_sw_fence *fence,
10349 			  enum i915_sw_fence_notify notify)
10350 {
10351 	struct intel_atomic_state *state =
10352 		container_of(fence, struct intel_atomic_state, commit_ready);
10353 
10354 	switch (notify) {
10355 	case FENCE_COMPLETE:
10356 		/* we do blocking waits in the worker, nothing to do here */
10357 		break;
10358 	case FENCE_FREE:
10359 		{
10360 			struct intel_atomic_helper *helper =
10361 				&to_i915(state->base.dev)->atomic_helper;
10362 
10363 			if (llist_add(&state->freed, &helper->free_list))
10364 				schedule_work(&helper->free_work);
10365 			break;
10366 		}
10367 	}
10368 
10369 	return NOTIFY_DONE;
10370 }
10371 
10372 static void intel_atomic_track_fbs(struct intel_atomic_state *state)
10373 {
10374 	struct intel_plane_state *old_plane_state, *new_plane_state;
10375 	struct intel_plane *plane;
10376 	int i;
10377 
10378 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
10379 					     new_plane_state, i)
10380 		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
10381 					to_intel_frontbuffer(new_plane_state->hw.fb),
10382 					plane->frontbuffer_bit);
10383 }
10384 
10385 static int intel_atomic_commit(struct drm_device *dev,
10386 			       struct drm_atomic_state *_state,
10387 			       bool nonblock)
10388 {
10389 	struct intel_atomic_state *state = to_intel_atomic_state(_state);
10390 	struct drm_i915_private *dev_priv = to_i915(dev);
10391 	int ret = 0;
10392 
10393 	state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
10394 
10395 	drm_atomic_state_get(&state->base);
10396 	i915_sw_fence_init(&state->commit_ready,
10397 			   intel_atomic_commit_ready);
10398 
10399 	/*
10400 	 * The intel_legacy_cursor_update() fast path takes care
10401 	 * of avoiding the vblank waits for simple cursor
10402 	 * movement and flips. For cursor on/off and size changes,
10403 	 * we want to perform the vblank waits so that watermark
10404 	 * updates happen during the correct frames. Gen9+ have
10405 	 * double buffered watermarks and so shouldn't need this.
10406 	 *
10407 	 * Unset state->legacy_cursor_update before the call to
10408 	 * drm_atomic_helper_setup_commit() because otherwise
10409 	 * drm_atomic_helper_wait_for_flip_done() is a noop and
10410 	 * we get FIFO underruns because we didn't wait
10411 	 * for vblank.
10412 	 *
10413 	 * FIXME doing watermarks and fb cleanup from a vblank worker
10414 	 * (assuming we had any) would solve these problems.
10415 	 */
10416 	if (DISPLAY_VER(dev_priv) < 9 && state->base.legacy_cursor_update) {
10417 		struct intel_crtc_state *new_crtc_state;
10418 		struct intel_crtc *crtc;
10419 		int i;
10420 
10421 		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
10422 			if (new_crtc_state->wm.need_postvbl_update ||
10423 			    new_crtc_state->update_wm_post)
10424 				state->base.legacy_cursor_update = false;
10425 	}
10426 
10427 	ret = intel_atomic_prepare_commit(state);
10428 	if (ret) {
10429 		drm_dbg_atomic(&dev_priv->drm,
10430 			       "Preparing state failed with %i\n", ret);
10431 		i915_sw_fence_commit(&state->commit_ready);
10432 		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
10433 		return ret;
10434 	}
10435 
10436 	ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
10437 	if (!ret)
10438 		ret = drm_atomic_helper_swap_state(&state->base, true);
10439 	if (!ret)
10440 		intel_atomic_swap_global_state(state);
10441 
10442 	if (ret) {
10443 		struct intel_crtc_state *new_crtc_state;
10444 		struct intel_crtc *crtc;
10445 		int i;
10446 
10447 		i915_sw_fence_commit(&state->commit_ready);
10448 
10449 		for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
10450 			intel_dsb_cleanup(new_crtc_state);
10451 
10452 		drm_atomic_helper_cleanup_planes(dev, &state->base);
10453 		intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
10454 		return ret;
10455 	}
10456 	intel_shared_dpll_swap_state(state);
10457 	intel_atomic_track_fbs(state);
10458 
10459 	drm_atomic_state_get(&state->base);
10460 	INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
10461 
10462 	i915_sw_fence_commit(&state->commit_ready);
10463 	if (nonblock && state->modeset) {
10464 		queue_work(dev_priv->modeset_wq, &state->base.commit_work);
10465 	} else if (nonblock) {
10466 		queue_work(dev_priv->flip_wq, &state->base.commit_work);
10467 	} else {
10468 		if (state->modeset)
10469 			flush_workqueue(dev_priv->modeset_wq);
10470 		intel_atomic_commit_tail(state);
10471 	}
10472 
10473 	return 0;
10474 }
10475 
10476 struct wait_rps_boost {
10477 	struct wait_queue_entry wait;
10478 
10479 	struct drm_crtc *crtc;
10480 	struct i915_request *request;
10481 };
10482 
10483 static int do_rps_boost(struct wait_queue_entry *_wait,
10484 			unsigned mode, int sync, void *key)
10485 {
10486 	struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
10487 	struct i915_request *rq = wait->request;
10488 
10489 	/*
10490 	 * If we missed the vblank, but the request is already running it
10491 	 * is reasonable to assume that it will complete before the next
10492 	 * vblank without our intervention, so leave RPS alone.
10493 	 */
10494 	if (!i915_request_started(rq))
10495 		intel_rps_boost(rq);
10496 	i915_request_put(rq);
10497 
10498 	drm_crtc_vblank_put(wait->crtc);
10499 
10500 	list_del(&wait->wait.entry);
10501 	kfree(wait);
10502 	return 1;
10503 }
10504 
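/*
 * If the flip depends on an i915 fence, queue an RPS boost to fire at
 * the next vblank in case the request hasn't started running by then
 * (see do_rps_boost()).
 */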
10505 static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
10506 				       struct dma_fence *fence)
10507 {
10508 	struct wait_rps_boost *wait;
10509 
10510 	if (!dma_fence_is_i915(fence))
10511 		return;
10512 
10513 	if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
10514 		return;
10515 
10516 	if (drm_crtc_vblank_get(crtc))
10517 		return;
10518 
10519 	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
10520 	if (!wait) {
10521 		drm_crtc_vblank_put(crtc);
10522 		return;
10523 	}
10524 
10525 	wait->request = to_request(dma_fence_get(fence));
10526 	wait->crtc = crtc;
10527 
10528 	wait->wait.func = do_rps_boost;
10529 	wait->wait.flags = 0;
10530 
10531 	add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
10532 }
10533 
10534 int intel_plane_pin_fb(struct intel_plane_state *plane_state)
10535 {
10536 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
10537 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10538 	struct drm_framebuffer *fb = plane_state->hw.fb;
10539 	struct i915_vma *vma;
10540 	bool phys_cursor =
10541 		plane->id == PLANE_CURSOR &&
10542 		INTEL_INFO(dev_priv)->display.cursor_needs_physical;
10543 
10544 	vma = intel_pin_and_fence_fb_obj(fb, phys_cursor,
10545 					 &plane_state->view.gtt,
10546 					 intel_plane_uses_fence(plane_state),
10547 					 &plane_state->flags);
10548 	if (IS_ERR(vma))
10549 		return PTR_ERR(vma);
10550 
10551 	plane_state->vma = vma;
10552 
10553 	return 0;
10554 }
10555 
10556 void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
10557 {
10558 	struct i915_vma *vma;
10559 
10560 	vma = fetch_and_zero(&old_plane_state->vma);
10561 	if (vma)
10562 		intel_unpin_fb_vma(vma, old_plane_state->flags);
10563 }
10564 
10565 /**
10566  * intel_prepare_plane_fb - Prepare fb for usage on plane
10567  * @_plane: drm plane to prepare for
10568  * @_new_plane_state: the plane state being prepared
10569  *
10570  * Prepares a framebuffer for usage on a display plane.  Generally this
10571  * involves pinning the underlying object and updating the frontbuffer tracking
10572  * bits.  Some older platforms need special physical address handling for
10573  * cursor planes.
10574  *
10575  * Returns 0 on success, negative error code on failure.
10576  */
10577 int
10578 intel_prepare_plane_fb(struct drm_plane *_plane,
10579 		       struct drm_plane_state *_new_plane_state)
10580 {
10581 	struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
10582 	struct intel_plane *plane = to_intel_plane(_plane);
10583 	struct intel_plane_state *new_plane_state =
10584 		to_intel_plane_state(_new_plane_state);
10585 	struct intel_atomic_state *state =
10586 		to_intel_atomic_state(new_plane_state->uapi.state);
10587 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10588 	const struct intel_plane_state *old_plane_state =
10589 		intel_atomic_get_old_plane_state(state, plane);
10590 	struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
10591 	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
10592 	int ret;
10593 
10594 	if (old_obj) {
10595 		const struct intel_crtc_state *crtc_state =
10596 			intel_atomic_get_new_crtc_state(state,
10597 							to_intel_crtc(old_plane_state->hw.crtc));
10598 
10599 		/* Big Hammer, we also need to ensure that any pending
10600 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
10601 		 * current scanout is retired before unpinning the old
10602 		 * framebuffer. Note that we rely on userspace rendering
10603 		 * into the buffer attached to the pipe they are waiting
10604 		 * on. If not, userspace generates a GPU hang with IPEHR
10605 		 * pointing to the MI_WAIT_FOR_EVENT.
10606 		 *
10607 		 * This should only fail upon a hung GPU, in which case we
10608 		 * can safely continue.
10609 		 */
10610 		if (intel_crtc_needs_modeset(crtc_state)) {
10611 			ret = i915_sw_fence_await_reservation(&state->commit_ready,
10612 							      old_obj->base.resv, NULL,
10613 							      false, 0,
10614 							      GFP_KERNEL);
10615 			if (ret < 0)
10616 				return ret;
10617 		}
10618 	}
10619 
10620 	if (new_plane_state->uapi.fence) { /* explicit fencing */
10621 		i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
10622 					     &attr);
10623 		ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
10624 						    new_plane_state->uapi.fence,
10625 						    i915_fence_timeout(dev_priv),
10626 						    GFP_KERNEL);
10627 		if (ret < 0)
10628 			return ret;
10629 	}
10630 
10631 	if (!obj)
10632 		return 0;
10633 
10635 	ret = intel_plane_pin_fb(new_plane_state);
10636 	if (ret)
10637 		return ret;
10638 
10639 	i915_gem_object_wait_priority(obj, 0, &attr);
10640 	i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
10641 
10642 	if (!new_plane_state->uapi.fence) { /* implicit fencing */
10643 		struct dma_fence *fence;
10644 
10645 		ret = i915_sw_fence_await_reservation(&state->commit_ready,
10646 						      obj->base.resv, NULL,
10647 						      false,
10648 						      i915_fence_timeout(dev_priv),
10649 						      GFP_KERNEL);
10650 		if (ret < 0)
10651 			goto unpin_fb;
10652 
10653 		fence = dma_resv_get_excl_rcu(obj->base.resv);
10654 		if (fence) {
10655 			add_rps_boost_after_vblank(new_plane_state->hw.crtc,
10656 						   fence);
10657 			dma_fence_put(fence);
10658 		}
10659 	} else {
10660 		add_rps_boost_after_vblank(new_plane_state->hw.crtc,
10661 					   new_plane_state->uapi.fence);
10662 	}
10663 
10664 	/*
10665 	 * We declare pageflips to be interactive and so merit a small bias
10666 	 * towards upclocking to deliver the frame on time. By only changing
10667 	 * the RPS thresholds to sample more regularly and aim for higher
10668 	 * clocks we can hopefully deliver low power workloads (like kodi)
10669 	 * that are not quite steady state without resorting to forcing
10670 	 * maximum clocks following a vblank miss (see do_rps_boost()).
10671 	 */
10672 	if (!state->rps_interactive) {
10673 		intel_rps_mark_interactive(&dev_priv->gt.rps, true);
10674 		state->rps_interactive = true;
10675 	}
10676 
10677 	return 0;
10678 
10679 unpin_fb:
10680 	intel_plane_unpin_fb(new_plane_state);
10681 
10682 	return ret;
10683 }
10684 
10685 /**
10686  * intel_cleanup_plane_fb - Cleans up an fb after plane use
10687  * @plane: drm plane to clean up for
10688  * @_old_plane_state: the state from the previous modeset
10689  *
10690  * Cleans up a framebuffer that has just been removed from a plane.
10691  */
10692 void
10693 intel_cleanup_plane_fb(struct drm_plane *plane,
10694 		       struct drm_plane_state *_old_plane_state)
10695 {
10696 	struct intel_plane_state *old_plane_state =
10697 		to_intel_plane_state(_old_plane_state);
10698 	struct intel_atomic_state *state =
10699 		to_intel_atomic_state(old_plane_state->uapi.state);
10700 	struct drm_i915_private *dev_priv = to_i915(plane->dev);
10701 	struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
10702 
10703 	if (!obj)
10704 		return;
10705 
10706 	if (state->rps_interactive) {
10707 		intel_rps_mark_interactive(&dev_priv->gt.rps, false);
10708 		state->rps_interactive = false;
10709 	}
10710 
10711 	/* Should only be called after a successful intel_prepare_plane_fb()! */
10712 	intel_plane_unpin_fb(old_plane_state);
10713 }
10714 
10715 /**
10716  * intel_plane_destroy - destroy a plane
10717  * @plane: plane to destroy
10718  *
10719  * Common destruction function for all types of planes (primary, cursor,
10720  * sprite).
10721  */
10722 void intel_plane_destroy(struct drm_plane *plane)
10723 {
10724 	drm_plane_cleanup(plane);
10725 	kfree(to_intel_plane(plane));
10726 }
10727 
10728 static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
10729 {
10730 	struct intel_plane *plane;
10731 
10732 	for_each_intel_plane(&dev_priv->drm, plane) {
10733 		struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
10734 								  plane->pipe);
10735 
10736 		plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
10737 	}
10738 }
10739 
10741 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
10742 				      struct drm_file *file)
10743 {
10744 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
10745 	struct drm_crtc *drmmode_crtc;
10746 	struct intel_crtc *crtc;
10747 
10748 	drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
10749 	if (!drmmode_crtc)
10750 		return -ENOENT;
10751 
10752 	crtc = to_intel_crtc(drmmode_crtc);
10753 	pipe_from_crtc_id->pipe = crtc->pipe;
10754 
10755 	return 0;
10756 }
10757 
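/*
 * Compute the mask of encoders that may be active on the same CRTC
 * at the same time as @encoder (the encoder itself included).
 */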
10758 static u32 intel_encoder_possible_clones(struct intel_encoder *encoder)
10759 {
10760 	struct drm_device *dev = encoder->base.dev;
10761 	struct intel_encoder *source_encoder;
10762 	u32 possible_clones = 0;
10763 
10764 	for_each_intel_encoder(dev, source_encoder) {
10765 		if (encoders_cloneable(encoder, source_encoder))
10766 			possible_clones |= drm_encoder_mask(&source_encoder->base);
10767 	}
10768 
10769 	return possible_clones;
10770 }
10771 
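/* Translate the encoder's fixed pipe_mask into a drm CRTC mask. */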
10772 static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder)
10773 {
10774 	struct drm_device *dev = encoder->base.dev;
10775 	struct intel_crtc *crtc;
10776 	u32 possible_crtcs = 0;
10777 
10778 	for_each_intel_crtc(dev, crtc) {
10779 		if (encoder->pipe_mask & BIT(crtc->pipe))
10780 			possible_crtcs |= drm_crtc_mask(&crtc->base);
10781 	}
10782 
10783 	return possible_crtcs;
10784 }
10785 
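/*
 * eDP port A: only mobile parts have it, the DP_A strap must report
 * it as present, and on Ironlake it may additionally be fused off.
 */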
10786 static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
10787 {
10788 	if (!IS_MOBILE(dev_priv))
10789 		return false;
10790 
10791 	if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
10792 		return false;
10793 
10794 	if (IS_IRONLAKE(dev_priv) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
10795 		return false;
10796 
10797 	return true;
10798 }
10799 
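/*
 * Whether the analog CRT output (hanging off DDI E on these
 * platforms) is actually present and usable.
 */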
10800 static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
10801 {
10802 	if (DISPLAY_VER(dev_priv) >= 9)
10803 		return false;
10804 
10805 	if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
10806 		return false;
10807 
10808 	if (HAS_PCH_LPT_H(dev_priv) &&
10809 	    intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
10810 		return false;
10811 
10812 	/* DDI E can't be used if DDI A requires 4 lanes */
10813 	if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
10814 		return false;
10815 
10816 	if (!dev_priv->vbt.int_crt_support)
10817 		return false;
10818 
10819 	return true;
10820 }
10821 
10822 static void intel_setup_outputs(struct drm_i915_private *dev_priv)
10823 {
10824 	struct intel_encoder *encoder;
10825 	bool dpd_is_edp = false;
10826 
10827 	intel_pps_unlock_regs_wa(dev_priv);
10828 
10829 	if (!HAS_DISPLAY(dev_priv))
10830 		return;
10831 
10832 	if (IS_ALDERLAKE_S(dev_priv)) {
10833 		intel_ddi_init(dev_priv, PORT_A);
10834 		intel_ddi_init(dev_priv, PORT_TC1);
10835 		intel_ddi_init(dev_priv, PORT_TC2);
10836 		intel_ddi_init(dev_priv, PORT_TC3);
10837 		intel_ddi_init(dev_priv, PORT_TC4);
10838 	} else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) {
10839 		intel_ddi_init(dev_priv, PORT_A);
10840 		intel_ddi_init(dev_priv, PORT_B);
10841 		intel_ddi_init(dev_priv, PORT_TC1);
10842 		intel_ddi_init(dev_priv, PORT_TC2);
10843 	} else if (DISPLAY_VER(dev_priv) >= 12) {
10844 		intel_ddi_init(dev_priv, PORT_A);
10845 		intel_ddi_init(dev_priv, PORT_B);
10846 		intel_ddi_init(dev_priv, PORT_TC1);
10847 		intel_ddi_init(dev_priv, PORT_TC2);
10848 		intel_ddi_init(dev_priv, PORT_TC3);
10849 		intel_ddi_init(dev_priv, PORT_TC4);
10850 		intel_ddi_init(dev_priv, PORT_TC5);
10851 		intel_ddi_init(dev_priv, PORT_TC6);
10852 		icl_dsi_init(dev_priv);
10853 	} else if (IS_JSL_EHL(dev_priv)) {
10854 		intel_ddi_init(dev_priv, PORT_A);
10855 		intel_ddi_init(dev_priv, PORT_B);
10856 		intel_ddi_init(dev_priv, PORT_C);
10857 		intel_ddi_init(dev_priv, PORT_D);
10858 		icl_dsi_init(dev_priv);
10859 	} else if (IS_DISPLAY_VER(dev_priv, 11)) {
10860 		intel_ddi_init(dev_priv, PORT_A);
10861 		intel_ddi_init(dev_priv, PORT_B);
10862 		intel_ddi_init(dev_priv, PORT_C);
10863 		intel_ddi_init(dev_priv, PORT_D);
10864 		intel_ddi_init(dev_priv, PORT_E);
10865 		/*
10866 		 * On some ICL SKUs port F is not present. No strap bits for
10867 		 * this, so rely on VBT.
10868 		 * Work around broken VBTs on SKUs known to have no port F.
10869 		 */
10870 		if (IS_ICL_WITH_PORT_F(dev_priv) &&
10871 		    intel_bios_is_port_present(dev_priv, PORT_F))
10872 			intel_ddi_init(dev_priv, PORT_F);
10873 
10874 		icl_dsi_init(dev_priv);
10875 	} else if (IS_GEN9_LP(dev_priv)) {
10876 		/*
10877 		 * FIXME: Broxton doesn't support port detection via the
10878 		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
10879 		 * detect the ports.
10880 		 */
10881 		intel_ddi_init(dev_priv, PORT_A);
10882 		intel_ddi_init(dev_priv, PORT_B);
10883 		intel_ddi_init(dev_priv, PORT_C);
10884 
10885 		vlv_dsi_init(dev_priv);
10886 	} else if (HAS_DDI(dev_priv)) {
10887 		int found;
10888 
10889 		if (intel_ddi_crt_present(dev_priv))
10890 			intel_crt_init(dev_priv);
10891 
10892 		/*
10893 		 * Haswell uses DDI functions to detect digital outputs.
10894 		 * On SKL pre-D0 the strap isn't connected. Later SKUs may or
10895 		 * may not have it - it was supposed to be fixed around the
10896 		 * same time we stopped using straps. Assume it's there.
10897 		 */
10898 		found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
10899 		/* WaIgnoreDDIAStrap: skl */
10900 		if (found || IS_GEN9_BC(dev_priv))
10901 			intel_ddi_init(dev_priv, PORT_A);
10902 
10903 		/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
10904 		 * register */
10905 		if (HAS_PCH_TGP(dev_priv)) {
10906 			/* W/A due to lack of STRAP config on TGP PCH */
10907 			found = (SFUSE_STRAP_DDIB_DETECTED |
10908 				 SFUSE_STRAP_DDIC_DETECTED |
10909 				 SFUSE_STRAP_DDID_DETECTED);
10910 		} else {
10911 			found = intel_de_read(dev_priv, SFUSE_STRAP);
10912 		}
10913 
10914 		if (found & SFUSE_STRAP_DDIB_DETECTED)
10915 			intel_ddi_init(dev_priv, PORT_B);
10916 		if (found & SFUSE_STRAP_DDIC_DETECTED)
10917 			intel_ddi_init(dev_priv, PORT_C);
10918 		if (found & SFUSE_STRAP_DDID_DETECTED)
10919 			intel_ddi_init(dev_priv, PORT_D);
10920 		if (found & SFUSE_STRAP_DDIF_DETECTED)
10921 			intel_ddi_init(dev_priv, PORT_F);
10922 		/*
10923 		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
10924 		 */
10925 		if (IS_GEN9_BC(dev_priv) &&
10926 		    intel_bios_is_port_present(dev_priv, PORT_E))
10927 			intel_ddi_init(dev_priv, PORT_E);
10928 
10929 	} else if (HAS_PCH_SPLIT(dev_priv)) {
10930 		int found;
10931 
10932 		/*
10933 		 * intel_edp_init_connector() depends on this completing first,
10934 		 * to prevent the registration of both eDP and LVDS and the
10935 		 * incorrect sharing of the PPS.
10936 		 */
10937 		intel_lvds_init(dev_priv);
10938 		intel_crt_init(dev_priv);
10939 
10940 		dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
10941 
10942 		if (ilk_has_edp_a(dev_priv))
10943 			g4x_dp_init(dev_priv, DP_A, PORT_A);
10944 
10945 		if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
10946 			/* PCH SDVOB multiplex with HDMIB */
10947 			found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
10948 			if (!found)
10949 				g4x_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
10950 			if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
10951 				g4x_dp_init(dev_priv, PCH_DP_B, PORT_B);
10952 		}
10953 
10954 		if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
10955 			g4x_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
10956 
10957 		if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
10958 			g4x_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
10959 
10960 		if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
10961 			g4x_dp_init(dev_priv, PCH_DP_C, PORT_C);
10962 
10963 		if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
10964 			g4x_dp_init(dev_priv, PCH_DP_D, PORT_D);
10965 	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
10966 		bool has_edp, has_port;
10967 
10968 		if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
10969 			intel_crt_init(dev_priv);
10970 
10971 		/*
10972 		 * The DP_DETECTED bit is the latched state of the DDC
10973 		 * SDA pin at boot. However since eDP doesn't require DDC
10974 		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
10975 		 * eDP ports may have been muxed to an alternate function.
10976 		 * Thus we can't rely on the DP_DETECTED bit alone to detect
10977 		 * eDP ports. Consult the VBT as well as DP_DETECTED to
10978 		 * detect eDP ports.
10979 		 *
10980 		 * Sadly the straps seem to be missing sometimes even for HDMI
10981 		 * ports (e.g. on Voyo V3 - CHT x7-Z8700), so check both strap
10982 		 * and VBT for the presence of the port. Additionally we can't
10983 		 * trust the port type the VBT declares as we've seen at least
10984 		 * HDMI ports that the VBT claims are DP or eDP.
10985 		 */
10986 		has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
10987 		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
10988 		if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
10989 			has_edp &= g4x_dp_init(dev_priv, VLV_DP_B, PORT_B);
10990 		if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
10991 			g4x_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
10992 
10993 		has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
10994 		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
10995 		if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
10996 			has_edp &= g4x_dp_init(dev_priv, VLV_DP_C, PORT_C);
10997 		if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
10998 			g4x_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
10999 
11000 		if (IS_CHERRYVIEW(dev_priv)) {
11001 			/*
11002 			 * eDP not supported on port D,
11003 			 * so no need to worry about it
11004 			 */
11005 			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
11006 			if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
11007 				g4x_dp_init(dev_priv, CHV_DP_D, PORT_D);
11008 			if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
11009 				g4x_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
11010 		}
11011 
11012 		vlv_dsi_init(dev_priv);
11013 	} else if (IS_PINEVIEW(dev_priv)) {
11014 		intel_lvds_init(dev_priv);
11015 		intel_crt_init(dev_priv);
11016 	} else if (IS_DISPLAY_RANGE(dev_priv, 3, 4)) {
11017 		bool found = false;
11018 
11019 		if (IS_MOBILE(dev_priv))
11020 			intel_lvds_init(dev_priv);
11021 
11022 		intel_crt_init(dev_priv);
11023 
11024 		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
11025 			drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
11026 			found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
11027 			if (!found && IS_G4X(dev_priv)) {
11028 				drm_dbg_kms(&dev_priv->drm,
11029 					    "probing HDMI on SDVOB\n");
11030 				g4x_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
11031 			}
11032 
11033 			if (!found && IS_G4X(dev_priv))
11034 				g4x_dp_init(dev_priv, DP_B, PORT_B);
11035 		}
11036 
11037 		/* Before G4X, SDVOC doesn't have its own detect register */
11038 
11039 		if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
11040 			drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
11041 			found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
11042 		}
11043 
11044 		if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
11046 			if (IS_G4X(dev_priv)) {
11047 				drm_dbg_kms(&dev_priv->drm,
11048 					    "probing HDMI on SDVOC\n");
11049 				g4x_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
11050 			}
11051 			if (IS_G4X(dev_priv))
11052 				g4x_dp_init(dev_priv, DP_C, PORT_C);
11053 		}
11054 
11055 		if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
11056 			g4x_dp_init(dev_priv, DP_D, PORT_D);
11057 
11058 		if (SUPPORTS_TV(dev_priv))
11059 			intel_tv_init(dev_priv);
11060 	} else if (IS_DISPLAY_VER(dev_priv, 2)) {
11061 		if (IS_I85X(dev_priv))
11062 			intel_lvds_init(dev_priv);
11063 
11064 		intel_crt_init(dev_priv);
11065 		intel_dvo_init(dev_priv);
11066 	}
11067 
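	/* All encoders are registered; now fill in their CRTC and clone masks. */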
11068 	for_each_intel_encoder(&dev_priv->drm, encoder) {
11069 		encoder->base.possible_crtcs =
11070 			intel_encoder_possible_crtcs(encoder);
11071 		encoder->base.possible_clones =
11072 			intel_encoder_possible_clones(encoder);
11073 	}
11074 
11075 	intel_init_pch_refclk(dev_priv);
11076 
11077 	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
11078 }
11079 
11080 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
11081 {
11082 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
11083 
11084 	drm_framebuffer_cleanup(fb);
11085 	intel_frontbuffer_put(intel_fb->frontbuffer);
11086 
11087 	kfree(intel_fb);
11088 }
11089 
11090 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
11091 						struct drm_file *file,
11092 						unsigned int *handle)
11093 {
11094 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11095 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
11096 
11097 	if (i915_gem_object_is_userptr(obj)) {
11098 		drm_dbg(&i915->drm,
11099 			"attempting to use a userptr for a framebuffer, denied\n");
11100 		return -EINVAL;
11101 	}
11102 
11103 	return drm_gem_handle_create(file, &obj->base, handle);
11104 }
11105 
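/*
 * Legacy DIRTYFB ioctl: flush the frontbuffer so consumers such as
 * FBC and PSR pick up CPU rendering to the fb.
 */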
11106 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
11107 					struct drm_file *file,
11108 					unsigned flags, unsigned color,
11109 					struct drm_clip_rect *clips,
11110 					unsigned num_clips)
11111 {
11112 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11113 
11114 	i915_gem_object_flush_if_display(obj);
11115 	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
11116 
11117 	return 0;
11118 }
11119 
11120 static const struct drm_framebuffer_funcs intel_fb_funcs = {
11121 	.destroy = intel_user_framebuffer_destroy,
11122 	.create_handle = intel_user_framebuffer_create_handle,
11123 	.dirty = intel_user_framebuffer_dirty,
11124 };
11125 
11126 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
11127 				  struct drm_i915_gem_object *obj,
11128 				  struct drm_mode_fb_cmd2 *mode_cmd)
11129 {
11130 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
11131 	struct drm_framebuffer *fb = &intel_fb->base;
11132 	u32 max_stride;
11133 	unsigned int tiling, stride;
11134 	int ret = -EINVAL;
11135 	int i;
11136 
11137 	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
11138 	if (!intel_fb->frontbuffer)
11139 		return -ENOMEM;
11140 
11141 	i915_gem_object_lock(obj, NULL);
11142 	tiling = i915_gem_object_get_tiling(obj);
11143 	stride = i915_gem_object_get_stride(obj);
11144 	i915_gem_object_unlock(obj);
11145 
11146 	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
11147 		/*
11148 		 * If there's a fence, enforce that
11149 		 * the fb modifier and tiling mode match.
11150 		 */
11151 		if (tiling != I915_TILING_NONE &&
11152 		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
11153 			drm_dbg_kms(&dev_priv->drm,
11154 				    "tiling_mode doesn't match fb modifier\n");
11155 			goto err;
11156 		}
11157 	} else {
11158 		if (tiling == I915_TILING_X) {
11159 			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
11160 		} else if (tiling == I915_TILING_Y) {
11161 			drm_dbg_kms(&dev_priv->drm,
11162 				    "No Y tiling for legacy addfb\n");
11163 			goto err;
11164 		}
11165 	}
11166 
11167 	if (!drm_any_plane_has_format(&dev_priv->drm,
11168 				      mode_cmd->pixel_format,
11169 				      mode_cmd->modifier[0])) {
11170 		drm_dbg_kms(&dev_priv->drm,
11171 			    "unsupported pixel format %p4cc / modifier 0x%llx\n",
11172 			    &mode_cmd->pixel_format, mode_cmd->modifier[0]);
11173 		goto err;
11174 	}
11175 
11176 	/*
11177 	 * gen2/3 display engine uses the fence if present,
11178 	 * so the tiling mode must match the fb modifier exactly.
11179 	 */
11180 	if (DISPLAY_VER(dev_priv) < 4 &&
11181 	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
11182 		drm_dbg_kms(&dev_priv->drm,
11183 			    "tiling_mode must match fb modifier exactly on gen2/3\n");
11184 		goto err;
11185 	}
11186 
11187 	max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
11188 					 mode_cmd->modifier[0]);
11189 	if (mode_cmd->pitches[0] > max_stride) {
11190 		drm_dbg_kms(&dev_priv->drm,
11191 			    "%s pitch (%u) must be at most %d\n",
11192 			    mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
11193 			    "tiled" : "linear",
11194 			    mode_cmd->pitches[0], max_stride);
11195 		goto err;
11196 	}
11197 
11198 	/*
11199 	 * If there's a fence, enforce that
11200 	 * the fb pitch and fence stride match.
11201 	 */
11202 	if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
11203 		drm_dbg_kms(&dev_priv->drm,
11204 			    "pitch (%d) must match tiling stride (%d)\n",
11205 			    mode_cmd->pitches[0], stride);
11206 		goto err;
11207 	}
11208 
11209 	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
11210 	if (mode_cmd->offsets[0] != 0) {
11211 		drm_dbg_kms(&dev_priv->drm,
11212 			    "plane 0 offset (0x%08x) must be 0\n",
11213 			    mode_cmd->offsets[0]);
11214 		goto err;
11215 	}
11216 
11217 	drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
11218 
11219 	for (i = 0; i < fb->format->num_planes; i++) {
11220 		u32 stride_alignment;
11221 
11222 		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
11223 			drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
11224 				    i);
11225 			goto err;
11226 		}
11227 
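		/*
		 * intel_fb_stride_alignment() returns a power of two, so
		 * the mask below is equivalent to pitches[i] % alignment.
		 */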
11228 		stride_alignment = intel_fb_stride_alignment(fb, i);
11229 		if (fb->pitches[i] & (stride_alignment - 1)) {
11230 			drm_dbg_kms(&dev_priv->drm,
11231 				    "plane %d pitch (%d) must be at least %u byte aligned\n",
11232 				    i, fb->pitches[i], stride_alignment);
11233 			goto err;
11234 		}
11235 
11236 		if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) {
11237 			int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);
11238 
11239 			if (fb->pitches[i] != ccs_aux_stride) {
11240 				drm_dbg_kms(&dev_priv->drm,
11241 					    "ccs aux plane %d pitch (%d) must be %d\n",
11242 					    i,
11243 					    fb->pitches[i], ccs_aux_stride);
11244 				goto err;
11245 			}
11246 		}
11247 
11248 		fb->obj[i] = &obj->base;
11249 	}
11250 
11251 	ret = intel_fill_fb_info(dev_priv, fb);
11252 	if (ret)
11253 		goto err;
11254 
11255 	ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
11256 	if (ret) {
11257 		drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
11258 		goto err;
11259 	}
11260 
11261 	return 0;
11262 
11263 err:
11264 	intel_frontbuffer_put(intel_fb->frontbuffer);
11265 	return ret;
11266 }
11267 
11268 static struct drm_framebuffer *
11269 intel_user_framebuffer_create(struct drm_device *dev,
11270 			      struct drm_file *filp,
11271 			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
11272 {
11273 	struct drm_framebuffer *fb;
11274 	struct drm_i915_gem_object *obj;
11275 	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
11276 
11277 	obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
11278 	if (!obj)
11279 		return ERR_PTR(-ENOENT);
11280 
11281 	fb = intel_framebuffer_create(obj, &mode_cmd);
11282 	i915_gem_object_put(obj);
11283 
11284 	return fb;
11285 }
11286 
11287 static enum drm_mode_status
11288 intel_mode_valid(struct drm_device *dev,
11289 		 const struct drm_display_mode *mode)
11290 {
11291 	struct drm_i915_private *dev_priv = to_i915(dev);
11292 	int hdisplay_max, htotal_max;
11293 	int vdisplay_max, vtotal_max;
11294 
11295 	/*
11296 	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
11297 	 * of DBLSCAN modes to the output's mode list when they detect
11298 	 * the scaling mode property on the connector. And they don't
11299 	 * ask the kernel to validate those modes in any way until
11300 	 * modeset time at which point the client gets a protocol error.
11301 	 * So in order to not upset those clients we silently ignore the
11302 	 * DBLSCAN flag on such connectors. For other connectors we will
11303 	 * reject modes with the DBLSCAN flag in encoder->compute_config().
11304 	 * And we always reject DBLSCAN modes in connector->mode_valid()
11305 	 * as we never want such modes on the connector's mode list.
11306 	 */
11307 
11308 	if (mode->vscan > 1)
11309 		return MODE_NO_VSCAN;
11310 
11311 	if (mode->flags & DRM_MODE_FLAG_HSKEW)
11312 		return MODE_H_ILLEGAL;
11313 
11314 	if (mode->flags & (DRM_MODE_FLAG_CSYNC |
11315 			   DRM_MODE_FLAG_NCSYNC |
11316 			   DRM_MODE_FLAG_PCSYNC))
11317 		return MODE_HSYNC;
11318 
11319 	if (mode->flags & (DRM_MODE_FLAG_BCAST |
11320 			   DRM_MODE_FLAG_PIXMUX |
11321 			   DRM_MODE_FLAG_CLKDIV2))
11322 		return MODE_BAD;
11323 
11324 	/* Transcoder timing limits */
11325 	if (DISPLAY_VER(dev_priv) >= 11) {
11326 		hdisplay_max = 16384;
11327 		vdisplay_max = 8192;
11328 		htotal_max = 16384;
11329 		vtotal_max = 8192;
11330 	} else if (DISPLAY_VER(dev_priv) >= 9 ||
11331 		   IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
11332 		hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
11333 		vdisplay_max = 4096;
11334 		htotal_max = 8192;
11335 		vtotal_max = 8192;
11336 	} else if (DISPLAY_VER(dev_priv) >= 3) {
11337 		hdisplay_max = 4096;
11338 		vdisplay_max = 4096;
11339 		htotal_max = 8192;
11340 		vtotal_max = 8192;
11341 	} else {
11342 		hdisplay_max = 2048;
11343 		vdisplay_max = 2048;
11344 		htotal_max = 4096;
11345 		vtotal_max = 4096;
11346 	}
11347 
11348 	if (mode->hdisplay > hdisplay_max ||
11349 	    mode->hsync_start > htotal_max ||
11350 	    mode->hsync_end > htotal_max ||
11351 	    mode->htotal > htotal_max)
11352 		return MODE_H_ILLEGAL;
11353 
11354 	if (mode->vdisplay > vdisplay_max ||
11355 	    mode->vsync_start > vtotal_max ||
11356 	    mode->vsync_end > vtotal_max ||
11357 	    mode->vtotal > vtotal_max)
11358 		return MODE_V_ILLEGAL;
11359 
11360 	if (DISPLAY_VER(dev_priv) >= 5) {
11361 		if (mode->hdisplay < 64 ||
11362 		    mode->htotal - mode->hdisplay < 32)
11363 			return MODE_H_ILLEGAL;
11364 
11365 		if (mode->vtotal - mode->vdisplay < 5)
11366 			return MODE_V_ILLEGAL;
11367 	} else {
11368 		if (mode->htotal - mode->hdisplay < 32)
11369 			return MODE_H_ILLEGAL;
11370 
11371 		if (mode->vtotal - mode->vdisplay < 3)
11372 			return MODE_V_ILLEGAL;
11373 	}
11374 
11375 	return MODE_OK;
11376 }
11377 
11378 enum drm_mode_status
11379 intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv,
11380 				const struct drm_display_mode *mode,
11381 				bool bigjoiner)
11382 {
11383 	int plane_width_max, plane_height_max;
11384 
11385 	/*
11386 	 * intel_mode_valid() should be
11387 	 * sufficient on older platforms.
11388 	 */
11389 	if (DISPLAY_VER(dev_priv) < 9)
11390 		return MODE_OK;
11391 
11392 	/*
11393 	 * Most people will probably want a fullscreen
11394 	 * plane so let's not advertise modes that are
11395 	 * too big for that.
11396 	 */
11397 	if (DISPLAY_VER(dev_priv) >= 11) {
11398 		plane_width_max = 5120 << bigjoiner;
11399 		plane_height_max = 4320;
11400 	} else {
11401 		plane_width_max = 5120;
11402 		plane_height_max = 4096;
11403 	}
11404 
11405 	if (mode->hdisplay > plane_width_max)
11406 		return MODE_H_ILLEGAL;
11407 
11408 	if (mode->vdisplay > plane_height_max)
11409 		return MODE_V_ILLEGAL;
11410 
11411 	return MODE_OK;
11412 }
11413 
11414 static const struct drm_mode_config_funcs intel_mode_funcs = {
11415 	.fb_create = intel_user_framebuffer_create,
11416 	.get_format_info = intel_get_format_info,
11417 	.output_poll_changed = intel_fbdev_output_poll_changed,
11418 	.mode_valid = intel_mode_valid,
11419 	.atomic_check = intel_atomic_check,
11420 	.atomic_commit = intel_atomic_commit,
11421 	.atomic_state_alloc = intel_atomic_state_alloc,
11422 	.atomic_state_clear = intel_atomic_state_clear,
11423 	.atomic_state_free = intel_atomic_state_free,
11424 };
11425 
11426 /**
11427  * intel_init_display_hooks - initialize the display modesetting hooks
11428  * @dev_priv: device private
11429  */
11430 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
11431 {
11432 	intel_init_cdclk_hooks(dev_priv);
11433 	intel_init_audio_hooks(dev_priv);
11434 
11435 	intel_dpll_init_clock_hook(dev_priv);
11436 
11437 	if (DISPLAY_VER(dev_priv) >= 9) {
11438 		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
11439 		dev_priv->display.crtc_enable = hsw_crtc_enable;
11440 		dev_priv->display.crtc_disable = hsw_crtc_disable;
11441 	} else if (HAS_DDI(dev_priv)) {
11442 		dev_priv->display.get_pipe_config = hsw_get_pipe_config;
11443 		dev_priv->display.crtc_enable = hsw_crtc_enable;
11444 		dev_priv->display.crtc_disable = hsw_crtc_disable;
11445 	} else if (HAS_PCH_SPLIT(dev_priv)) {
11446 		dev_priv->display.get_pipe_config = ilk_get_pipe_config;
11447 		dev_priv->display.crtc_enable = ilk_crtc_enable;
11448 		dev_priv->display.crtc_disable = ilk_crtc_disable;
11449 	} else if (IS_CHERRYVIEW(dev_priv) ||
11450 		   IS_VALLEYVIEW(dev_priv)) {
11451 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
11452 		dev_priv->display.crtc_enable = valleyview_crtc_enable;
11453 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
11454 	} else {
11455 		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
11456 		dev_priv->display.crtc_enable = i9xx_crtc_enable;
11457 		dev_priv->display.crtc_disable = i9xx_crtc_disable;
11458 	}
11459 
11460 	intel_fdi_init_hook(dev_priv);
11461 
11462 	if (DISPLAY_VER(dev_priv) >= 9) {
11463 		dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
11464 		dev_priv->display.get_initial_plane_config = skl_get_initial_plane_config;
11465 	} else {
11466 		dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
11467 		dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config;
11468 	}
11470 }
11471 
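/*
 * Seed the cdclk software state from the hardware, so the first
 * atomic commit starts from whatever frequency the BIOS left enabled.
 */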
11472 void intel_modeset_init_hw(struct drm_i915_private *i915)
11473 {
11474 	struct intel_cdclk_state *cdclk_state =
11475 		to_intel_cdclk_state(i915->cdclk.obj.state);
11476 
11477 	intel_update_cdclk(i915);
11478 	intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
11479 	cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
11480 }
11481 
11482 static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
11483 {
11484 	struct drm_plane *plane;
11485 	struct intel_crtc *crtc;
11486 
11487 	for_each_intel_crtc(state->dev, crtc) {
11488 		struct intel_crtc_state *crtc_state;
11489 
11490 		crtc_state = intel_atomic_get_crtc_state(state, crtc);
11491 		if (IS_ERR(crtc_state))
11492 			return PTR_ERR(crtc_state);
11493 
11494 		if (crtc_state->hw.active) {
11495 			/*
11496 			 * Preserve the inherited flag to avoid
11497 			 * taking the full modeset path.
11498 			 */
11499 			crtc_state->inherited = true;
11500 		}
11501 	}
11502 
11503 	drm_for_each_plane(plane, state->dev) {
11504 		struct drm_plane_state *plane_state;
11505 
11506 		plane_state = drm_atomic_get_plane_state(state, plane);
11507 		if (IS_ERR(plane_state))
11508 			return PTR_ERR(plane_state);
11509 	}
11510 
11511 	return 0;
11512 }
11513 
11514 /*
11515  * Calculate what we think the watermarks should be for the state we've read
11516  * out of the hardware and then immediately program those watermarks so that
11517  * we ensure the hardware settings match our internal state.
11518  *
11519  * We can calculate what we think WM's should be by creating a duplicate of the
11520  * current state (which was constructed during hardware readout) and running it
11521  * through the atomic check code to calculate new watermark values in the
11522  * state object.
11523  */
11524 static void sanitize_watermarks(struct drm_i915_private *dev_priv)
11525 {
11526 	struct drm_atomic_state *state;
11527 	struct intel_atomic_state *intel_state;
11528 	struct intel_crtc *crtc;
11529 	struct intel_crtc_state *crtc_state;
11530 	struct drm_modeset_acquire_ctx ctx;
11531 	int ret;
11532 	int i;
11533 
11534 	/* Only supported on platforms that use atomic watermark design */
11535 	if (!dev_priv->display.optimize_watermarks)
11536 		return;
11537 
11538 	state = drm_atomic_state_alloc(&dev_priv->drm);
11539 	if (drm_WARN_ON(&dev_priv->drm, !state))
11540 		return;
11541 
11542 	intel_state = to_intel_atomic_state(state);
11543 
11544 	drm_modeset_acquire_init(&ctx, 0);
11545 
11546 retry:
11547 	state->acquire_ctx = &ctx;
11548 
11549 	/*
11550 	 * Hardware readout is the only time we don't want to calculate
11551 	 * intermediate watermarks (since we don't trust the current
11552 	 * watermarks).
11553 	 */
11554 	if (!HAS_GMCH(dev_priv))
11555 		intel_state->skip_intermediate_wm = true;
11556 
11557 	ret = sanitize_watermarks_add_affected(state);
11558 	if (ret)
11559 		goto fail;
11560 
11561 	ret = intel_atomic_check(&dev_priv->drm, state);
11562 	if (ret)
11563 		goto fail;
11564 
11565 	/* Write calculated watermark values back */
11566 	for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
11567 		crtc_state->wm.need_postvbl_update = true;
11568 		dev_priv->display.optimize_watermarks(intel_state, crtc);
11569 
11570 		to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
11571 	}
11572 
11573 fail:
11574 	if (ret == -EDEADLK) {
11575 		drm_atomic_state_clear(state);
11576 		drm_modeset_backoff(&ctx);
11577 		goto retry;
11578 	}
11579 
11580 	/*
11581 	 * If we fail here, it means that the hardware appears to be
11582 	 * programmed in a way that shouldn't be possible, given our
11583 	 * understanding of watermark requirements.  This might mean a
11584 	 * mistake in the hardware readout code or a mistake in the
11585 	 * watermark calculations for a given platform.  Raise a WARN
11586 	 * so that this is noticeable.
11587 	 *
11588 	 * If this actually happens, we'll have to just leave the
11589 	 * BIOS-programmed watermarks untouched and hope for the best.
11590 	 */
11591 	drm_WARN(&dev_priv->drm, ret,
11592 		 "Could not determine valid watermarks for inherited state\n");
11593 
11594 	drm_atomic_state_put(state);
11595 
11596 	drm_modeset_drop_locks(&ctx);
11597 	drm_modeset_acquire_fini(&ctx);
11598 }
11599 
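/*
 * Ironlake derives the FDI PLL frequency from the BIOS-programmed
 * FDI_PLL_BIOS_0 register; SNB/IVB use a constant 270000 kHz. On
 * platforms without FDI, fdi_pll_freq is left at zero.
 */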
11600 static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
11601 {
11602 	if (IS_IRONLAKE(dev_priv)) {
11603 		u32 fdi_pll_clk =
11604 			intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
11605 
11606 		dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
11607 	} else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
11608 		dev_priv->fdi_pll_freq = 270000;
11609 	} else {
11610 		return;
11611 	}
11612 
11613 	drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
11614 }
11615 
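/*
 * Commit the state we read out of the hardware back into itself once,
 * so all derived plane/crtc state is fully computed before the first
 * commit from userspace.
 */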
11616 static int intel_initial_commit(struct drm_device *dev)
11617 {
11618 	struct drm_atomic_state *state = NULL;
11619 	struct drm_modeset_acquire_ctx ctx;
11620 	struct intel_crtc *crtc;
11621 	int ret = 0;
11622 
11623 	state = drm_atomic_state_alloc(dev);
11624 	if (!state)
11625 		return -ENOMEM;
11626 
11627 	drm_modeset_acquire_init(&ctx, 0);
11628 
11629 retry:
11630 	state->acquire_ctx = &ctx;
11631 
11632 	for_each_intel_crtc(dev, crtc) {
11633 		struct intel_crtc_state *crtc_state =
11634 			intel_atomic_get_crtc_state(state, crtc);
11635 
11636 		if (IS_ERR(crtc_state)) {
11637 			ret = PTR_ERR(crtc_state);
11638 			goto out;
11639 		}
11640 
11641 		if (crtc_state->hw.active) {
11642 			struct intel_encoder *encoder;
11643 
11644 			/*
11645 			 * We've not yet detected sink capabilities
11646 			 * (audio,infoframes,etc.) and thus we don't want to
11647 			 * force a full state recomputation yet. We want that to
11648 			 * happen only for the first real commit from userspace.
11649 			 * So preserve the inherited flag for the time being.
11650 			 */
11651 			crtc_state->inherited = true;
11652 
11653 			ret = drm_atomic_add_affected_planes(state, &crtc->base);
11654 			if (ret)
11655 				goto out;
11656 
11657 			/*
11658 			 * FIXME hack to force a LUT update to avoid the
11659 			 * plane update forcing the pipe gamma on without
11660 			 * having a proper LUT loaded. Remove once we
11661 			 * have readout for pipe gamma enable.
11662 			 */
11663 			crtc_state->uapi.color_mgmt_changed = true;
11664 
11665 			for_each_intel_encoder_mask(dev, encoder,
11666 						    crtc_state->uapi.encoder_mask) {
11667 				if (encoder->initial_fastset_check &&
11668 				    !encoder->initial_fastset_check(encoder, crtc_state)) {
11669 					ret = drm_atomic_add_affected_connectors(state,
11670 										 &crtc->base);
11671 					if (ret)
11672 						goto out;
11673 				}
11674 			}
11675 		}
11676 	}
11677 
11678 	ret = drm_atomic_commit(state);
11679 
11680 out:
11681 	if (ret == -EDEADLK) {
11682 		drm_atomic_state_clear(state);
11683 		drm_modeset_backoff(&ctx);
11684 		goto retry;
11685 	}
11686 
11687 	drm_atomic_state_put(state);
11688 
11689 	drm_modeset_drop_locks(&ctx);
11690 	drm_modeset_acquire_fini(&ctx);
11691 
11692 	return ret;
11693 }
11694 
11695 static void intel_mode_config_init(struct drm_i915_private *i915)
11696 {
11697 	struct drm_mode_config *mode_config = &i915->drm.mode_config;
11698 
11699 	drm_mode_config_init(&i915->drm);
11700 	INIT_LIST_HEAD(&i915->global_obj_list);
11701 
11702 	mode_config->min_width = 0;
11703 	mode_config->min_height = 0;
11704 
11705 	mode_config->preferred_depth = 24;
11706 	mode_config->prefer_shadow = 1;
11707 
11708 	mode_config->allow_fb_modifiers = true;
11709 
11710 	mode_config->funcs = &intel_mode_funcs;
11711 
11712 	mode_config->async_page_flip = has_async_flips(i915);
11713 
11714 	/*
11715 	 * Maximum framebuffer dimensions, chosen to match
11716 	 * the maximum render engine surface size on gen4+.
11717 	 */
11718 	if (DISPLAY_VER(i915) >= 7) {
11719 		mode_config->max_width = 16384;
11720 		mode_config->max_height = 16384;
11721 	} else if (DISPLAY_VER(i915) >= 4) {
11722 		mode_config->max_width = 8192;
11723 		mode_config->max_height = 8192;
11724 	} else if (IS_DISPLAY_VER(i915, 3)) {
11725 		mode_config->max_width = 4096;
11726 		mode_config->max_height = 4096;
11727 	} else {
11728 		mode_config->max_width = 2048;
11729 		mode_config->max_height = 2048;
11730 	}
11731 
11732 	if (IS_I845G(i915) || IS_I865G(i915)) {
11733 		mode_config->cursor_width = IS_I845G(i915) ? 64 : 512;
11734 		mode_config->cursor_height = 1023;
11735 	} else if (IS_I830(i915) || IS_I85X(i915) ||
11736 		   IS_I915G(i915) || IS_I915GM(i915)) {
11737 		mode_config->cursor_width = 64;
11738 		mode_config->cursor_height = 64;
11739 	} else {
11740 		mode_config->cursor_width = 256;
11741 		mode_config->cursor_height = 256;
11742 	}
11743 }
11744 
11745 static void intel_mode_config_cleanup(struct drm_i915_private *i915)
11746 {
11747 	intel_atomic_global_obj_cleanup(i915);
11748 	drm_mode_config_cleanup(&i915->drm);
11749 }
11750 
11751 static void plane_config_fini(struct intel_initial_plane_config *plane_config)
11752 {
11753 	if (plane_config->fb) {
11754 		struct drm_framebuffer *fb = &plane_config->fb->base;
11755 
11756 		/* We may only have the stub and not a full framebuffer */
11757 		if (drm_framebuffer_read_refcount(fb))
11758 			drm_framebuffer_put(fb);
11759 		else
11760 			kfree(fb);
11761 	}
11762 
11763 	if (plane_config->vma)
11764 		i915_vma_put(plane_config->vma);
11765 }
11766 
11767 /* part #1: call before irq install */
11768 int intel_modeset_init_noirq(struct drm_i915_private *i915)
11769 {
11770 	int ret;
11771 
11772 	if (i915_inject_probe_failure(i915))
11773 		return -ENODEV;
11774 
11775 	if (HAS_DISPLAY(i915)) {
11776 		ret = drm_vblank_init(&i915->drm,
11777 				      INTEL_NUM_PIPES(i915));
11778 		if (ret)
11779 			return ret;
11780 	}
11781 
11782 	intel_bios_init(i915);
11783 
11784 	ret = intel_vga_register(i915);
11785 	if (ret)
11786 		goto cleanup_bios;
11787 
11788 	/* FIXME: completely on the wrong abstraction layer */
11789 	intel_power_domains_init_hw(i915, false);
11790 
11791 	intel_csr_ucode_init(i915);
11792 
11793 	i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
11794 	i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
11795 					WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
11796 
11797 	i915->framestart_delay = 1; /* 1-4 */
11798 
11799 	intel_mode_config_init(i915);
11800 
11801 	ret = intel_cdclk_init(i915);
11802 	if (ret)
11803 		goto cleanup_vga_client_pw_domain_csr;
11804 
11805 	ret = intel_dbuf_init(i915);
11806 	if (ret)
11807 		goto cleanup_vga_client_pw_domain_csr;
11808 
11809 	ret = intel_bw_init(i915);
11810 	if (ret)
11811 		goto cleanup_vga_client_pw_domain_csr;
11812 
11813 	init_llist_head(&i915->atomic_helper.free_list);
11814 	INIT_WORK(&i915->atomic_helper.free_work,
11815 		  intel_atomic_helper_free_state_worker);
11816 
11817 	intel_init_quirks(i915);
11818 
11819 	intel_fbc_init(i915);
11820 
11821 	return 0;
11822 
11823 cleanup_vga_client_pw_domain_csr:
11824 	intel_csr_ucode_fini(i915);
11825 	intel_power_domains_driver_remove(i915);
11826 	intel_vga_unregister(i915);
11827 cleanup_bios:
11828 	intel_bios_driver_remove(i915);
11829 
11830 	return ret;
11831 }
11832 
11833 /* part #2: call after irq install, but before gem init */
11834 int intel_modeset_init_nogem(struct drm_i915_private *i915)
11835 {
11836 	struct drm_device *dev = &i915->drm;
11837 	enum pipe pipe;
11838 	struct intel_crtc *crtc;
11839 	int ret;
11840 
11841 	intel_init_pm(i915);
11842 
11843 	intel_panel_sanitize_ssc(i915);
11844 
11845 	intel_pps_setup(i915);
11846 
11847 	intel_gmbus_setup(i915);
11848 
11849 	drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
11850 		    INTEL_NUM_PIPES(i915),
11851 		    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
11852 
11853 	if (HAS_DISPLAY(i915)) {
11854 		for_each_pipe(i915, pipe) {
11855 			ret = intel_crtc_init(i915, pipe);
11856 			if (ret) {
11857 				intel_mode_config_cleanup(i915);
11858 				return ret;
11859 			}
11860 		}
11861 	}
11862 
11863 	intel_plane_possible_crtcs_init(i915);
11864 	intel_shared_dpll_init(dev);
11865 	intel_update_fdi_pll_freq(i915);
11866 
11867 	intel_update_czclk(i915);
11868 	intel_modeset_init_hw(i915);
11869 	intel_dpll_update_ref_clks(i915);
11870 
11871 	intel_hdcp_component_init(i915);
11872 
11873 	if (i915->max_cdclk_freq == 0)
11874 		intel_update_max_cdclk(i915);
11875 
11876 	/*
11877 	 * If the platform has HTI, we need to find out whether it has reserved
11878 	 * any display resources before we create our display outputs.
11879 	 */
11880 	if (INTEL_INFO(i915)->display.has_hti)
11881 		i915->hti_state = intel_de_read(i915, HDPORT_STATE);
11882 
11883 	/* Just disable it once at startup */
11884 	intel_vga_disable(i915);
11885 	intel_setup_outputs(i915);
11886 
11887 	drm_modeset_lock_all(dev);
11888 	intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
11889 	drm_modeset_unlock_all(dev);
11890 
11891 	for_each_intel_crtc(dev, crtc) {
11892 		struct intel_initial_plane_config plane_config = {};
11893 
11894 		if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
11895 			continue;
11896 
11897 		/*
11898 		 * Note that reserving the BIOS fb up front prevents us
11899 		 * from stuffing other stolen allocations like the ring
11900 		 * on top.  This prevents some ugliness at boot time, and
11901 		 * can even allow for smooth boot transitions if the BIOS
11902 		 * fb is large enough for the active pipe configuration.
11903 		 */
11904 		i915->display.get_initial_plane_config(crtc, &plane_config);
11905 
11906 		/*
11907 		 * If the fb is shared between multiple heads, we'll
11908 		 * just get the first one.
11909 		 */
11910 		intel_find_initial_plane_obj(crtc, &plane_config);
11911 
11912 		plane_config_fini(&plane_config);
11913 	}
11914 
11915 	/*
11916 	 * Make sure hardware watermarks really match the state we read out.
11917 	 * Note that we need to do this after reconstructing the BIOS fb's
11918 	 * since the watermark calculation done here will use pstate->fb.
11919 	 */
11920 	if (!HAS_GMCH(i915))
11921 		sanitize_watermarks(i915);
11922 
11923 	return 0;
11924 }
11925 
11926 /* part #3: call after gem init */
11927 int intel_modeset_init(struct drm_i915_private *i915)
11928 {
11929 	int ret;
11930 
11931 	if (!HAS_DISPLAY(i915))
11932 		return 0;
11933 
11934 	/*
11935 	 * Force all active planes to recompute their states. So that on
11936 	 * mode_setcrtc after probe, all the intel_plane_state variables
11937 	 * are already calculated and there is no assert_plane warnings
11938 	 * during bootup.
11939 	 */
11940 	ret = intel_initial_commit(&i915->drm);
11941 	if (ret)
11942 		drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret);
11943 
11944 	intel_overlay_setup(i915);
11945 
11946 	ret = intel_fbdev_init(&i915->drm);
11947 	if (ret)
11948 		return ret;
11949 
11950 	/* Only enable hotplug handling once the fbdev is fully set up. */
11951 	intel_hpd_init(i915);
11952 	intel_hpd_poll_disable(i915);
11953 
11954 	intel_init_ipc(i915);
11955 
11956 	return 0;
11957 }
11958 
11959 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
11960 {
11961 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
11962 	/* 640x480@60Hz, ~25175 kHz */
11963 	struct dpll clock = {
11964 		.m1 = 18,
11965 		.m2 = 7,
11966 		.p1 = 13,
11967 		.p2 = 4,
11968 		.n = 2,
11969 	};
11970 	u32 dpll, fp;
11971 	int i;
11972 
11973 	drm_WARN_ON(&dev_priv->drm,
11974 		    i9xx_calc_dpll_params(48000, &clock) != 25154);
11975 
11976 	drm_dbg_kms(&dev_priv->drm,
11977 		    "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
11978 		    pipe_name(pipe), clock.vco, clock.dot);
11979 
11980 	fp = i9xx_dpll_compute_fp(&clock);
11981 	dpll = DPLL_DVO_2X_MODE |
11982 		DPLL_VGA_MODE_DIS |
11983 		((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
11984 		PLL_P2_DIVIDE_BY_4 |
11985 		PLL_REF_INPUT_DREFCLK |
11986 		DPLL_VCO_ENABLE;
11987 
11988 	intel_de_write(dev_priv, FP0(pipe), fp);
11989 	intel_de_write(dev_priv, FP1(pipe), fp);
11990 
11991 	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
11992 	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
11993 	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
11994 	intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
11995 	intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
11996 	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
11997 	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
11998 
11999 	/*
12000 	 * Apparently we need to have VGA mode enabled prior to changing
12001 	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
12002 	 * dividers, even though the register value does change.
12003 	 */
12004 	intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
12005 	intel_de_write(dev_priv, DPLL(pipe), dpll);
12006 
12007 	/* Wait for the clocks to stabilize. */
12008 	intel_de_posting_read(dev_priv, DPLL(pipe));
12009 	udelay(150);
12010 
12011 	/* The pixel multiplier can only be updated once the
12012 	 * DPLL is enabled and the clocks are stable.
12013 	 *
12014 	 * So write it again.
12015 	 */
12016 	intel_de_write(dev_priv, DPLL(pipe), dpll);
12017 
12018 	/* We do this three times for luck */
12019 	for (i = 0; i < 3; i++) {
12020 		intel_de_write(dev_priv, DPLL(pipe), dpll);
12021 		intel_de_posting_read(dev_priv, DPLL(pipe));
12022 		udelay(150); /* wait for warmup */
12023 	}
12024 
12025 	intel_de_write(dev_priv, PIPECONF(pipe),
12026 		       PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
12027 	intel_de_posting_read(dev_priv, PIPECONF(pipe));
12028 
12029 	intel_wait_for_pipe_scanline_moving(crtc);
12030 }
12031 
12032 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
12033 {
12034 	struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12035 
12036 	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
12037 		    pipe_name(pipe));
12038 
12039 	drm_WARN_ON(&dev_priv->drm,
12040 		    intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
12041 		    DISPLAY_PLANE_ENABLE);
12042 	drm_WARN_ON(&dev_priv->drm,
12043 		    intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
12044 		    DISPLAY_PLANE_ENABLE);
12045 	drm_WARN_ON(&dev_priv->drm,
12046 		    intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
12047 		    DISPLAY_PLANE_ENABLE);
12048 	drm_WARN_ON(&dev_priv->drm,
12049 		    intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
12050 	drm_WARN_ON(&dev_priv->drm,
12051 		    intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);
12052 
12053 	intel_de_write(dev_priv, PIPECONF(pipe), 0);
12054 	intel_de_posting_read(dev_priv, PIPECONF(pipe));
12055 
12056 	intel_wait_for_pipe_scanline_stopped(crtc);
12057 
12058 	intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
12059 	intel_de_posting_read(dev_priv, DPLL(pipe));
12060 }
12061 
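/*
 * Pre-gen4 primary planes can be assigned to either pipe. If the BIOS
 * left a plane feeding a pipe other than the one its CRTC owns, turn
 * the plane off.
 */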
12062 static void
12063 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
12064 {
12065 	struct intel_crtc *crtc;
12066 
12067 	if (DISPLAY_VER(dev_priv) >= 4)
12068 		return;
12069 
12070 	for_each_intel_crtc(&dev_priv->drm, crtc) {
12071 		struct intel_plane *plane =
12072 			to_intel_plane(crtc->base.primary);
12073 		struct intel_crtc *plane_crtc;
12074 		enum pipe pipe;
12075 
12076 		if (!plane->get_hw_state(plane, &pipe))
12077 			continue;
12078 
12079 		if (pipe == crtc->pipe)
12080 			continue;
12081 
12082 		drm_dbg_kms(&dev_priv->drm,
12083 			    "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
12084 			    plane->base.base.id, plane->base.name);
12085 
12086 		plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
12087 		intel_plane_disable_noatomic(plane_crtc, plane);
12088 	}
12089 }
12090 
12091 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
12092 {
12093 	struct drm_device *dev = crtc->base.dev;
12094 	struct intel_encoder *encoder;
12095 
12096 	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
12097 		return true;
12098 
12099 	return false;
12100 }
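/* Return the first connector currently attached to @encoder, if any. */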
12101 
12102 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
12103 {
12104 	struct drm_device *dev = encoder->base.dev;
12105 	struct intel_connector *connector;
12106 
12107 	for_each_connector_on_encoder(dev, &encoder->base, connector)
12108 		return connector;
12109 
12110 	return NULL;
12111 }
12112 
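/*
 * IBX/CPT have one PCH transcoder per pipe; LPT-H only has PCH
 * transcoder A.
 */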
12113 static bool has_pch_transcoder(struct drm_i915_private *dev_priv,
12114 			      enum pipe pch_transcoder)
12115 {
12116 	return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
12117 		(HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
12118 }
12119 
12120 static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc_state)
12121 {
12122 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
12123 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
12124 	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
12125 
12126 	if (DISPLAY_VER(dev_priv) >= 9 ||
12127 	    IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
12128 		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
12129 		u32 val;
12130 
12131 		if (transcoder_is_dsi(cpu_transcoder))
12132 			return;
12133 
12134 		val = intel_de_read(dev_priv, reg);
12135 		val &= ~HSW_FRAME_START_DELAY_MASK;
12136 		val |= HSW_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12137 		intel_de_write(dev_priv, reg, val);
12138 	} else {
12139 		i915_reg_t reg = PIPECONF(cpu_transcoder);
12140 		u32 val;
12141 
12142 		val = intel_de_read(dev_priv, reg);
12143 		val &= ~PIPECONF_FRAME_START_DELAY_MASK;
12144 		val |= PIPECONF_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12145 		intel_de_write(dev_priv, reg, val);
12146 	}
12147 
12148 	if (!crtc_state->has_pch_encoder)
12149 		return;
12150 
12151 	if (HAS_PCH_IBX(dev_priv)) {
12152 		i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
12153 		u32 val;
12154 
12155 		val = intel_de_read(dev_priv, reg);
12156 		val &= ~TRANS_FRAME_START_DELAY_MASK;
12157 		val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12158 		intel_de_write(dev_priv, reg, val);
12159 	} else {
12160 		enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
12161 		i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
12162 		u32 val;
12163 
12164 		val = intel_de_read(dev_priv, reg);
12165 		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
12166 		val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
12167 		intel_de_write(dev_priv, reg, val);
12168 	}
12169 }
12170 
12171 static void intel_sanitize_crtc(struct intel_crtc *crtc,
12172 				struct drm_modeset_acquire_ctx *ctx)
12173 {
12174 	struct drm_device *dev = crtc->base.dev;
12175 	struct drm_i915_private *dev_priv = to_i915(dev);
12176 	struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
12177 
12178 	if (crtc_state->hw.active) {
12179 		struct intel_plane *plane;
12180 
12181 		/* Clear any frame start delays used for debugging left by the BIOS */
12182 		intel_sanitize_frame_start_delay(crtc_state);
12183 
12184 		/* Disable everything but the primary plane */
12185 		for_each_intel_plane_on_crtc(dev, crtc, plane) {
12186 			const struct intel_plane_state *plane_state =
12187 				to_intel_plane_state(plane->base.state);
12188 
12189 			if (plane_state->uapi.visible &&
12190 			    plane->base.type != DRM_PLANE_TYPE_PRIMARY)
12191 				intel_plane_disable_noatomic(crtc, plane);
12192 		}
12193 
12194 		/*
12195 		 * Disable any background color set by the BIOS, but enable the
12196 		 * gamma and CSC to match how we program our planes.
12197 		 */
12198 		if (DISPLAY_VER(dev_priv) >= 9)
12199 			intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
12200 				       SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
12201 	}
12202 
12203 	/* Adjust the state of the output pipe according to whether we
12204 	 * have active connectors/encoders. */
12205 	if (crtc_state->hw.active && !intel_crtc_has_encoders(crtc) &&
12206 	    !crtc_state->bigjoiner_slave)
12207 		intel_crtc_disable_noatomic(crtc, ctx);
12208 
12209 	if (crtc_state->hw.active || HAS_GMCH(dev_priv)) {
12210 		/*
12211 		 * We start out with underrun reporting disabled to avoid races.
12212 		 * For correct bookkeeping mark this on active crtcs.
12213 		 *
12214 		 * Also on gmch platforms we don't have any hardware bits to
12215 		 * disable the underrun reporting. Which means we need to start
12216 		 * out with underrun reporting disabled also on inactive pipes,
12217 		 * since otherwise we'll complain about the garbage we read when
12218 		 * e.g. coming up after runtime pm.
12219 		 *
12220 		 * No protection against concurrent access is required - at
12221 		 * worst a fifo underrun happens which also sets this to false.
12222 		 */
12223 		crtc->cpu_fifo_underrun_disabled = true;
12224 		/*
12225 		 * We track the PCH transcoder underrun reporting state
12226 		 * within the crtc. With crtc for pipe A housing the underrun
12227 		 * reporting state for PCH transcoder A, crtc for pipe B housing
12228 		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
12229 		 * and marking underrun reporting as disabled for the non-existing
12230 		 * PCH transcoders B and C would prevent enabling the south
12231 		 * error interrupt (see cpt_can_enable_serr_int()).
12232 		 */
12233 		if (has_pch_transcoder(dev_priv, crtc->pipe))
12234 			crtc->pch_fifo_underrun_disabled = true;
12235 	}
12236 }
12237 
12238 static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
12239 {
12240 	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
12241 
12242 	/*
12243 	 * Some SNB BIOSen (e.g. ASUS K53SV) are known to misprogram
12244 	 * the hardware when a high res display is plugged in. The DPLL P
12245 	 * divider is zero, and the pipe timings are bonkers. We'll
12246 	 * try to disable everything in that case.
12247 	 *
12248 	 * FIXME would be nice to be able to sanitize this state
12249 	 * without several WARNs, but for now let's take the easy
12250 	 * road.
12251 	 */
12252 	return IS_SANDYBRIDGE(dev_priv) &&
12253 		crtc_state->hw.active &&
12254 		crtc_state->shared_dpll &&
12255 		crtc_state->port_clock == 0;
12256 }
12257 
12258 static void intel_sanitize_encoder(struct intel_encoder *encoder)
12259 {
12260 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
12261 	struct intel_connector *connector;
12262 	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
12263 	struct intel_crtc_state *crtc_state = crtc ?
12264 		to_intel_crtc_state(crtc->base.state) : NULL;
12265 
12266 	/* We need to check both for a crtc link (meaning that the
12267 	 * encoder is active and trying to read from a pipe) and the
12268 	 * pipe itself being active. */
12269 	bool has_active_crtc = crtc_state &&
12270 		crtc_state->hw.active;
12271 
12272 	if (crtc_state && has_bogus_dpll_config(crtc_state)) {
12273 		drm_dbg_kms(&dev_priv->drm,
12274 			    "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
12275 			    pipe_name(crtc->pipe));
12276 		has_active_crtc = false;
12277 	}
12278 
12279 	connector = intel_encoder_find_connector(encoder);
12280 	if (connector && !has_active_crtc) {
12281 		drm_dbg_kms(&dev_priv->drm,
12282 			    "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
12283 			    encoder->base.base.id,
12284 			    encoder->base.name);
12285 
12286 		/* Connector is active, but has no active pipe. This is
12287 		 * fallout from our resume register restoring. Disable
12288 		 * the encoder manually again. */
12289 		if (crtc_state) {
12290 			struct drm_encoder *best_encoder;
12291 
12292 			drm_dbg_kms(&dev_priv->drm,
12293 				    "[ENCODER:%d:%s] manually disabled\n",
12294 				    encoder->base.base.id,
12295 				    encoder->base.name);
12296 
12297 			/* avoid oopsing in case the hooks consult best_encoder */
12298 			best_encoder = connector->base.state->best_encoder;
12299 			connector->base.state->best_encoder = &encoder->base;
12300 
12301 			/* FIXME NULL atomic state passed! */
12302 			if (encoder->disable)
12303 				encoder->disable(NULL, encoder, crtc_state,
12304 						 connector->base.state);
12305 			if (encoder->post_disable)
12306 				encoder->post_disable(NULL, encoder, crtc_state,
12307 						      connector->base.state);
12308 
12309 			connector->base.state->best_encoder = best_encoder;
12310 		}
12311 		encoder->base.crtc = NULL;
12312 
12313 		/* Inconsistent output/port/pipe state happens presumably due to
12314 		 * a bug in one of the get_hw_state functions. Or someplace else
12315 		 * in our code, like the register restore mess on resume. Clamp
12316 		 * things to off as a safer default. */
12317 
12318 		connector->base.dpms = DRM_MODE_DPMS_OFF;
12319 		connector->base.encoder = NULL;
12320 	}
12321 
12322 	/* notify opregion of the sanitized encoder state */
12323 	intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
12324 
12325 	if (HAS_DDI(dev_priv))
12326 		intel_ddi_sanitize_encoder_pll_mapping(encoder);
12327 }
12328 
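/*
 * Read back, for each plane, whether the hardware currently has it
 * enabled and which pipe it is assigned to, propagate that into the
 * plane/crtc states, and then recompute the per-crtc plane bitmasks.
 */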
/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct drm_i915_private *dev_priv)
{
	struct intel_plane *plane;
	struct intel_crtc *crtc;

	for_each_intel_plane(&dev_priv->drm, plane) {
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);
		struct intel_crtc_state *crtc_state;
		enum pipe pipe = PIPE_A;
		bool visible;

		visible = plane->get_hw_state(plane, &pipe);

		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
		crtc_state = to_intel_crtc_state(crtc->base.state);

		intel_set_plane_visible(crtc_state, plane_state, visible);

		drm_dbg_kms(&dev_priv->drm,
			    "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
			    plane->base.base.id, plane->base.name,
			    enableddisabled(visible), pipe_name(pipe));
	}

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		fixup_plane_bitmasks(crtc_state);
	}
}

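/*
 * Read the current hardware modeset state into our crtc, plane,
 * encoder, DPLL and connector structures (in that order), then derive
 * the cdclk, voltage level and bandwidth bookkeeping from the active
 * crtcs. Incomplete bigjoiner slave state is discarded and recopied
 * from the master at the end.
 */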
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_cdclk_state *cdclk_state =
		to_intel_cdclk_state(dev_priv->cdclk.obj.state);
	struct intel_dbuf_state *dbuf_state =
		to_intel_dbuf_state(dev_priv->dbuf.obj.state);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u8 active_pipes = 0;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		__drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
		intel_crtc_free_hw_state(crtc_state);
		intel_crtc_state_reset(crtc_state, crtc);

		intel_crtc_get_pipe_config(crtc_state);

		crtc_state->hw.enable = crtc_state->hw.active;

		crtc->base.enabled = crtc_state->hw.enable;
		crtc->active = crtc_state->hw.active;

		if (crtc_state->hw.active)
			active_pipes |= BIT(crtc->pipe);

		drm_dbg_kms(&dev_priv->drm,
			    "[CRTC:%d:%s] hw state readout: %s\n",
			    crtc->base.base.id, crtc->base.name,
			    enableddisabled(crtc_state->hw.active));
	}

	dev_priv->active_pipes = cdclk_state->active_pipes =
		dbuf_state->active_pipes = active_pipes;

	readout_plane_state(dev_priv);

	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			struct intel_crtc_state *crtc_state;

			crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
			crtc_state = to_intel_crtc_state(crtc->base.state);

			encoder->base.crtc = &crtc->base;
			intel_encoder_get_config(encoder, crtc_state);
			if (encoder->sync_state)
				encoder->sync_state(encoder, crtc_state);

			/* read out to slave crtc as well for bigjoiner */
			if (crtc_state->bigjoiner) {
				/* the encoder should be linked to the bigjoiner master */
				WARN_ON(crtc_state->bigjoiner_slave);

				crtc = crtc_state->bigjoiner_linked_crtc;
				crtc_state = to_intel_crtc_state(crtc->base.state);
				intel_encoder_get_config(encoder, crtc_state);
			}
		} else {
			encoder->base.crtc = NULL;
		}

		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			    encoder->base.base.id, encoder->base.name,
			    enableddisabled(encoder->base.crtc),
			    pipe_name(pipe));
	}

	intel_dpll_readout_hw_state(dev_priv);

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->get_hw_state(connector)) {
			struct intel_crtc_state *crtc_state;
			struct intel_crtc *crtc;

			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = intel_attached_encoder(connector);
			connector->base.encoder = &encoder->base;

			crtc = to_intel_crtc(encoder->base.crtc);
			crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL;

			if (crtc_state && crtc_state->hw.active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				crtc_state->uapi.connector_mask |=
					drm_connector_mask(&connector->base);
				crtc_state->uapi.encoder_mask |=
					drm_encoder_mask(&encoder->base);
			}
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[CONNECTOR:%d:%s] hw state readout: %s\n",
			    connector->base.base.id, connector->base.name,
			    enableddisabled(connector->base.encoder));
	}
	drm_connector_list_iter_end(&conn_iter);

	for_each_intel_crtc(dev, crtc) {
		struct intel_bw_state *bw_state =
			to_intel_bw_state(dev_priv->bw_obj.state);
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane;
		int min_cdclk = 0;

		if (crtc_state->bigjoiner_slave)
			continue;

		if (crtc_state->hw.active) {
			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, hence the calls below.
			 *
			 * But we don't set all the derived state fully, hence
			 * set a flag to indicate that a full recalculation is
			 * needed on the next commit.
			 */
			crtc_state->inherited = true;

			intel_crtc_update_active_timings(crtc_state);

			intel_crtc_copy_hw_to_uapi_state(crtc_state);
		}

		for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
			const struct intel_plane_state *plane_state =
				to_intel_plane_state(plane->base.state);

			/*
			 * FIXME don't have the fb yet, so can't
			 * use intel_plane_data_rate() :(
			 */
			if (plane_state->uapi.visible)
				crtc_state->data_rate[plane->id] =
					4 * crtc_state->pixel_rate;
			/*
			 * FIXME don't have the fb yet, so can't
			 * use plane->min_cdclk() :(
			 */
			if (plane_state->uapi.visible && plane->min_cdclk) {
				if (crtc_state->double_wide || DISPLAY_VER(dev_priv) >= 10)
					crtc_state->min_cdclk[plane->id] =
						DIV_ROUND_UP(crtc_state->pixel_rate, 2);
				else
					crtc_state->min_cdclk[plane->id] =
						crtc_state->pixel_rate;
			}
			drm_dbg_kms(&dev_priv->drm,
				    "[PLANE:%d:%s] min_cdclk %d kHz\n",
				    plane->base.base.id, plane->base.name,
				    crtc_state->min_cdclk[plane->id]);
		}

		if (crtc_state->hw.active) {
			min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
			if (drm_WARN_ON(dev, min_cdclk < 0))
				min_cdclk = 0;
		}

		cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
		cdclk_state->min_voltage_level[crtc->pipe] =
			crtc_state->min_voltage_level;

		intel_bw_crtc_update(bw_state, crtc_state);

		intel_pipe_config_sanity_check(dev_priv, crtc_state);

		/* discard our incomplete slave state, copy it from master */
		if (crtc_state->bigjoiner && crtc_state->hw.active) {
			struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc;
			struct intel_crtc_state *slave_crtc_state =
				to_intel_crtc_state(slave->base.state);

			copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state);
			slave->base.mode = crtc->base.mode;

			cdclk_state->min_cdclk[slave->pipe] = min_cdclk;
			cdclk_state->min_voltage_level[slave->pipe] =
				crtc_state->min_voltage_level;

			for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) {
				const struct intel_plane_state *plane_state =
					to_intel_plane_state(plane->base.state);

				/*
				 * FIXME don't have the fb yet, so can't
				 * use intel_plane_data_rate() :(
				 */
				if (plane_state->uapi.visible)
					crtc_state->data_rate[plane->id] =
						4 * crtc_state->pixel_rate;
				else
					crtc_state->data_rate[plane->id] = 0;
			}

			intel_bw_crtc_update(bw_state, slave_crtc_state);
			drm_calc_timestamping_constants(&slave->base,
							&slave_crtc_state->hw.adjusted_mode);
		}
	}
}

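/*
 * Acquire the extra power domain references needed by each encoder's
 * current (readout) state.
 */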
static void
get_encoder_power_domains(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_crtc_state *crtc_state;

		if (!encoder->get_power_domains)
			continue;

		/*
		 * MST-primary and inactive encoders don't have a crtc state
		 * and neither of these require any power domain references.
		 */
		if (!encoder->base.crtc)
			continue;

		crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
		encoder->get_power_domains(encoder, crtc_state);
	}
}

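/*
 * Apply display workarounds that must be in place before we start
 * touching planes and pipes during the hw state takeover.
 */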
static void intel_early_display_was(struct drm_i915_private *dev_priv)
{
	/*
	 * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
	 * Also known as Wa_14010480278.
	 */
	if (IS_DISPLAY_RANGE(dev_priv, 10, 12))
		intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
			       intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);

	if (IS_HASWELL(dev_priv)) {
		/*
		 * WaRsPkgCStateDisplayPMReq:hsw
		 * System hang if this isn't done before disabling all planes!
		 */
		intel_de_write(dev_priv, CHICKEN_PAR1_1,
			       intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
	}

	if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv)) {
		/* Display WA #1142:kbl,cfl,cml */
		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
			     KBL_ARB_FILL_SPARE_22, KBL_ARB_FILL_SPARE_22);
		intel_de_rmw(dev_priv, CHICKEN_MISC_2,
			     KBL_ARB_FILL_SPARE_13 | KBL_ARB_FILL_SPARE_14,
			     KBL_ARB_FILL_SPARE_14);
	}
}

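/*
 * The two helpers below reselect transcoder A on a disabled PCH
 * HDMI/DP port whose transcoder select bits the BIOS left pointing
 * at another pipe; each is an open-coded read/modify/write of the
 * port register (cf. the intel_de_rmw() helper used above). See
 * ibx_sanitize_pch_ports() for the rationale.
 */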
static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
				       enum port port, i915_reg_t hdmi_reg)
{
	u32 val = intel_de_read(dev_priv, hdmi_reg);

	if (val & SDVO_ENABLE ||
	    (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "Sanitizing transcoder select for HDMI %c\n",
		    port_name(port));

	val &= ~SDVO_PIPE_SEL_MASK;
	val |= SDVO_PIPE_SEL(PIPE_A);

	intel_de_write(dev_priv, hdmi_reg, val);
}

static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
				     enum port port, i915_reg_t dp_reg)
{
	u32 val = intel_de_read(dev_priv, dp_reg);

	if (val & DP_PORT_EN ||
	    (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
		return;

	drm_dbg_kms(&dev_priv->drm,
		    "Sanitizing transcoder select for DP %c\n",
		    port_name(port));

	val &= ~DP_PIPE_SEL_MASK;
	val |= DP_PIPE_SEL(PIPE_A);

	intel_de_write(dev_priv, dp_reg, val);
}

static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
{
	/*
	 * The BIOS may select transcoder B on some of the PCH
	 * ports even if it doesn't enable the port. This would trip
	 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
	 * Sanitize the transcoder select bits to prevent that. We
	 * assume that the BIOS never actually enabled the port,
	 * because if it did we'd actually have to toggle the port
	 * on and back off to make the transcoder A select stick
	 * (see intel_dp_link_down(), intel_disable_hdmi(),
	 * intel_disable_sdvo()).
	 */
	ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
	ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);

	/* PCH SDVOB multiplexed with HDMIB */
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
	ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
}

/*
 * Scan out the current hw modeset state, and sanitize it to the
 * current state.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev,
			     struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	intel_wakeref_t wakeref;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	intel_early_display_was(dev_priv);
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */

	/* Sanitize the TypeC port mode upfront, encoders depend on this */
	for_each_intel_encoder(dev, encoder) {
		enum phy phy = intel_port_to_phy(dev_priv, encoder->port);

		/* We need to sanitize only the MST primary port. */
		if (encoder->type != INTEL_OUTPUT_DP_MST &&
		    intel_phy_is_tc(dev_priv, phy))
			intel_tc_port_sanitize(enc_to_dig_port(encoder));
	}

	get_encoder_power_domains(dev_priv);

	if (HAS_PCH_IBX(dev_priv))
		ibx_sanitize_pch_ports(dev_priv);

	/*
	 * intel_sanitize_plane_mapping() may need to do vblank
	 * waits, so we need vblank interrupts restored beforehand.
	 */
	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		drm_crtc_vblank_reset(&crtc->base);

		if (crtc_state->hw.active)
			intel_crtc_vblank_on(crtc_state);
	}

	intel_sanitize_plane_mapping(dev_priv);

	for_each_intel_encoder(dev, encoder)
		intel_sanitize_encoder(encoder);

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	intel_dpll_sanitize_state(dev_priv);

	if (IS_G4X(dev_priv)) {
		g4x_wm_get_hw_state(dev_priv);
		g4x_wm_sanitize(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev_priv);
		vlv_wm_sanitize(dev_priv);
	} else if (DISPLAY_VER(dev_priv) >= 9) {
		skl_wm_get_hw_state(dev_priv);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev_priv);
	}

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;

		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (drm_WARN_ON(dev, put_domains))
			modeset_put_crtc_power_domains(crtc, put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}

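/*
 * Restore the display state saved in dev_priv->modeset_restore_state
 * at suspend time, retrying the modeset locks on deadlock, and
 * re-enable IPC afterwards.
 */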
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	drm_modeset_acquire_init(&ctx, 0);

	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state, &ctx);

	intel_enable_ipc(dev_priv);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		drm_err(&dev_priv->drm,
			"Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}

static void intel_hpd_poll_fini(struct drm_i915_private *i915)
{
	struct intel_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Kill all the work that may have been queued by hpd. */
	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		if (connector->modeset_retry_work.func)
			cancel_work_sync(&connector->modeset_retry_work);
		if (connector->hdcp.shim) {
			cancel_delayed_work_sync(&connector->hdcp.check_work);
			cancel_work_sync(&connector->hdcp.prop_work);
		}
	}
	drm_connector_list_iter_end(&conn_iter);
}

/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
	flush_workqueue(i915->flip_wq);
	flush_workqueue(i915->modeset_wq);

	flush_work(&i915->atomic_helper.free_work);
	drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
}

/* part #2: call after irq uninstall */
void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
{
	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	intel_hpd_poll_fini(i915);

	/*
	 * MST topology needs to be suspended so we don't have any calls to
	 * fbdev after it's finalized. MST will be destroyed later as part of
	 * drm_mode_config_cleanup()
	 */
	intel_dp_mst_suspend(i915);

	/* poll work can call into fbdev, hence clean that up afterwards */
	intel_fbdev_fini(i915);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(i915);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	intel_hdcp_component_fini(i915);

	intel_mode_config_cleanup(i915);

	intel_overlay_cleanup(i915);

	intel_gmbus_teardown(i915);

	destroy_workqueue(i915->flip_wq);
	destroy_workqueue(i915->modeset_wq);

	intel_fbc_cleanup_cfb(i915);
}

/* part #3: call after gem init */
void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
{
	intel_csr_ucode_fini(i915);

	intel_power_domains_driver_remove(i915);

	intel_vga_unregister(i915);

	intel_bios_driver_remove(i915);
}

void intel_display_driver_register(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_display_debugfs_register(i915);

	/* Must be done after probing outputs */
	intel_opregion_register(i915);
	acpi_video_register();

	intel_audio_init(i915);

	/*
	 * Some ports require correctly set-up hpd registers for
	 * detection to work properly (otherwise we get a ghost
	 * connected connector status), e.g. VGA on gm45.  Hence we
	 * can only set up the initial fbdev config after hpd irqs are
	 * fully enabled. We do it last so that the async config
	 * cannot run before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(&i915->drm);

	/*
	 * We need to coordinate the hotplugs with the asynchronous
	 * fbdev configuration, for which we use the
	 * fbdev->async_cookie.
	 */
	drm_kms_helper_poll_init(&i915->drm);
}

void intel_display_driver_unregister(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	intel_fbdev_unregister(i915);
	intel_audio_deinit(i915);

	/*
	 * After flushing the fbdev (incl. a late async config which
	 * will have delayed queuing of a hotplug event), flush the
	 * hotplug events too.
	 */
	drm_kms_helper_poll_fini(&i915->drm);
	drm_atomic_helper_shutdown(&i915->drm);

	acpi_video_unregister();
	intel_opregion_unregister(i915);
}

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)

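/*
 * Raw register snapshot of the display hardware (cursor, pipe, plane
 * and transcoder state), recorded as part of GPU error capture.
 */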
struct intel_display_error_state {

	u32 power_well_driver;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool available;
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[5];
};

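/*
 * Note the GFP_ATOMIC allocation: this can be called from the error
 * capture path where sleeping is not allowed. Returns NULL if the
 * device has no display or the allocation fails; registers behind
 * powered-down domains are left at zero.
 */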
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_D,
		TRANSCODER_EDP,
	};
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));

	if (!HAS_DISPLAY(dev_priv))
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver = intel_de_read(dev_priv,
							 HSW_PWR_WELL_CTL2);

	for_each_pipe(dev_priv, i) {
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i));
		error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i));
		error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i));

		error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i));
		error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i));
		if (DISPLAY_VER(dev_priv) <= 3) {
			error->plane[i].size = intel_de_read(dev_priv,
							     DSPSIZE(i));
			error->plane[i].pos = intel_de_read(dev_priv,
							    DSPPOS(i));
		}
		if (DISPLAY_VER(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			error->plane[i].addr = intel_de_read(dev_priv,
							     DSPADDR(i));
		if (DISPLAY_VER(dev_priv) >= 4) {
			error->plane[i].surface = intel_de_read(dev_priv,
								DSPSURF(i));
			error->plane[i].tile_offset = intel_de_read(dev_priv,
								    DSPTILEOFF(i));
		}

		error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i));

		if (HAS_GMCH(dev_priv))
			error->pipe[i].stat = intel_de_read(dev_priv,
							    PIPESTAT(i));
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		if (!HAS_TRANSCODER(dev_priv, cpu_transcoder))
			continue;

		error->transcoder[i].available = true;
		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = intel_de_read(dev_priv,
							  PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = intel_de_read(dev_priv,
							    HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = intel_de_read(dev_priv,
							    HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = intel_de_read(dev_priv,
							   HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = intel_de_read(dev_priv,
							    VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = intel_de_read(dev_priv,
							    VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = intel_de_read(dev_priv,
							   VSYNC(cpu_transcoder));
	}

	return error;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

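/*
 * Pretty-print a previously captured intel_display_error_state into
 * the error state buffer; transcoders not available on the platform
 * are skipped.
 */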
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = m->i915;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv));
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		if (DISPLAY_VER(dev_priv) <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (DISPLAY_VER(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (DISPLAY_VER(dev_priv) >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
		if (!error->transcoder[i].available)
			continue;

		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}

#endif