xref: /linux/drivers/gpu/drm/i915/display/intel_fdi.c (revision 8466a141)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <linux/string_helpers.h>
7 
8 #include <drm/drm_fixed.h>
9 
10 #include "i915_reg.h"
11 #include "intel_atomic.h"
12 #include "intel_crtc.h"
13 #include "intel_ddi.h"
14 #include "intel_de.h"
15 #include "intel_dp.h"
16 #include "intel_display_types.h"
17 #include "intel_fdi.h"
18 #include "intel_fdi_regs.h"
19 #include "intel_link_bw.h"
20 
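/*
 * Platform specific hook for FDI link training; the vtable is selected by
 * intel_fdi_init_hook() at init time.
 */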
21 struct intel_fdi_funcs {
22 	void (*fdi_link_train)(struct intel_crtc *crtc,
23 			       const struct intel_crtc_state *crtc_state);
24 };
25 
26 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
27 			  enum pipe pipe, bool state)
28 {
29 	bool cur_state;
30 
31 	if (HAS_DDI(dev_priv)) {
32 		/*
33 		 * DDI does not have a specific FDI_TX register.
34 		 *
35 		 * FDI is never fed from EDP transcoder
36 		 * so pipe->transcoder cast is fine here.
37 		 */
38 		enum transcoder cpu_transcoder = (enum transcoder)pipe;
39 		cur_state = intel_de_read(dev_priv,
40 					  TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE;
41 	} else {
42 		cur_state = intel_de_read(dev_priv, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE;
43 	}
44 	I915_STATE_WARN(dev_priv, cur_state != state,
45 			"FDI TX state assertion failure (expected %s, current %s)\n",
46 			str_on_off(state), str_on_off(cur_state));
47 }
48 
49 void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe)
50 {
51 	assert_fdi_tx(i915, pipe, true);
52 }
53 
54 void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe)
55 {
56 	assert_fdi_tx(i915, pipe, false);
57 }
58 
59 static void assert_fdi_rx(struct drm_i915_private *dev_priv,
60 			  enum pipe pipe, bool state)
61 {
62 	bool cur_state;
63 
64 	cur_state = intel_de_read(dev_priv, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
65 	I915_STATE_WARN(dev_priv, cur_state != state,
66 			"FDI RX state assertion failure (expected %s, current %s)\n",
67 			str_on_off(state), str_on_off(cur_state));
68 }
69 
70 void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe)
71 {
72 	assert_fdi_rx(i915, pipe, true);
73 }
74 
75 void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe)
76 {
77 	assert_fdi_rx(i915, pipe, false);
78 }
79 
80 void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915,
81 			       enum pipe pipe)
82 {
83 	bool cur_state;
84 
85 	/* ILK FDI PLL is always enabled */
86 	if (IS_IRONLAKE(i915))
87 		return;
88 
89 	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
90 	if (HAS_DDI(i915))
91 		return;
92 
93 	cur_state = intel_de_read(i915, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE;
94 	I915_STATE_WARN(i915, !cur_state,
95 			"FDI TX PLL assertion failure, should be active but is disabled\n");
96 }
97 
98 static void assert_fdi_rx_pll(struct drm_i915_private *i915,
99 			      enum pipe pipe, bool state)
100 {
101 	bool cur_state;
102 
103 	cur_state = intel_de_read(i915, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;
104 	I915_STATE_WARN(i915, cur_state != state,
105 			"FDI RX PLL assertion failure (expected %s, current %s)\n",
106 			str_on_off(state), str_on_off(cur_state));
107 }
108 
109 void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
110 {
111 	assert_fdi_rx_pll(i915, pipe, true);
112 }
113 
114 void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
115 {
116 	assert_fdi_rx_pll(i915, pipe, false);
117 }
118 
119 void intel_fdi_link_train(struct intel_crtc *crtc,
120 			  const struct intel_crtc_state *crtc_state)
121 {
122 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
123 
124 	dev_priv->display.funcs.fdi->fdi_link_train(crtc, crtc_state);
125 }
126 
127 /**
128  * intel_fdi_add_affected_crtcs - add CRTCs on FDI affected by other modeset CRTCs
129  * @state: intel atomic state
130  *
131  * Add a CRTC using FDI to @state if changing another CRTC's FDI BW usage is
132  * known to affect the available FDI BW for the former CRTC. In practice this
133  * means adding CRTC B on IVYBRIDGE if its use of FDI lanes is limited (by
134  * CRTC C) and CRTC C is getting disabled.
135  *
136  * Returns 0 in case of success, or a negative error code otherwise.
137  */
138 int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state)
139 {
140 	struct drm_i915_private *i915 = to_i915(state->base.dev);
141 	const struct intel_crtc_state *old_crtc_state;
142 	const struct intel_crtc_state *new_crtc_state;
143 	struct intel_crtc *crtc;
144 
145 	if (!IS_IVYBRIDGE(i915) || INTEL_NUM_PIPES(i915) != 3)
146 		return 0;
147 
148 	crtc = intel_crtc_for_pipe(i915, PIPE_C);
149 	new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
150 	if (!new_crtc_state)
151 		return 0;
152 
153 	if (!intel_crtc_needs_modeset(new_crtc_state))
154 		return 0;
155 
156 	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
157 	if (!old_crtc_state->fdi_lanes)
158 		return 0;
159 
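	/*
	 * Pipe C's FDI usage is changing; recheck pipe B, whose available
	 * FDI BW depends on it.
	 */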
160 	crtc = intel_crtc_for_pipe(i915, PIPE_B);
161 	new_crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
162 	if (IS_ERR(new_crtc_state))
163 		return PTR_ERR(new_crtc_state);
164 
165 	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
166 	if (!old_crtc_state->fdi_lanes)
167 		return 0;
168 
169 	return intel_modeset_pipes_in_mask_early(state,
170 						 "FDI link BW decrease on pipe C",
171 						 BIT(PIPE_B));
172 }
173 
174 /* units of 100MHz */
175 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
176 {
177 	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
178 		return crtc_state->fdi_lanes;
179 
180 	return 0;
181 }
182 
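/*
 * Validate the FDI lane count requested for @pipe against the platform
 * limits and, on IVB, against the lanes used by the other pipe sharing the
 * FDI B/C link. On failure *pipe_to_reduce names the pipe whose link BW
 * should be reduced to make the configuration fit.
 */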
183 static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
184 			       struct intel_crtc_state *pipe_config,
185 			       enum pipe *pipe_to_reduce)
186 {
187 	struct drm_i915_private *dev_priv = to_i915(dev);
188 	struct drm_atomic_state *state = pipe_config->uapi.state;
189 	struct intel_crtc *other_crtc;
190 	struct intel_crtc_state *other_crtc_state;
191 
192 	*pipe_to_reduce = pipe;
193 
194 	drm_dbg_kms(&dev_priv->drm,
195 		    "checking fdi config on pipe %c, lanes %i\n",
196 		    pipe_name(pipe), pipe_config->fdi_lanes);
197 	if (pipe_config->fdi_lanes > 4) {
198 		drm_dbg_kms(&dev_priv->drm,
199 			    "invalid fdi lane config on pipe %c: %i lanes\n",
200 			    pipe_name(pipe), pipe_config->fdi_lanes);
201 		return -EINVAL;
202 	}
203 
204 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
205 		if (pipe_config->fdi_lanes > 2) {
206 			drm_dbg_kms(&dev_priv->drm,
207 				    "only 2 lanes on haswell, required: %i lanes\n",
208 				    pipe_config->fdi_lanes);
209 			return -EINVAL;
210 		} else {
211 			return 0;
212 		}
213 	}
214 
215 	if (INTEL_NUM_PIPES(dev_priv) == 2)
216 		return 0;
217 
218 	/* Ivybridge 3 pipe is really complicated */
219 	switch (pipe) {
220 	case PIPE_A:
221 		return 0;
222 	case PIPE_B:
223 		if (pipe_config->fdi_lanes <= 2)
224 			return 0;
225 
226 		other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_C);
227 		other_crtc_state =
228 			intel_atomic_get_crtc_state(state, other_crtc);
229 		if (IS_ERR(other_crtc_state))
230 			return PTR_ERR(other_crtc_state);
231 
232 		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
233 			drm_dbg_kms(&dev_priv->drm,
234 				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
235 				    pipe_name(pipe), pipe_config->fdi_lanes);
236 			return -EINVAL;
237 		}
238 		return 0;
239 	case PIPE_C:
240 		if (pipe_config->fdi_lanes > 2) {
241 			drm_dbg_kms(&dev_priv->drm,
242 				    "only 2 lanes on pipe %c: required %i lanes\n",
243 				    pipe_name(pipe), pipe_config->fdi_lanes);
244 			return -EINVAL;
245 		}
246 
247 		other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_B);
248 		other_crtc_state =
249 			intel_atomic_get_crtc_state(state, other_crtc);
250 		if (IS_ERR(other_crtc_state))
251 			return PTR_ERR(other_crtc_state);
252 
253 		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
254 			drm_dbg_kms(&dev_priv->drm,
255 				    "fdi link B uses too many lanes to enable link C\n");
256 
257 			*pipe_to_reduce = PIPE_B;
258 
259 			return -EINVAL;
260 		}
261 		return 0;
262 	default:
263 		MISSING_CASE(pipe);
264 		return 0;
265 	}
266 }
267 
268 void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
269 {
270 	if (IS_IRONLAKE(i915)) {
271 		u32 fdi_pll_clk =
272 			intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
273 
274 		i915->display.fdi.pll_freq = (fdi_pll_clk + 2) * 10000;
275 	} else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
276 		i915->display.fdi.pll_freq = 270000;
277 	} else {
278 		return;
279 	}
280 
281 	drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->display.fdi.pll_freq);
282 }
283 
284 int intel_fdi_link_freq(struct drm_i915_private *i915,
285 			const struct intel_crtc_state *pipe_config)
286 {
287 	if (HAS_DDI(i915))
288 		return pipe_config->port_clock; /* SPLL */
289 	else
290 		return i915->display.fdi.pll_freq;
291 }
292 
293 /**
294  * intel_fdi_compute_pipe_bpp - compute pipe bpp limited by max link bpp
295  * @crtc_state: the crtc state
296  *
297  * Compute the pipe bpp limited by the CRTC's maximum link bpp. Encoders can
298  * call this function during state computation in the simple case where the
299  * link bpp will always match the pipe bpp. This is the case for all non-DP
300  * encoders, while DP encoders will use a link bpp lower than pipe bpp in case
301  * of DSC compression.
302  *
303  * Returns %true in case of success, %false if pipe bpp would need to be
304  * reduced below its valid range.
305  */
306 bool intel_fdi_compute_pipe_bpp(struct intel_crtc_state *crtc_state)
307 {
308 	int pipe_bpp = min(crtc_state->pipe_bpp,
309 			   fxp_q4_to_int(crtc_state->max_link_bpp_x16));
310 
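	/*
	 * pipe_bpp is 3 * bpc and bpc must be even, hence the multiple of 6;
	 * 6 bpc (18 bpp) is the minimum supported.
	 */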
311 	pipe_bpp = rounddown(pipe_bpp, 2 * 3);
312 
313 	if (pipe_bpp < 6 * 3)
314 		return false;
315 
316 	crtc_state->pipe_bpp = pipe_bpp;
317 
318 	return true;
319 }
320 
321 int ilk_fdi_compute_config(struct intel_crtc *crtc,
322 			   struct intel_crtc_state *pipe_config)
323 {
324 	struct drm_device *dev = crtc->base.dev;
325 	struct drm_i915_private *i915 = to_i915(dev);
326 	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
327 	int lane, link_bw, fdi_dotclock;
328 
329 	/* FDI is a binary signal running at ~2.7GHz, encoding
330 	 * each output octet as 10 bits. The actual frequency
331 	 * is stored as a divider into a 100MHz clock, and the
332 	 * mode pixel clock is stored in units of 1KHz.
333 	 * Hence the bw of each lane in terms of the mode signal
334 	 * is:
335 	 */
336 	link_bw = intel_fdi_link_freq(i915, pipe_config);
337 
338 	fdi_dotclock = adjusted_mode->crtc_clock;
339 
340 	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
341 				      pipe_config->pipe_bpp);
342 
343 	pipe_config->fdi_lanes = lane;
344 
345 	intel_link_compute_m_n(fxp_q4_from_int(pipe_config->pipe_bpp),
346 			       lane, fdi_dotclock,
347 			       link_bw,
348 			       intel_dp_bw_fec_overhead(false),
349 			       &pipe_config->fdi_m_n);
350 
351 	return 0;
352 }
353 
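/*
 * Check the FDI lane configuration of @crtc and, if it doesn't fit into the
 * available link BW, reduce the link bpp of the pipe reported by
 * ilk_check_fdi_lanes() via @limits and ask for the state to be recomputed.
 */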
354 static int intel_fdi_atomic_check_bw(struct intel_atomic_state *state,
355 				     struct intel_crtc *crtc,
356 				     struct intel_crtc_state *pipe_config,
357 				     struct intel_link_bw_limits *limits)
358 {
359 	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
360 	enum pipe pipe_to_reduce;
361 	int ret;
362 
363 	ret = ilk_check_fdi_lanes(&i915->drm, crtc->pipe, pipe_config,
364 				  &pipe_to_reduce);
365 	if (ret != -EINVAL)
366 		return ret;
367 
368 	ret = intel_link_bw_reduce_bpp(state, limits,
369 				       BIT(pipe_to_reduce),
370 				       "FDI link BW");
371 
372 	return ret ? : -EAGAIN;
373 }
374 
375 /**
376  * intel_fdi_atomic_check_link - check all modeset FDI link configuration
377  * @state: intel atomic state
378  * @limits: link BW limits
379  *
380  * Check the link configuration for all modeset FDI outputs. If the
381  * configuration is invalid @limits will be updated if possible to
382  * reduce the total BW, after which the configuration for all CRTCs in
383  * @state must be recomputed with the updated @limits.
384  *
385  * Returns:
386  *   - 0 if the configuration is valid
387  *   - %-EAGAIN, if the configuration is invalid and @limits got updated
388  *     with fallback values with which the configuration of all CRTCs
389  *     in @state must be recomputed
390  *   - Other negative error, if the configuration is invalid without a
391  *     fallback possibility, or the check failed for another reason
392  */
393 int intel_fdi_atomic_check_link(struct intel_atomic_state *state,
394 				struct intel_link_bw_limits *limits)
395 {
396 	struct intel_crtc *crtc;
397 	struct intel_crtc_state *crtc_state;
398 	int i;
399 
400 	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
401 		int ret;
402 
403 		if (!crtc_state->has_pch_encoder ||
404 		    !intel_crtc_needs_modeset(crtc_state) ||
405 		    !crtc_state->hw.enable)
406 			continue;
407 
408 		ret = intel_fdi_atomic_check_bw(state, crtc, crtc_state, limits);
409 		if (ret)
410 			return ret;
411 	}
412 
413 	return 0;
414 }
415 
416 static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
417 {
418 	u32 temp;
419 
420 	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
421 	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
422 		return;
423 
424 	drm_WARN_ON(&dev_priv->drm,
425 		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
426 		    FDI_RX_ENABLE);
427 	drm_WARN_ON(&dev_priv->drm,
428 		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
429 		    FDI_RX_ENABLE);
430 
431 	temp &= ~FDI_BC_BIFURCATION_SELECT;
432 	if (enable)
433 		temp |= FDI_BC_BIFURCATION_SELECT;
434 
435 	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
436 		    enable ? "en" : "dis");
437 	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
438 	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
439 }
440 
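/*
 * FDI B and C share 4 lanes on IVB: with bifurcation enabled each link gets
 * 2 lanes, with it disabled FDI B gets all 4. Pipe B must therefore run
 * without bifurcation when it needs more than 2 lanes, while pipe C always
 * requires bifurcation to be enabled.
 */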
441 static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
442 {
443 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
444 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
445 
446 	switch (crtc->pipe) {
447 	case PIPE_A:
448 		break;
449 	case PIPE_B:
450 		if (crtc_state->fdi_lanes > 2)
451 			cpt_set_fdi_bc_bifurcation(dev_priv, false);
452 		else
453 			cpt_set_fdi_bc_bifurcation(dev_priv, true);
454 
455 		break;
456 	case PIPE_C:
457 		cpt_set_fdi_bc_bifurcation(dev_priv, true);
458 
459 		break;
460 	default:
461 		MISSING_CASE(crtc->pipe);
462 	}
463 }
464 
465 void intel_fdi_normal_train(struct intel_crtc *crtc)
466 {
467 	struct drm_device *dev = crtc->base.dev;
468 	struct drm_i915_private *dev_priv = to_i915(dev);
469 	enum pipe pipe = crtc->pipe;
470 	i915_reg_t reg;
471 	u32 temp;
472 
473 	/* enable normal train */
474 	reg = FDI_TX_CTL(pipe);
475 	temp = intel_de_read(dev_priv, reg);
476 	if (IS_IVYBRIDGE(dev_priv)) {
477 		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
478 		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
479 	} else {
480 		temp &= ~FDI_LINK_TRAIN_NONE;
481 		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
482 	}
483 	intel_de_write(dev_priv, reg, temp);
484 
485 	reg = FDI_RX_CTL(pipe);
486 	temp = intel_de_read(dev_priv, reg);
487 	if (HAS_PCH_CPT(dev_priv)) {
488 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
489 		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
490 	} else {
491 		temp &= ~FDI_LINK_TRAIN_NONE;
492 		temp |= FDI_LINK_TRAIN_NONE;
493 	}
494 	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
495 
496 	/* wait one idle pattern time */
497 	intel_de_posting_read(dev_priv, reg);
498 	udelay(1000);
499 
500 	/* IVB wants error correction enabled */
501 	if (IS_IVYBRIDGE(dev_priv))
502 		intel_de_rmw(dev_priv, reg, 0, FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
503 }
504 
505 /* The FDI link training functions for ILK/Ibexpeak. */
506 static void ilk_fdi_link_train(struct intel_crtc *crtc,
507 			       const struct intel_crtc_state *crtc_state)
508 {
509 	struct drm_device *dev = crtc->base.dev;
510 	struct drm_i915_private *dev_priv = to_i915(dev);
511 	enum pipe pipe = crtc->pipe;
512 	i915_reg_t reg;
513 	u32 temp, tries;
514 
515 	/*
516 	 * Write the TU size bits before fdi link training, so that error
517 	 * detection works.
518 	 */
519 	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
520 		       intel_de_read(dev_priv, PIPE_DATA_M1(dev_priv, pipe)) & TU_SIZE_MASK);
521 
522 	/* FDI needs bits from pipe first */
523 	assert_transcoder_enabled(dev_priv, crtc_state->cpu_transcoder);
524 
525 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
526 	   for train result */
527 	reg = FDI_RX_IMR(pipe);
528 	temp = intel_de_read(dev_priv, reg);
529 	temp &= ~FDI_RX_SYMBOL_LOCK;
530 	temp &= ~FDI_RX_BIT_LOCK;
531 	intel_de_write(dev_priv, reg, temp);
532 	intel_de_read(dev_priv, reg);
533 	udelay(150);
534 
535 	/* enable CPU FDI TX and PCH FDI RX */
536 	reg = FDI_TX_CTL(pipe);
537 	temp = intel_de_read(dev_priv, reg);
538 	temp &= ~FDI_DP_PORT_WIDTH_MASK;
539 	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
540 	temp &= ~FDI_LINK_TRAIN_NONE;
541 	temp |= FDI_LINK_TRAIN_PATTERN_1;
542 	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
543 
544 	reg = FDI_RX_CTL(pipe);
545 	temp = intel_de_read(dev_priv, reg);
546 	temp &= ~FDI_LINK_TRAIN_NONE;
547 	temp |= FDI_LINK_TRAIN_PATTERN_1;
548 	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
549 
550 	intel_de_posting_read(dev_priv, reg);
551 	udelay(150);
552 
553 	/* Ironlake workaround, enable clock pointer after FDI enable */
554 	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
555 		       FDI_RX_PHASE_SYNC_POINTER_OVR);
556 	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
557 		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);
558 
559 	reg = FDI_RX_IIR(pipe);
560 	for (tries = 0; tries < 5; tries++) {
561 		temp = intel_de_read(dev_priv, reg);
562 		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
563 
564 		if ((temp & FDI_RX_BIT_LOCK)) {
565 			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
566 			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
567 			break;
568 		}
569 	}
570 	if (tries == 5)
571 		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
572 
573 	/* Train 2 */
574 	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
575 		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
576 	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
577 		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_2);
578 	intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
579 	udelay(150);
580 
581 	reg = FDI_RX_IIR(pipe);
582 	for (tries = 0; tries < 5; tries++) {
583 		temp = intel_de_read(dev_priv, reg);
584 		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
585 
586 		if (temp & FDI_RX_SYMBOL_LOCK) {
587 			intel_de_write(dev_priv, reg,
588 				       temp | FDI_RX_SYMBOL_LOCK);
589 			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
590 			break;
591 		}
592 	}
593 	if (tries == 5)
594 		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
595 
596 	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
597 
598 }
599 
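/*
 * Voltage swing / pre-emphasis combinations tried in order during SNB/IVB
 * FDI link training.
 */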
600 static const int snb_b_fdi_train_param[] = {
601 	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
602 	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
603 	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
604 	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
605 };
606 
607 /* The FDI link training functions for SNB/Cougarpoint. */
608 static void gen6_fdi_link_train(struct intel_crtc *crtc,
609 				const struct intel_crtc_state *crtc_state)
610 {
611 	struct drm_device *dev = crtc->base.dev;
612 	struct drm_i915_private *dev_priv = to_i915(dev);
613 	enum pipe pipe = crtc->pipe;
614 	i915_reg_t reg;
615 	u32 temp, i, retry;
616 
617 	/*
618 	 * Write the TU size bits before fdi link training, so that error
619 	 * detection works.
620 	 */
621 	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
622 		       intel_de_read(dev_priv, PIPE_DATA_M1(dev_priv, pipe)) & TU_SIZE_MASK);
623 
624 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
625 	   for train result */
626 	reg = FDI_RX_IMR(pipe);
627 	temp = intel_de_read(dev_priv, reg);
628 	temp &= ~FDI_RX_SYMBOL_LOCK;
629 	temp &= ~FDI_RX_BIT_LOCK;
630 	intel_de_write(dev_priv, reg, temp);
631 
632 	intel_de_posting_read(dev_priv, reg);
633 	udelay(150);
634 
635 	/* enable CPU FDI TX and PCH FDI RX */
636 	reg = FDI_TX_CTL(pipe);
637 	temp = intel_de_read(dev_priv, reg);
638 	temp &= ~FDI_DP_PORT_WIDTH_MASK;
639 	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
640 	temp &= ~FDI_LINK_TRAIN_NONE;
641 	temp |= FDI_LINK_TRAIN_PATTERN_1;
642 	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
643 	/* SNB-B */
644 	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
645 	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
646 
647 	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
648 		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
649 
650 	reg = FDI_RX_CTL(pipe);
651 	temp = intel_de_read(dev_priv, reg);
652 	if (HAS_PCH_CPT(dev_priv)) {
653 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
654 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
655 	} else {
656 		temp &= ~FDI_LINK_TRAIN_NONE;
657 		temp |= FDI_LINK_TRAIN_PATTERN_1;
658 	}
659 	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
660 
661 	intel_de_posting_read(dev_priv, reg);
662 	udelay(150);
663 
664 	for (i = 0; i < 4; i++) {
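	/* Step through the vswing/pre-emphasis levels, polling for bit lock */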
665 		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
666 			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
667 		intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
668 		udelay(500);
669 
670 		for (retry = 0; retry < 5; retry++) {
671 			reg = FDI_RX_IIR(pipe);
672 			temp = intel_de_read(dev_priv, reg);
673 			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
674 			if (temp & FDI_RX_BIT_LOCK) {
675 				intel_de_write(dev_priv, reg,
676 					       temp | FDI_RX_BIT_LOCK);
677 				drm_dbg_kms(&dev_priv->drm,
678 					    "FDI train 1 done.\n");
679 				break;
680 			}
681 			udelay(50);
682 		}
683 		if (retry < 5)
684 			break;
685 	}
686 	if (i == 4)
687 		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
688 
689 	/* Train 2 */
690 	reg = FDI_TX_CTL(pipe);
691 	temp = intel_de_read(dev_priv, reg);
692 	temp &= ~FDI_LINK_TRAIN_NONE;
693 	temp |= FDI_LINK_TRAIN_PATTERN_2;
694 	if (IS_SANDYBRIDGE(dev_priv)) {
695 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
696 		/* SNB-B */
697 		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
698 	}
699 	intel_de_write(dev_priv, reg, temp);
700 
701 	reg = FDI_RX_CTL(pipe);
702 	temp = intel_de_read(dev_priv, reg);
703 	if (HAS_PCH_CPT(dev_priv)) {
704 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
705 		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
706 	} else {
707 		temp &= ~FDI_LINK_TRAIN_NONE;
708 		temp |= FDI_LINK_TRAIN_PATTERN_2;
709 	}
710 	intel_de_write(dev_priv, reg, temp);
711 
712 	intel_de_posting_read(dev_priv, reg);
713 	udelay(150);
714 
715 	for (i = 0; i < 4; i++) {
716 		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
717 			     FDI_LINK_TRAIN_VOL_EMP_MASK, snb_b_fdi_train_param[i]);
718 		intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
719 		udelay(500);
720 
721 		for (retry = 0; retry < 5; retry++) {
722 			reg = FDI_RX_IIR(pipe);
723 			temp = intel_de_read(dev_priv, reg);
724 			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
725 			if (temp & FDI_RX_SYMBOL_LOCK) {
726 				intel_de_write(dev_priv, reg,
727 					       temp | FDI_RX_SYMBOL_LOCK);
728 				drm_dbg_kms(&dev_priv->drm,
729 					    "FDI train 2 done.\n");
730 				break;
731 			}
732 			udelay(50);
733 		}
734 		if (retry < 5)
735 			break;
736 	}
737 	if (i == 4)
738 		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
739 
740 	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
741 }
742 
743 /* Manual link training for Ivy Bridge A0 parts */
744 static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
745 				      const struct intel_crtc_state *crtc_state)
746 {
747 	struct drm_device *dev = crtc->base.dev;
748 	struct drm_i915_private *dev_priv = to_i915(dev);
749 	enum pipe pipe = crtc->pipe;
750 	i915_reg_t reg;
751 	u32 temp, i, j;
752 
753 	ivb_update_fdi_bc_bifurcation(crtc_state);
754 
755 	/*
756 	 * Write the TU size bits before fdi link training, so that error
757 	 * detection works.
758 	 */
759 	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
760 		       intel_de_read(dev_priv, PIPE_DATA_M1(dev_priv, pipe)) & TU_SIZE_MASK);
761 
762 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
763 	   for train result */
764 	reg = FDI_RX_IMR(pipe);
765 	temp = intel_de_read(dev_priv, reg);
766 	temp &= ~FDI_RX_SYMBOL_LOCK;
767 	temp &= ~FDI_RX_BIT_LOCK;
768 	intel_de_write(dev_priv, reg, temp);
769 
770 	intel_de_posting_read(dev_priv, reg);
771 	udelay(150);
772 
773 	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
774 		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));
775 
776 	/* Try each vswing and preemphasis setting twice before moving on */
777 	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
778 		/* disable first in case we need to retry */
779 		reg = FDI_TX_CTL(pipe);
780 		temp = intel_de_read(dev_priv, reg);
781 		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
782 		temp &= ~FDI_TX_ENABLE;
783 		intel_de_write(dev_priv, reg, temp);
784 
785 		reg = FDI_RX_CTL(pipe);
786 		temp = intel_de_read(dev_priv, reg);
787 		temp &= ~FDI_LINK_TRAIN_AUTO;
788 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
789 		temp &= ~FDI_RX_ENABLE;
790 		intel_de_write(dev_priv, reg, temp);
791 
792 		/* enable CPU FDI TX and PCH FDI RX */
793 		reg = FDI_TX_CTL(pipe);
794 		temp = intel_de_read(dev_priv, reg);
795 		temp &= ~FDI_DP_PORT_WIDTH_MASK;
796 		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
797 		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
798 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
799 		temp |= snb_b_fdi_train_param[j/2];
800 		temp |= FDI_COMPOSITE_SYNC;
801 		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
802 
803 		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
804 			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
805 
806 		reg = FDI_RX_CTL(pipe);
807 		temp = intel_de_read(dev_priv, reg);
808 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
809 		temp |= FDI_COMPOSITE_SYNC;
810 		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
811 
812 		intel_de_posting_read(dev_priv, reg);
813 		udelay(1); /* should be 0.5us */
814 
815 		for (i = 0; i < 4; i++) {
816 			reg = FDI_RX_IIR(pipe);
817 			temp = intel_de_read(dev_priv, reg);
818 			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
819 
820 			if (temp & FDI_RX_BIT_LOCK ||
821 			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
822 				intel_de_write(dev_priv, reg,
823 					       temp | FDI_RX_BIT_LOCK);
824 				drm_dbg_kms(&dev_priv->drm,
825 					    "FDI train 1 done, level %i.\n",
826 					    i);
827 				break;
828 			}
829 			udelay(1); /* should be 0.5us */
830 		}
831 		if (i == 4) {
832 			drm_dbg_kms(&dev_priv->drm,
833 				    "FDI train 1 fail on vswing %d\n", j / 2);
834 			continue;
835 		}
836 
837 		/* Train 2 */
838 		intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
839 			     FDI_LINK_TRAIN_NONE_IVB,
840 			     FDI_LINK_TRAIN_PATTERN_2_IVB);
841 		intel_de_rmw(dev_priv, FDI_RX_CTL(pipe),
842 			     FDI_LINK_TRAIN_PATTERN_MASK_CPT,
843 			     FDI_LINK_TRAIN_PATTERN_2_CPT);
844 		intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
845 		udelay(2); /* should be 1.5us */
846 
847 		for (i = 0; i < 4; i++) {
848 			reg = FDI_RX_IIR(pipe);
849 			temp = intel_de_read(dev_priv, reg);
850 			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
851 
852 			if (temp & FDI_RX_SYMBOL_LOCK ||
853 			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
854 				intel_de_write(dev_priv, reg,
855 					       temp | FDI_RX_SYMBOL_LOCK);
856 				drm_dbg_kms(&dev_priv->drm,
857 					    "FDI train 2 done, level %i.\n",
858 					    i);
859 				goto train_done;
860 			}
861 			udelay(2); /* should be 1.5us */
862 		}
863 		if (i == 4)
864 			drm_dbg_kms(&dev_priv->drm,
865 				    "FDI train 2 fail on vswing %d\n", j / 2);
866 	}
867 
868 train_done:
869 	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
870 }
871 
872 /* Starting with Haswell, different DDI ports can work in FDI mode for
873  * connection to the PCH-located connectors. For this, it is necessary to train
874  * both the DDI port and PCH receiver for the desired DDI buffer settings.
875  *
876  * The recommended port to work in FDI mode is DDI E, which we use here. Also,
877  * please note that when FDI mode is active on DDI E, it shares 2 lines with
878  * DDI A (which is used for eDP)
879  */
880 void hsw_fdi_link_train(struct intel_encoder *encoder,
881 			const struct intel_crtc_state *crtc_state)
882 {
883 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
884 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
885 	u32 temp, i, rx_ctl_val;
886 	int n_entries;
887 
888 	encoder->get_buf_trans(encoder, crtc_state, &n_entries);
889 
890 	hsw_prepare_dp_ddi_buffers(encoder, crtc_state);
891 
892 	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
893 	 * mode set "sequence for CRT port" document:
894 	 * - TP1 to TP2 time with the default value
895 	 * - FDI delay to 90h
896 	 *
897 	 * WaFDIAutoLinkSetTimingOverrride:hsw
898 	 */
899 	intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
900 		       FDI_RX_PWRDN_LANE1_VAL(2) |
901 		       FDI_RX_PWRDN_LANE0_VAL(2) |
902 		       FDI_RX_TP1_TO_TP2_48 |
903 		       FDI_RX_FDI_DELAY_90);
904 
905 	/* Enable the PCH Receiver FDI PLL */
906 	rx_ctl_val = dev_priv->display.fdi.rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
907 		     FDI_RX_PLL_ENABLE |
908 		     FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
909 	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
910 	intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
911 	udelay(220);
912 
913 	/* Switch from Rawclk to PCDclk */
914 	rx_ctl_val |= FDI_PCDCLK;
915 	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
916 
917 	/* Configure Port Clock Select */
918 	drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
919 	intel_ddi_enable_clock(encoder, crtc_state);
920 
921 	/* Start the training iterating through available voltages and emphasis,
922 	 * testing each value twice. */
923 	for (i = 0; i < n_entries * 2; i++) {
924 		/* Configure DP_TP_CTL with auto-training */
925 		intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
926 			       DP_TP_CTL_FDI_AUTOTRAIN |
927 			       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
928 			       DP_TP_CTL_LINK_TRAIN_PAT1 |
929 			       DP_TP_CTL_ENABLE);
930 
931 		/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
932 		 * DDI E does not support port reversal, the functionality is
933 		 * achieved on the PCH side in FDI_RX_CTL, so no need to set the
934 		 * port reversal bit */
935 		intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
936 			       DDI_BUF_CTL_ENABLE |
937 			       ((crtc_state->fdi_lanes - 1) << 1) |
938 			       DDI_BUF_TRANS_SELECT(i / 2));
939 		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
940 
941 		udelay(600);
942 
943 		/* Program PCH FDI Receiver TU */
944 		intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));
945 
946 		/* Enable PCH FDI Receiver with auto-training */
947 		rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
948 		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
949 		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
950 
951 		/* Wait for FDI receiver lane calibration */
952 		udelay(30);
953 
954 		/* Unset FDI_RX_MISC pwrdn lanes */
955 		intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
956 			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK, 0);
957 		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
958 
959 		/* Wait for FDI auto training time */
960 		udelay(5);
961 
962 		temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
963 		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
964 			drm_dbg_kms(&dev_priv->drm,
965 				    "FDI link training done on step %d\n", i);
966 			break;
967 		}
968 
969 		/*
970 		 * Leave things enabled even if we failed to train FDI.
971 		 * Results in less fireworks from the state checker.
972 		 */
973 		if (i == n_entries * 2 - 1) {
974 			drm_err(&dev_priv->drm, "FDI link training failed!\n");
975 			break;
976 		}
977 
978 		rx_ctl_val &= ~FDI_RX_ENABLE;
979 		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
980 		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
981 
982 		intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
983 		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
984 
985 		/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
986 		intel_de_rmw(dev_priv, DP_TP_CTL(PORT_E), DP_TP_CTL_ENABLE, 0);
987 		intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));
988 
989 		intel_wait_ddi_buf_idle(dev_priv, PORT_E);
990 
991 		/* Reset FDI_RX_MISC pwrdn lanes */
992 		intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
993 			     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
994 			     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
995 		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
996 	}
997 
998 	/* Enable normal pixel sending for FDI */
999 	intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
1000 		       DP_TP_CTL_FDI_AUTOTRAIN |
1001 		       DP_TP_CTL_LINK_TRAIN_NORMAL |
1002 		       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
1003 		       DP_TP_CTL_ENABLE);
1004 }
1005 
1006 void hsw_fdi_disable(struct intel_encoder *encoder)
1007 {
1008 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1009 
1010 	/*
1011 	 * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
1012 	 * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
1013 	 * step 13 is the correct place for it. Step 18 is where it was
1014 	 * originally before the BUN.
1015 	 */
1016 	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_ENABLE, 0);
1017 	intel_de_rmw(dev_priv, DDI_BUF_CTL(PORT_E), DDI_BUF_CTL_ENABLE, 0);
1018 	intel_wait_ddi_buf_idle(dev_priv, PORT_E);
1019 	intel_ddi_disable_clock(encoder);
1020 	intel_de_rmw(dev_priv, FDI_RX_MISC(PIPE_A),
1021 		     FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK,
1022 		     FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2));
1023 	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_PCDCLK, 0);
1024 	intel_de_rmw(dev_priv, FDI_RX_CTL(PIPE_A), FDI_RX_PLL_ENABLE, 0);
1025 }
1026 
1027 void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
1028 {
1029 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1030 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1031 	enum pipe pipe = crtc->pipe;
1032 	i915_reg_t reg;
1033 	u32 temp;
1034 
1035 	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
1036 	reg = FDI_RX_CTL(pipe);
1037 	temp = intel_de_read(dev_priv, reg);
1038 	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
1039 	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
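	/* BPC in FDI rx is consistent with that in TRANSCONF */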
1040 	temp |= (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) << 11;
1041 	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);
1042 
1043 	intel_de_posting_read(dev_priv, reg);
1044 	udelay(200);
1045 
1046 	/* Switch from Rawclk to PCDclk */
1047 	intel_de_rmw(dev_priv, reg, 0, FDI_PCDCLK);
1048 	intel_de_posting_read(dev_priv, reg);
1049 	udelay(200);
1050 
1051 	/* Enable CPU FDI TX PLL, always on for Ironlake */
1052 	reg = FDI_TX_CTL(pipe);
1053 	temp = intel_de_read(dev_priv, reg);
1054 	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
1055 		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);
1056 
1057 		intel_de_posting_read(dev_priv, reg);
1058 		udelay(100);
1059 	}
1060 }
1061 
1062 void ilk_fdi_pll_disable(struct intel_crtc *crtc)
1063 {
1064 	struct drm_device *dev = crtc->base.dev;
1065 	struct drm_i915_private *dev_priv = to_i915(dev);
1066 	enum pipe pipe = crtc->pipe;
1067 
1068 	/* Switch from PCDclk to Rawclk */
1069 	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_PCDCLK, 0);
1070 
1071 	/* Disable CPU FDI TX PLL */
1072 	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_PLL_ENABLE, 0);
1073 	intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
1074 	udelay(100);
1075 
1076 	/* Wait for the clocks to turn off. */
1077 	intel_de_rmw(dev_priv, FDI_RX_CTL(pipe), FDI_RX_PLL_ENABLE, 0);
1078 	intel_de_posting_read(dev_priv, FDI_RX_CTL(pipe));
1079 	udelay(100);
1080 }
1081 
1082 void ilk_fdi_disable(struct intel_crtc *crtc)
1083 {
1084 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1085 	enum pipe pipe = crtc->pipe;
1086 	i915_reg_t reg;
1087 	u32 temp;
1088 
1089 	/* disable CPU FDI tx and PCH FDI rx */
1090 	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe), FDI_TX_ENABLE, 0);
1091 	intel_de_posting_read(dev_priv, FDI_TX_CTL(pipe));
1092 
1093 	reg = FDI_RX_CTL(pipe);
1094 	temp = intel_de_read(dev_priv, reg);
1095 	temp &= ~(0x7 << 16);
1096 	temp |= (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) << 11;
1097 	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);
1098 
1099 	intel_de_posting_read(dev_priv, reg);
1100 	udelay(100);
1101 
1102 	/* Ironlake workaround, disable clock pointer after downing FDI */
1103 	if (HAS_PCH_IBX(dev_priv))
1104 		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
1105 			       FDI_RX_PHASE_SYNC_POINTER_OVR);
1106 
1107 	/* still set train pattern 1 */
1108 	intel_de_rmw(dev_priv, FDI_TX_CTL(pipe),
1109 		     FDI_LINK_TRAIN_NONE, FDI_LINK_TRAIN_PATTERN_1);
1110 
1111 	reg = FDI_RX_CTL(pipe);
1112 	temp = intel_de_read(dev_priv, reg);
1113 	if (HAS_PCH_CPT(dev_priv)) {
1114 		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
1115 		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
1116 	} else {
1117 		temp &= ~FDI_LINK_TRAIN_NONE;
1118 		temp |= FDI_LINK_TRAIN_PATTERN_1;
1119 	}
1120 	/* BPC in FDI rx is consistent with that in TRANSCONF */
1121 	temp &= ~(0x07 << 16);
1122 	temp |= (intel_de_read(dev_priv, TRANSCONF(dev_priv, pipe)) & TRANSCONF_BPC_MASK) << 11;
1123 	intel_de_write(dev_priv, reg, temp);
1124 
1125 	intel_de_posting_read(dev_priv, reg);
1126 	udelay(100);
1127 }
1128 
1129 static const struct intel_fdi_funcs ilk_funcs = {
1130 	.fdi_link_train = ilk_fdi_link_train,
1131 };
1132 
1133 static const struct intel_fdi_funcs gen6_funcs = {
1134 	.fdi_link_train = gen6_fdi_link_train,
1135 };
1136 
1137 static const struct intel_fdi_funcs ivb_funcs = {
1138 	.fdi_link_train = ivb_manual_fdi_link_train,
1139 };
1140 
1141 void
1142 intel_fdi_init_hook(struct drm_i915_private *dev_priv)
1143 {
1144 	if (IS_IRONLAKE(dev_priv)) {
1145 		dev_priv->display.funcs.fdi = &ilk_funcs;
1146 	} else if (IS_SANDYBRIDGE(dev_priv)) {
1147 		dev_priv->display.funcs.fdi = &gen6_funcs;
1148 	} else if (IS_IVYBRIDGE(dev_priv)) {
1149 		/* FIXME: detect B0+ stepping and use auto training */
1150 		dev_priv->display.funcs.fdi = &ivb_funcs;
1151 	}
1152 }
1153