xref: /openbsd/sys/dev/pci/drm/i915/display/intel_tc.c (revision f005ef32)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_cx0_phy_regs.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_driver.h"
#include "intel_display_power_map.h"
#include "intel_display_types.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dp.h"
#include "intel_dp_mst.h"
#include "intel_mg_phy_regs.h"
#include "intel_modeset_lock.h"
#include "intel_tc.h"

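/*
 * Pin assignment values read back from the FIA pin assignment registers;
 * they correspond to the USB Type-C DP alt mode pin assignments C, D and E.
 */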
#define DP_PIN_ASSIGNMENT_C	0x3
#define DP_PIN_ASSIGNMENT_D	0x4
#define DP_PIN_ASSIGNMENT_E	0x5

enum tc_port_mode {
	TC_PORT_DISCONNECTED,
	TC_PORT_TBT_ALT,
	TC_PORT_DP_ALT,
	TC_PORT_LEGACY,
};

struct intel_tc_port;

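/*
 * Hooks abstracting the platform specific TC PHY accesses; implemented
 * below for the ICL, TGL, ADLP and XELPDP generations of the TC subsystem.
 */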
struct intel_tc_phy_ops {
	enum intel_display_power_domain (*cold_off_domain)(struct intel_tc_port *tc);
	u32 (*hpd_live_status)(struct intel_tc_port *tc);
	bool (*is_ready)(struct intel_tc_port *tc);
	bool (*is_owned)(struct intel_tc_port *tc);
	void (*get_hw_state)(struct intel_tc_port *tc);
	bool (*connect)(struct intel_tc_port *tc, int required_lanes);
	void (*disconnect)(struct intel_tc_port *tc);
	void (*init)(struct intel_tc_port *tc);
};

struct intel_tc_port {
	struct intel_digital_port *dig_port;

	const struct intel_tc_phy_ops *phy_ops;

	struct rwlock lock;	/* protects the TypeC port mode */
	intel_wakeref_t lock_wakeref;
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	enum intel_display_power_domain lock_power_domain;
#endif
	struct delayed_work disconnect_phy_work;
	struct delayed_work link_reset_work;
	int link_refcount;
	bool legacy_port:1;
	const char *port_name;
	enum tc_port_mode mode;
	enum tc_port_mode init_mode;
	enum phy_fia phy_fia;
	u8 phy_fia_idx;
};

static enum intel_display_power_domain
tc_phy_cold_off_domain(struct intel_tc_port *);
static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc);
static bool tc_phy_is_ready(struct intel_tc_port *tc);
static bool tc_phy_wait_for_ready(struct intel_tc_port *tc);
static enum tc_port_mode tc_phy_get_current_mode(struct intel_tc_port *tc);

static const char *tc_port_mode_name(enum tc_port_mode mode)
{
	static const char * const names[] = {
		[TC_PORT_DISCONNECTED] = "disconnected",
		[TC_PORT_TBT_ALT] = "tbt-alt",
		[TC_PORT_DP_ALT] = "dp-alt",
		[TC_PORT_LEGACY] = "legacy",
	};

	if (WARN_ON(mode >= ARRAY_SIZE(names)))
		mode = TC_PORT_DISCONNECTED;

	return names[mode];
}

static struct intel_tc_port *to_tc_port(struct intel_digital_port *dig_port)
{
	return dig_port->tc;
}

static struct drm_i915_private *tc_to_i915(struct intel_tc_port *tc)
{
	return to_i915(tc->dig_port->base.base.dev);
}

static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
				  enum tc_port_mode mode)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	struct intel_tc_port *tc = to_tc_port(dig_port);

	return intel_phy_is_tc(i915, phy) && tc->mode == mode;
}

bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_TBT_ALT);
}

bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_DP_ALT);
}

bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
{
	return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY);
}

/*
 * The display power domains used for TC ports depend on the
 * platform and TC mode (legacy, DP-alt, TBT):
 *
 * POWER_DOMAIN_DISPLAY_CORE:
 * --------------------------
 * ADLP/all modes:
 *   - TCSS/IOM access for PHY ready state.
 * ADLP+/all modes:
 *   - DE/north-,south-HPD ISR access for HPD live state.
 *
 * POWER_DOMAIN_PORT_DDI_LANES_<port>:
 * -----------------------------------
 * ICL+/all modes:
 *   - DE/DDI_BUF access for port enabled state.
 * ADLP/all modes:
 *   - DE/DDI_BUF access for PHY owned state.
 *
 * POWER_DOMAIN_AUX_USBC<TC port index>:
 * -------------------------------------
 * ICL/legacy mode:
 *   - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
 *   - TCSS/PHY: block TC-cold power state for using the PHY AUX and
 *     main lanes.
 * ADLP/legacy, DP-alt modes:
 *   - TCSS/PHY: block TC-cold power state for using the PHY AUX and
 *     main lanes.
 *
 * POWER_DOMAIN_TC_COLD_OFF:
 * -------------------------
 * ICL/DP-alt, TBT mode:
 *   - TCSS/TBT: block TC-cold power state for using the (direct or
 *     TBT DP-IN) AUX and main lanes.
 *
 * TGL/all modes:
 *   - TCSS/IOM,FIA access for PHY ready, owned and HPD live state
 *   - TCSS/PHY: block TC-cold power state for using the (direct or
 *     TBT DP-IN) AUX and main lanes.
 *
 * ADLP/TBT mode:
 *   - TCSS/TBT: block TC-cold power state for using the (TBT DP-IN)
 *     AUX and main lanes.
 *
 * XELPDP+/all modes:
 *   - TCSS/IOM,FIA access for PHY ready, owned state
 *   - TCSS/PHY: block TC-cold power state for using the (direct or
 *     TBT DP-IN) AUX and main lanes.
 */
bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);

	return tc_phy_cold_off_domain(tc) ==
	       intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
}

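/*
 * Block the TC-cold power state for the duration of the PHY accesses,
 * returning the power domain acquired for this and a wakeref tracking it.
 * Must be paired with a matching __tc_cold_unblock()/tc_cold_unblock() call.
 */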
static intel_wakeref_t
__tc_cold_block(struct intel_tc_port *tc, enum intel_display_power_domain *domain)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	*domain = tc_phy_cold_off_domain(tc);

	return intel_display_power_get(i915, *domain);
}

static intel_wakeref_t
tc_cold_block(struct intel_tc_port *tc)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t wakeref;

	wakeref = __tc_cold_block(tc, &domain);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	tc->lock_power_domain = domain;
#endif
	return wakeref;
}

static void
__tc_cold_unblock(struct intel_tc_port *tc, enum intel_display_power_domain domain,
		  intel_wakeref_t wakeref)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	intel_display_power_put(i915, domain, wakeref);
}

static void
tc_cold_unblock(struct intel_tc_port *tc, intel_wakeref_t wakeref)
{
	enum intel_display_power_domain domain = tc_phy_cold_off_domain(tc);

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	drm_WARN_ON(&tc_to_i915(tc)->drm, tc->lock_power_domain != domain);
#endif
	__tc_cold_unblock(tc, domain, wakeref);
}

static void
assert_display_core_power_enabled(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	drm_WARN_ON(&i915->drm,
		    !intel_display_power_is_enabled(i915, POWER_DOMAIN_DISPLAY_CORE));
}

static void
assert_tc_cold_blocked(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	bool enabled;

	enabled = intel_display_power_is_enabled(i915,
						 tc_phy_cold_off_domain(tc));
	drm_WARN_ON(&i915->drm, !enabled);
}

static enum intel_display_power_domain
tc_port_power_domain(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port);

	return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
}

static void
assert_tc_port_power_enabled(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	drm_WARN_ON(&i915->drm,
		    !intel_display_power_is_enabled(i915, tc_port_power_domain(tc)));
}

u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	u32 lane_mask;

	lane_mask = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));

	drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
	assert_tc_cold_blocked(tc);

	lane_mask &= DP_LANE_ASSIGNMENT_MASK(tc->phy_fia_idx);
	return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
}

u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	u32 pin_mask;

	pin_mask = intel_de_read(i915, PORT_TX_DFLEXPA1(tc->phy_fia));

	drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
	assert_tc_cold_blocked(tc);

	return (pin_mask & DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx)) >>
	       DP_PIN_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
}

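/*
 * Return the maximum lane count for the current DP alt mode pin
 * assignment: assignment D shares the connector with USB, leaving two
 * lanes for DP, while assignments C and E dedicate all four lanes to DP.
 */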
static int mtl_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	intel_wakeref_t wakeref;
	u32 pin_mask;

	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		pin_mask = intel_tc_port_get_pin_assignment_mask(dig_port);

	switch (pin_mask) {
	default:
		MISSING_CASE(pin_mask);
		fallthrough;
	case DP_PIN_ASSIGNMENT_D:
		return 2;
	case DP_PIN_ASSIGNMENT_C:
	case DP_PIN_ASSIGNMENT_E:
		return 4;
	}
}

int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	intel_wakeref_t wakeref;
	u32 lane_mask;

	if (!intel_phy_is_tc(i915, phy) || tc->mode != TC_PORT_DP_ALT)
		return 4;

	assert_tc_cold_blocked(tc);

	if (DISPLAY_VER(i915) >= 14)
		return mtl_tc_port_get_pin_assignment_mask(dig_port);

	lane_mask = 0;
	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		lane_mask = intel_tc_port_get_lane_mask(dig_port);

	switch (lane_mask) {
	default:
		MISSING_CASE(lane_mask);
		fallthrough;
	case 0x1:
	case 0x2:
	case 0x4:
	case 0x8:
		return 1;
	case 0x3:
	case 0xc:
		return 2;
	case 0xf:
		return 4;
	}
}

void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
				      int required_lanes)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
	u32 val;

	drm_WARN_ON(&i915->drm,
		    lane_reversal && tc->mode != TC_PORT_LEGACY);

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia));
	val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc->phy_fia_idx);

	switch (required_lanes) {
	case 1:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3(tc->phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML0(tc->phy_fia_idx);
		break;
	case 2:
		val |= lane_reversal ?
			DFLEXDPMLE1_DPMLETC_ML3_2(tc->phy_fia_idx) :
			DFLEXDPMLE1_DPMLETC_ML1_0(tc->phy_fia_idx);
		break;
	case 4:
		val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc->phy_fia_idx);
		break;
	default:
		MISSING_CASE(required_lanes);
	}

	intel_de_write(i915, PORT_TX_DFLEXDPMLE1(tc->phy_fia), val);
}

static void tc_port_fixup_legacy_flag(struct intel_tc_port *tc,
				      u32 live_status_mask)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 valid_hpd_mask;

	drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);

	if (hweight32(live_status_mask) != 1)
		return;

	if (tc->legacy_port)
		valid_hpd_mask = BIT(TC_PORT_LEGACY);
	else
		valid_hpd_mask = BIT(TC_PORT_DP_ALT) |
				 BIT(TC_PORT_TBT_ALT);

	if (!(live_status_mask & ~valid_hpd_mask))
		return;

	/* If live status mismatches the VBT flag, trust the live status. */
	drm_dbg_kms(&i915->drm,
		    "Port %s: live status %08x mismatches the legacy port flag %08x, fixing flag\n",
		    tc->port_name, live_status_mask, valid_hpd_mask);

	tc->legacy_port = !tc->legacy_port;
}

static void tc_phy_load_fia_params(struct intel_tc_port *tc, bool modular_fia)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum port port = tc->dig_port->base.port;
	enum tc_port tc_port = intel_port_to_tc(i915, port);

	/*
	 * Each Modular FIA instance houses 2 TC ports. On SoCs with more
	 * than two TC ports there are multiple Modular FIA instances.
	 */
	if (modular_fia) {
		tc->phy_fia = tc_port / 2;
		tc->phy_fia_idx = tc_port % 2;
	} else {
		tc->phy_fia = FIA1;
		tc->phy_fia_idx = tc_port;
	}
}

/*
 * ICL TC PHY handlers
 * -------------------
 */
static enum intel_display_power_domain
icl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;

	if (tc->legacy_port)
		return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);

	return POWER_DOMAIN_TC_COLD_OFF;
}

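/*
 * Return the mask of all modes with a live (hotplug) status, based on the
 * FIA live state and the south display HPD ISR registers. An all-ones FIA
 * readback indicates the PHY is in TC-cold, with nothing connected.
 */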
static u32 icl_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
	intel_wakeref_t wakeref;
	u32 fia_isr;
	u32 pch_isr;
	u32 mask = 0;

	with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref) {
		fia_isr = intel_de_read(i915, PORT_TX_DFLEXDPSP(tc->phy_fia));
		pch_isr = intel_de_read(i915, SDEISR);
	}

	if (fia_isr == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, nothing connected\n",
			    tc->port_name);
		return mask;
	}

	if (fia_isr & TC_LIVE_STATE_TBT(tc->phy_fia_idx))
		mask |= BIT(TC_PORT_TBT_ALT);
	if (fia_isr & TC_LIVE_STATE_TC(tc->phy_fia_idx))
		mask |= BIT(TC_PORT_DP_ALT);

	if (pch_isr & isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}

/*
 * Return the PHY status complete flag indicating that display can acquire the
 * PHY ownership. The IOM firmware sets this flag when a DP-alt or legacy sink
 * is connected and it's ready to switch the ownership to display. The flag
 * will be left cleared when a TBT-alt sink is connected, where the PHY is
 * owned by the TBT subsystem and so switching the ownership to display is not
 * required.
 */
static bool icl_tc_phy_is_ready(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPPMS(tc->phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assuming not ready\n",
			    tc->port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_COMPLETED(tc->phy_fia_idx);
}

static bool icl_tc_phy_take_ownership(struct intel_tc_port *tc,
				      bool take)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, can't %s ownership\n",
			    tc->port_name, take ? "take" : "release");

		return false;
	}

	val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
	if (take)
		val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);

	intel_de_write(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia), val);

	return true;
}

static bool icl_tc_phy_is_owned(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(tc->phy_fia));
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assume not owned\n",
			    tc->port_name);
		return false;
	}

	return val & DP_PHY_MODE_STATUS_NOT_SAFE(tc->phy_fia_idx);
}

static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	enum intel_display_power_domain domain;
	intel_wakeref_t tc_cold_wref;

	tc_cold_wref = __tc_cold_block(tc, &domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED)
		tc->lock_wakeref = tc_cold_block(tc);

	__tc_cold_unblock(tc, domain, tc_cold_wref);
}

/*
 * This function implements the first part of the Connect Flow described by our
 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
 * lanes, EDID, etc) is done as needed in the typical places.
 *
 * Unlike the other ports, type-C ports are not available to use as soon as we
 * get a hotplug. The type-C PHYs can be shared between multiple controllers:
 * display, USB, etc. As a result, handshaking through FIA is required around
 * connect and disconnect to cleanly transfer ownership with the controller and
 * set the type-C power state.
 */
static bool tc_phy_verify_legacy_or_dp_alt_mode(struct intel_tc_port *tc,
						int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	int max_lanes;

	max_lanes = intel_tc_port_fia_max_lane_count(dig_port);
	if (tc->mode == TC_PORT_LEGACY) {
		drm_WARN_ON(&i915->drm, max_lanes != 4);
		return true;
	}

	drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DP_ALT);

	/*
	 * Now we have to re-check the live state, in case the port recently
	 * became disconnected. Not necessary for legacy mode.
	 */
	if (!(tc_phy_hpd_live_status(tc) & BIT(TC_PORT_DP_ALT))) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY sudden disconnect\n",
			    tc->port_name);
		return false;
	}

	if (max_lanes < required_lanes) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY max lanes %d < required lanes %d\n",
			    tc->port_name,
			    max_lanes, required_lanes);
		return false;
	}

	return true;
}

static bool icl_tc_phy_connect(struct intel_tc_port *tc,
			       int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	tc->lock_wakeref = tc_cold_block(tc);

	if (tc->mode == TC_PORT_TBT_ALT)
		return true;

	if ((!tc_phy_is_ready(tc) ||
	     !icl_tc_phy_take_ownership(tc, true)) &&
	    !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership (ready %s)\n",
			    tc->port_name,
			    str_yes_no(tc_phy_is_ready(tc)));
		goto out_unblock_tc_cold;
	}

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_release_phy;

	return true;

out_release_phy:
	icl_tc_phy_take_ownership(tc, false);
out_unblock_tc_cold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	return false;
}

/*
 * See the comment at the connect function. This implements the Disconnect
 * Flow.
 */
static void icl_tc_phy_disconnect(struct intel_tc_port *tc)
{
	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		icl_tc_phy_take_ownership(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
		break;
	default:
		MISSING_CASE(tc->mode);
	}
}

static void icl_tc_phy_init(struct intel_tc_port *tc)
{
	tc_phy_load_fia_params(tc, false);
}

static const struct intel_tc_phy_ops icl_tc_phy_ops = {
	.cold_off_domain = icl_tc_phy_cold_off_domain,
	.hpd_live_status = icl_tc_phy_hpd_live_status,
	.is_ready = icl_tc_phy_is_ready,
	.is_owned = icl_tc_phy_is_owned,
	.get_hw_state = icl_tc_phy_get_hw_state,
	.connect = icl_tc_phy_connect,
	.disconnect = icl_tc_phy_disconnect,
	.init = icl_tc_phy_init,
};

/*
 * TGL TC PHY handlers
 * -------------------
 */
static enum intel_display_power_domain
tgl_tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	return POWER_DOMAIN_TC_COLD_OFF;
}

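/*
 * Detect whether the SoC uses Modular FIA by reading the MODULAR_FIA_MASK
 * strap from FIA1 and load the FIA parameters accordingly.
 */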
static void tgl_tc_phy_init(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	intel_wakeref_t wakeref;
	u32 val;

	with_intel_display_power(i915, tc_phy_cold_off_domain(tc), wakeref)
		val = intel_de_read(i915, PORT_TX_DFLEXDPSP(FIA1));

	drm_WARN_ON(&i915->drm, val == 0xffffffff);

	tc_phy_load_fia_params(tc, val & MODULAR_FIA_MASK);
}

static const struct intel_tc_phy_ops tgl_tc_phy_ops = {
	.cold_off_domain = tgl_tc_phy_cold_off_domain,
	.hpd_live_status = icl_tc_phy_hpd_live_status,
	.is_ready = icl_tc_phy_is_ready,
	.is_owned = icl_tc_phy_is_owned,
	.get_hw_state = icl_tc_phy_get_hw_state,
	.connect = icl_tc_phy_connect,
	.disconnect = icl_tc_phy_disconnect,
	.init = tgl_tc_phy_init,
};

/*
 * ADLP TC PHY handlers
 * --------------------
 */
static enum intel_display_power_domain
adlp_tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;

	if (tc->mode != TC_PORT_TBT_ALT)
		return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);

	return POWER_DOMAIN_TC_COLD_OFF;
}

static u32 adlp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
	u32 cpu_isr_bits = i915->display.hotplug.hpd[hpd_pin];
	u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
	intel_wakeref_t wakeref;
	u32 cpu_isr;
	u32 pch_isr;
	u32 mask = 0;

	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
		cpu_isr = intel_de_read(i915, GEN11_DE_HPD_ISR);
		pch_isr = intel_de_read(i915, SDEISR);
	}

	if (cpu_isr & (cpu_isr_bits & GEN11_DE_TC_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_DP_ALT);
	if (cpu_isr & (cpu_isr_bits & GEN11_DE_TBT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_TBT_ALT);

	if (pch_isr & pch_isr_bit)
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}

/*
 * Return the PHY status complete flag indicating that display can acquire the
 * PHY ownership. The IOM firmware sets this flag when it's ready to switch
 * the ownership to display, regardless of what sink is connected (TBT-alt,
 * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT
 * subsystem and so switching the ownership to display is not required.
 */
static bool adlp_tc_phy_is_ready(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum tc_port tc_port = intel_port_to_tc(i915, tc->dig_port->base.port);
	u32 val;

	assert_display_core_power_enabled(tc);

	val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));
	if (val == 0xffffffff) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: PHY in TCCOLD, assuming not ready\n",
			    tc->port_name);
		return false;
	}

	return val & TCSS_DDI_STATUS_READY;
}

static bool adlp_tc_phy_take_ownership(struct intel_tc_port *tc,
				       bool take)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum port port = tc->dig_port->base.port;

	assert_tc_port_power_enabled(tc);

	intel_de_rmw(i915, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
		     take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0);

	return true;
}

static bool adlp_tc_phy_is_owned(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum port port = tc->dig_port->base.port;
	u32 val;

	assert_tc_port_power_enabled(tc);

	val = intel_de_read(i915, DDI_BUF_CTL(port));
	return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}

static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	port_wakeref = intel_display_power_get(i915, port_power_domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED)
		tc->lock_wakeref = tc_cold_block(tc);

	intel_display_power_put(i915, port_power_domain, port_wakeref);
}

static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	if (tc->mode == TC_PORT_TBT_ALT) {
		tc->lock_wakeref = tc_cold_block(tc);
		return true;
	}

	port_wakeref = intel_display_power_get(i915, port_power_domain);

	if (!adlp_tc_phy_take_ownership(tc, true) &&
	    !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(&i915->drm, "Port %s: can't take PHY ownership\n",
			    tc->port_name);
		goto out_put_port_power;
	}

	if (!tc_phy_is_ready(tc) &&
	    !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
		drm_dbg_kms(&i915->drm, "Port %s: PHY not ready\n",
			    tc->port_name);
		goto out_release_phy;
	}

	tc->lock_wakeref = tc_cold_block(tc);

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_unblock_tc_cold;

	intel_display_power_put(i915, port_power_domain, port_wakeref);

	return true;

out_unblock_tc_cold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
out_release_phy:
	adlp_tc_phy_take_ownership(tc, false);
out_put_port_power:
	intel_display_power_put(i915, port_power_domain, port_wakeref);

	return false;
}

static void adlp_tc_phy_disconnect(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum intel_display_power_domain port_power_domain =
		tc_port_power_domain(tc);
	intel_wakeref_t port_wakeref;

	port_wakeref = intel_display_power_get(i915, port_power_domain);

	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		adlp_tc_phy_take_ownership(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		break;
	default:
		MISSING_CASE(tc->mode);
	}

	intel_display_power_put(i915, port_power_domain, port_wakeref);
}

static void adlp_tc_phy_init(struct intel_tc_port *tc)
{
	tc_phy_load_fia_params(tc, true);
}

static const struct intel_tc_phy_ops adlp_tc_phy_ops = {
	.cold_off_domain = adlp_tc_phy_cold_off_domain,
	.hpd_live_status = adlp_tc_phy_hpd_live_status,
	.is_ready = adlp_tc_phy_is_ready,
	.is_owned = adlp_tc_phy_is_owned,
	.get_hw_state = adlp_tc_phy_get_hw_state,
	.connect = adlp_tc_phy_connect,
	.disconnect = adlp_tc_phy_disconnect,
	.init = adlp_tc_phy_init,
};

/*
 * XELPDP TC PHY handlers
 * ----------------------
 */
static u32 xelpdp_tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum hpd_pin hpd_pin = dig_port->base.hpd_pin;
	u32 pica_isr_bits = i915->display.hotplug.hpd[hpd_pin];
	u32 pch_isr_bit = i915->display.hotplug.pch_hpd[hpd_pin];
	intel_wakeref_t wakeref;
	u32 pica_isr;
	u32 pch_isr;
	u32 mask = 0;

	with_intel_display_power(i915, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
		pica_isr = intel_de_read(i915, PICAINTERRUPT_ISR);
		pch_isr = intel_de_read(i915, SDEISR);
	}

	if (pica_isr & (pica_isr_bits & XELPDP_DP_ALT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_DP_ALT);
	if (pica_isr & (pica_isr_bits & XELPDP_TBT_HOTPLUG_MASK))
		mask |= BIT(TC_PORT_TBT_ALT);

	if (tc->legacy_port && (pch_isr & pch_isr_bit))
		mask |= BIT(TC_PORT_LEGACY);

	return mask;
}

static bool
xelpdp_tc_phy_tcss_power_is_enabled(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum port port = tc->dig_port->base.port;

	assert_tc_cold_blocked(tc);

	return intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_TCSS_POWER_STATE;
}

static bool
xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	if (wait_for(xelpdp_tc_phy_tcss_power_is_enabled(tc) == enabled, 5)) {
		drm_dbg_kms(&i915->drm,
			    "Port %s: timeout waiting for TCSS power to get %s\n",
			    tc->port_name,
			    enabled ? "enabled" : "disabled");
		return false;
	}

	return true;
}

static void __xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum port port = tc->dig_port->base.port;
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port));
	if (enable)
		val |= XELPDP_TCSS_POWER_REQUEST;
	else
		val &= ~XELPDP_TCSS_POWER_REQUEST;
	intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val);
}

static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enable)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	__xelpdp_tc_phy_enable_tcss_power(tc, enable);

	if ((!tc_phy_wait_for_ready(tc) ||
	     !xelpdp_tc_phy_wait_for_tcss_power(tc, enable)) &&
	    !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
		if (enable) {
			__xelpdp_tc_phy_enable_tcss_power(tc, false);
			xelpdp_tc_phy_wait_for_tcss_power(tc, false);
		}

		return false;
	}

	return true;
}

static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum port port = tc->dig_port->base.port;
	u32 val;

	assert_tc_cold_blocked(tc);

	val = intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port));
	if (take)
		val |= XELPDP_TC_PHY_OWNERSHIP;
	else
		val &= ~XELPDP_TC_PHY_OWNERSHIP;
	intel_de_write(i915, XELPDP_PORT_BUF_CTL1(port), val);
}

static bool xelpdp_tc_phy_is_owned(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum port port = tc->dig_port->base.port;

	assert_tc_cold_blocked(tc);

	return intel_de_read(i915, XELPDP_PORT_BUF_CTL1(port)) & XELPDP_TC_PHY_OWNERSHIP;
}

static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	intel_wakeref_t tc_cold_wref;
	enum intel_display_power_domain domain;

	tc_cold_wref = __tc_cold_block(tc, &domain);

	tc->mode = tc_phy_get_current_mode(tc);
	if (tc->mode != TC_PORT_DISCONNECTED)
		tc->lock_wakeref = tc_cold_block(tc);

	drm_WARN_ON(&i915->drm,
		    (tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) &&
		    !xelpdp_tc_phy_tcss_power_is_enabled(tc));

	__tc_cold_unblock(tc, domain, tc_cold_wref);
}

static bool xelpdp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	tc->lock_wakeref = tc_cold_block(tc);

	if (tc->mode == TC_PORT_TBT_ALT)
		return true;

	if (!xelpdp_tc_phy_enable_tcss_power(tc, true))
		goto out_unblock_tccold;

	xelpdp_tc_phy_take_ownership(tc, true);

	if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes))
		goto out_release_phy;

	return true;

out_release_phy:
	xelpdp_tc_phy_take_ownership(tc, false);
	xelpdp_tc_phy_wait_for_tcss_power(tc, false);

out_unblock_tccold:
	tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));

	return false;
}

static void xelpdp_tc_phy_disconnect(struct intel_tc_port *tc)
{
	switch (tc->mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		xelpdp_tc_phy_take_ownership(tc, false);
		xelpdp_tc_phy_enable_tcss_power(tc, false);
		fallthrough;
	case TC_PORT_TBT_ALT:
		tc_cold_unblock(tc, fetch_and_zero(&tc->lock_wakeref));
		break;
	default:
		MISSING_CASE(tc->mode);
	}
}

static const struct intel_tc_phy_ops xelpdp_tc_phy_ops = {
	.cold_off_domain = tgl_tc_phy_cold_off_domain,
	.hpd_live_status = xelpdp_tc_phy_hpd_live_status,
	.is_ready = adlp_tc_phy_is_ready,
	.is_owned = xelpdp_tc_phy_is_owned,
	.get_hw_state = xelpdp_tc_phy_get_hw_state,
	.connect = xelpdp_tc_phy_connect,
	.disconnect = xelpdp_tc_phy_disconnect,
	.init = adlp_tc_phy_init,
};

/*
 * Generic TC PHY handlers
 * -----------------------
 */
static enum intel_display_power_domain
tc_phy_cold_off_domain(struct intel_tc_port *tc)
{
	return tc->phy_ops->cold_off_domain(tc);
}

static u32 tc_phy_hpd_live_status(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 mask;

	mask = tc->phy_ops->hpd_live_status(tc);

	/* The sink can be connected only in a single mode. */
	drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1);

	return mask;
}

static bool tc_phy_is_ready(struct intel_tc_port *tc)
{
	return tc->phy_ops->is_ready(tc);
}

static bool tc_phy_is_owned(struct intel_tc_port *tc)
{
	return tc->phy_ops->is_owned(tc);
}

static void tc_phy_get_hw_state(struct intel_tc_port *tc)
{
	tc->phy_ops->get_hw_state(tc);
}

static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc,
				      bool phy_is_ready, bool phy_is_owned)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	drm_WARN_ON(&i915->drm, phy_is_owned && !phy_is_ready);

	return phy_is_ready && phy_is_owned;
}

static bool tc_phy_is_connected(struct intel_tc_port *tc,
				enum icl_port_dpll_id port_pll_type)
{
	struct intel_encoder *encoder = &tc->dig_port->base;
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	bool phy_is_ready = tc_phy_is_ready(tc);
	bool phy_is_owned = tc_phy_is_owned(tc);
	bool is_connected;

	if (tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned))
		is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY;
	else
		is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT;

	drm_dbg_kms(&i915->drm,
		    "Port %s: PHY connected: %s (ready: %s, owned: %s, pll_type: %s)\n",
		    tc->port_name,
		    str_yes_no(is_connected),
		    str_yes_no(phy_is_ready),
		    str_yes_no(phy_is_owned),
		    port_pll_type == ICL_PORT_DPLL_DEFAULT ? "tbt" : "non-tbt");

	return is_connected;
}

static bool tc_phy_wait_for_ready(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);

	if (wait_for(tc_phy_is_ready(tc), 500)) {
		drm_err(&i915->drm, "Port %s: timeout waiting for PHY ready\n",
			tc->port_name);

		return false;
	}

	return true;
}

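/*
 * Convert a live status mask - with at most one mode bit set - to the
 * corresponding tc_port_mode value. This relies on the BIT() positions in
 * the mask matching the enum tc_port_mode values.
 */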
static enum tc_port_mode
hpd_mask_to_tc_mode(u32 live_status_mask)
{
	if (live_status_mask)
		return fls(live_status_mask) - 1;

	return TC_PORT_DISCONNECTED;
}

static enum tc_port_mode
tc_phy_hpd_live_mode(struct intel_tc_port *tc)
{
	u32 live_status_mask = tc_phy_hpd_live_status(tc);

	return hpd_mask_to_tc_mode(live_status_mask);
}

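/*
 * Return the mode matching the live HPD state for the case where display
 * owns the PHY: a TBT-alt or disconnected live state is unexpected here,
 * so fall back to the mode implied by the port type.
 */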
static enum tc_port_mode
get_tc_mode_in_phy_owned_state(struct intel_tc_port *tc,
			       enum tc_port_mode live_mode)
{
	switch (live_mode) {
	case TC_PORT_LEGACY:
	case TC_PORT_DP_ALT:
		return live_mode;
	default:
		MISSING_CASE(live_mode);
		fallthrough;
	case TC_PORT_TBT_ALT:
	case TC_PORT_DISCONNECTED:
		if (tc->legacy_port)
			return TC_PORT_LEGACY;
		else
			return TC_PORT_DP_ALT;
	}
}

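/*
 * Return the mode matching the live HPD state for the case where display
 * doesn't own the PHY: without ownership a legacy connection can't be
 * active, so report it as disconnected; anything else is reported as
 * TBT-alt, the default mode not requiring PHY ownership.
 */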
static enum tc_port_mode
get_tc_mode_in_phy_not_owned_state(struct intel_tc_port *tc,
				   enum tc_port_mode live_mode)
{
	switch (live_mode) {
	case TC_PORT_LEGACY:
		return TC_PORT_DISCONNECTED;
	case TC_PORT_DP_ALT:
	case TC_PORT_TBT_ALT:
		return TC_PORT_TBT_ALT;
	default:
		MISSING_CASE(live_mode);
		fallthrough;
	case TC_PORT_DISCONNECTED:
		if (tc->legacy_port)
			return TC_PORT_DISCONNECTED;
		else
			return TC_PORT_TBT_ALT;
	}
}

static enum tc_port_mode
tc_phy_get_current_mode(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	enum tc_port_mode live_mode = tc_phy_hpd_live_mode(tc);
	bool phy_is_ready;
	bool phy_is_owned;
	enum tc_port_mode mode;

	/*
	 * For legacy ports the IOM firmware initializes the PHY during boot-up
	 * and system resume whether or not a sink is connected. Wait here for
	 * the initialization to complete.
	 */
	if (tc->legacy_port)
		tc_phy_wait_for_ready(tc);

	phy_is_ready = tc_phy_is_ready(tc);
	phy_is_owned = tc_phy_is_owned(tc);

	if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) {
		mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode);
	} else {
		drm_WARN_ON(&i915->drm, live_mode == TC_PORT_TBT_ALT);
		mode = get_tc_mode_in_phy_owned_state(tc, live_mode);
	}

	drm_dbg_kms(&i915->drm,
		    "Port %s: PHY mode: %s (ready: %s, owned: %s, HPD: %s)\n",
		    tc->port_name,
		    tc_port_mode_name(mode),
		    str_yes_no(phy_is_ready),
		    str_yes_no(phy_is_owned),
		    tc_port_mode_name(live_mode));

	return mode;
}

static enum tc_port_mode default_tc_mode(struct intel_tc_port *tc)
{
	if (tc->legacy_port)
		return TC_PORT_LEGACY;

	return TC_PORT_TBT_ALT;
}

static enum tc_port_mode
hpd_mask_to_target_mode(struct intel_tc_port *tc, u32 live_status_mask)
{
	enum tc_port_mode mode = hpd_mask_to_tc_mode(live_status_mask);

	if (mode != TC_PORT_DISCONNECTED)
		return mode;

	return default_tc_mode(tc);
}

static enum tc_port_mode
tc_phy_get_target_mode(struct intel_tc_port *tc)
{
	u32 live_status_mask = tc_phy_hpd_live_status(tc);

	return hpd_mask_to_target_mode(tc, live_status_mask);
}

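/*
 * Connect the PHY in the mode matching the live HPD status, falling back
 * to the port's default mode if that fails, for instance due to the sink
 * getting disconnected meanwhile.
 */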
static void tc_phy_connect(struct intel_tc_port *tc, int required_lanes)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	u32 live_status_mask = tc_phy_hpd_live_status(tc);
	bool connected;

	tc_port_fixup_legacy_flag(tc, live_status_mask);

	tc->mode = hpd_mask_to_target_mode(tc, live_status_mask);

	connected = tc->phy_ops->connect(tc, required_lanes);
	if (!connected && tc->mode != default_tc_mode(tc)) {
		tc->mode = default_tc_mode(tc);
		connected = tc->phy_ops->connect(tc, required_lanes);
	}

	drm_WARN_ON(&i915->drm, !connected);
}

static void tc_phy_disconnect(struct intel_tc_port *tc)
{
	if (tc->mode != TC_PORT_DISCONNECTED) {
		tc->phy_ops->disconnect(tc);
		tc->mode = TC_PORT_DISCONNECTED;
	}
}

static void tc_phy_init(struct intel_tc_port *tc)
{
	mutex_lock(&tc->lock);
	tc->phy_ops->init(tc);
	mutex_unlock(&tc->lock);
}

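/*
 * Reset the port's current mode by disconnecting the PHY and, unless a
 * disconnected state is forced, reconnecting it in the mode matching the
 * new live status. Must be called with the TC port mode lock held.
 */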
static void intel_tc_port_reset_mode(struct intel_tc_port *tc,
				     int required_lanes, bool force_disconnect)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum tc_port_mode old_tc_mode = tc->mode;

	intel_display_power_flush_work(i915);
	if (!intel_tc_cold_requires_aux_pw(dig_port)) {
		enum intel_display_power_domain aux_domain;
		bool aux_powered;

		aux_domain = intel_aux_power_domain(dig_port);
		aux_powered = intel_display_power_is_enabled(i915, aux_domain);
		drm_WARN_ON(&i915->drm, aux_powered);
	}

	tc_phy_disconnect(tc);
	if (!force_disconnect)
		tc_phy_connect(tc, required_lanes);

	drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
		    tc->port_name,
		    tc_port_mode_name(old_tc_mode),
		    tc_port_mode_name(tc->mode));
}

static bool intel_tc_port_needs_reset(struct intel_tc_port *tc)
{
	return tc_phy_get_target_mode(tc) != tc->mode;
}

static void intel_tc_port_update_mode(struct intel_tc_port *tc,
				      int required_lanes, bool force_disconnect)
{
	if (force_disconnect ||
	    intel_tc_port_needs_reset(tc))
		intel_tc_port_reset_mode(tc, required_lanes, force_disconnect);
}

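/*
 * The link reference count pins the port's current mode: mode updates are
 * skipped while any reference is held (see __intel_tc_port_lock()).
 */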
static void __intel_tc_port_get_link(struct intel_tc_port *tc)
{
	tc->link_refcount++;
}

static void __intel_tc_port_put_link(struct intel_tc_port *tc)
{
	tc->link_refcount--;
}

static bool tc_port_is_enabled(struct intel_tc_port *tc)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;

	assert_tc_port_power_enabled(tc);

	return intel_de_read(i915, DDI_BUF_CTL(dig_port->base.port)) &
	       DDI_BUF_CTL_ENABLE;
}

/**
 * intel_tc_port_init_mode: Read out HW state and init the given port's TypeC mode
 * @dig_port: digital port
 *
 * Read out the HW state and initialize the TypeC mode of @dig_port. The mode
 * will be locked until intel_tc_port_sanitize_mode() is called.
 */
void intel_tc_port_init_mode(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	bool update_mode = false;

	mutex_lock(&tc->lock);

	drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_DISCONNECTED);
	drm_WARN_ON(&i915->drm, tc->lock_wakeref);
	drm_WARN_ON(&i915->drm, tc->link_refcount);

	tc_phy_get_hw_state(tc);
	/*
	 * Save the initial mode for the state check in
	 * intel_tc_port_sanitize_mode().
	 */
	tc->init_mode = tc->mode;

	/*
	 * The PHY needs to be connected for AUX to work during HW readout and
	 * MST topology resume, but the PHY mode can only be changed if the
	 * port is disabled.
	 *
	 * An exception is the case where BIOS leaves the PHY incorrectly
	 * disconnected on an enabled legacy port. Work around that by
	 * connecting the PHY even though the port is enabled. This doesn't
	 * cause a problem as the PHY ownership state is ignored by the
	 * IOM/TCSS firmware (only display can own the PHY in that case).
	 */
	if (!tc_port_is_enabled(tc)) {
		update_mode = true;
	} else if (tc->mode == TC_PORT_DISCONNECTED) {
		drm_WARN_ON(&i915->drm, !tc->legacy_port);
		drm_err(&i915->drm,
			"Port %s: PHY disconnected on enabled port, connecting it\n",
			tc->port_name);
		update_mode = true;
	}

	if (update_mode)
		intel_tc_port_update_mode(tc, 1, false);

	/* Prevent changing tc->mode until intel_tc_port_sanitize_mode() is called. */
	__intel_tc_port_get_link(tc);

	mutex_unlock(&tc->lock);
}

static bool tc_port_has_active_links(struct intel_tc_port *tc,
				     const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = tc_to_i915(tc);
	struct intel_digital_port *dig_port = tc->dig_port;
	enum icl_port_dpll_id pll_type = ICL_PORT_DPLL_DEFAULT;
	int active_links = 0;

	if (dig_port->dp.is_mst) {
		/* TODO: get the PLL type for MST, once HW readout is done for it. */
		active_links = intel_dp_mst_encoder_active_links(dig_port);
	} else if (crtc_state && crtc_state->hw.active) {
		pll_type = intel_ddi_port_pll_type(&dig_port->base, crtc_state);
		active_links = 1;
	}

	if (active_links && !tc_phy_is_connected(tc, pll_type))
		drm_err(&i915->drm,
			"Port %s: PHY disconnected with %d active link(s)\n",
			tc->port_name, active_links);

	return active_links;
}

/**
 * intel_tc_port_sanitize_mode: Sanitize the given port's TypeC mode
 * @dig_port: digital port
 * @crtc_state: atomic state of CRTC connected to @dig_port
 *
 * Sanitize @dig_port's TypeC mode wrt. the encoder's state right after driver
 * loading and system resume:
 * If the encoder is enabled keep the TypeC mode/PHY connected state locked until
 * the encoder is disabled.
 * If the encoder is disabled make sure the PHY is disconnected.
 * @crtc_state is valid if @dig_port is enabled, NULL otherwise.
 */
void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
				 const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);

	mutex_lock(&tc->lock);

	drm_WARN_ON(&i915->drm, tc->link_refcount != 1);
	if (!tc_port_has_active_links(tc, crtc_state)) {
		/*
		 * TBT-alt is the default mode in any case where the PHY
		 * ownership is not held (regardless of the sink's connected
		 * live state), so we'll just switch to disconnected mode from
		 * it here without a note.
		 */
		if (tc->init_mode != TC_PORT_TBT_ALT &&
		    tc->init_mode != TC_PORT_DISCONNECTED)
			drm_dbg_kms(&i915->drm,
				    "Port %s: PHY left in %s mode on disabled port, disconnecting it\n",
				    tc->port_name,
				    tc_port_mode_name(tc->init_mode));
		tc_phy_disconnect(tc);
		__intel_tc_port_put_link(tc);
	}

	drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
		    tc->port_name,
		    tc_port_mode_name(tc->mode));

	mutex_unlock(&tc->lock);
}

/*
 * The type-C ports are different because even when they are connected, they may
 * not be available/usable by the graphics driver: see the comment on
 * icl_tc_phy_connect(). So in our driver instead of adding the additional
 * concept of "usable" and making everything check for "connected and usable",
 * we define a port as "connected" when it is not only connected, but also when
 * it is usable by the rest of the driver. That maintains the old assumption
 * that connected ports are usable, and avoids exposing to the users objects
 * they can't really use.
 */
bool intel_tc_port_connected_locked(struct intel_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	u32 mask = ~0;

	drm_WARN_ON(&i915->drm, !intel_tc_port_ref_held(dig_port));

	if (tc->mode != TC_PORT_DISCONNECTED)
		mask = BIT(tc->mode);

	return tc_phy_hpd_live_status(tc) & mask;
}

bool intel_tc_port_connected(struct intel_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct intel_tc_port *tc = to_tc_port(dig_port);
	bool is_connected;

	mutex_lock(&tc->lock);
	is_connected = intel_tc_port_connected_locked(encoder);
	mutex_unlock(&tc->lock);

	return is_connected;
}

static bool __intel_tc_port_link_needs_reset(struct intel_tc_port *tc)
{
	bool ret;

	mutex_lock(&tc->lock);

	ret = tc->link_refcount &&
	      tc->mode == TC_PORT_DP_ALT &&
	      intel_tc_port_needs_reset(tc);

	mutex_unlock(&tc->lock);

	return ret;
}

bool intel_tc_port_link_needs_reset(struct intel_digital_port *dig_port)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);

	if (!intel_phy_is_tc(i915, phy))
		return false;

	return __intel_tc_port_link_needs_reset(to_tc_port(dig_port));
}

reset_link_commit(struct intel_tc_port * tc,struct intel_atomic_state * state,struct drm_modeset_acquire_ctx * ctx)1606 static int reset_link_commit(struct intel_tc_port *tc,
1607 			     struct intel_atomic_state *state,
1608 			     struct drm_modeset_acquire_ctx *ctx)
1609 {
1610 	struct drm_i915_private *i915 = tc_to_i915(tc);
1611 	struct intel_digital_port *dig_port = tc->dig_port;
1612 	struct intel_dp *intel_dp = enc_to_intel_dp(&dig_port->base);
1613 	struct intel_crtc *crtc;
1614 	u8 pipe_mask;
1615 	int ret;
1616 
1617 	ret = drm_modeset_lock(&i915->drm.mode_config.connection_mutex, ctx);
1618 	if (ret)
1619 		return ret;
1620 
1621 	ret = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask);
1622 	if (ret)
1623 		return ret;
1624 
1625 	if (!pipe_mask)
1626 		return 0;
1627 
1628 	for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
1629 		struct intel_crtc_state *crtc_state;
1630 
1631 		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
1632 		if (IS_ERR(crtc_state))
1633 			return PTR_ERR(crtc_state);
1634 
1635 		crtc_state->uapi.connectors_changed = true;
1636 	}
1637 
1638 	if (!__intel_tc_port_link_needs_reset(tc))
1639 		return 0;
1640 
1641 	return drm_atomic_commit(&state->base);
1642 }
1643 
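/*
 * Reset the port's link by committing the atomic state built up in
 * reset_link_commit(), retrying the commit on modeset lock contention via
 * intel_modeset_lock_ctx_retry().
 */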
1644 static int reset_link(struct intel_tc_port *tc)
1645 {
1646 	struct drm_i915_private *i915 = tc_to_i915(tc);
1647 	struct drm_modeset_acquire_ctx ctx;
1648 	struct drm_atomic_state *_state;
1649 	struct intel_atomic_state *state;
1650 	int ret;
1651 
1652 	_state = drm_atomic_state_alloc(&i915->drm);
1653 	if (!_state)
1654 		return -ENOMEM;
1655 
1656 	state = to_intel_atomic_state(_state);
1657 	state->internal = true;
1658 
1659 	intel_modeset_lock_ctx_retry(&ctx, state, 0, ret)
1660 		ret = reset_link_commit(tc, state, &ctx);
1661 
1662 	drm_atomic_state_put(&state->base);
1663 
1664 	return ret;
1665 }
1666 
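/*
 * Delayed work resetting the link of a DP-alt mode port whose sink has been
 * disconnected: recheck that the reset is still required, then perform it
 * while holding the global mode_config mutex.
 */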
1667 static void intel_tc_port_link_reset_work(struct work_struct *work)
1668 {
1669 	struct intel_tc_port *tc =
1670 		container_of(work, struct intel_tc_port, link_reset_work.work);
1671 	struct drm_i915_private *i915 = tc_to_i915(tc);
1672 	int ret;
1673 
1674 	if (!__intel_tc_port_link_needs_reset(tc))
1675 		return;
1676 
1677 	mutex_lock(&i915->drm.mode_config.mutex);
1678 
1679 	drm_dbg_kms(&i915->drm,
1680 		    "Port %s: TypeC DP-alt sink disconnected, resetting link\n",
1681 		    tc->port_name);
1682 	ret = reset_link(tc);
1683 	drm_WARN_ON(&i915->drm, ret);
1684 
1685 	mutex_unlock(&i915->drm.mode_config.mutex);
1686 }
1687 
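/**
 * intel_tc_port_link_reset - schedule a reset of the port's link
 * @dig_port: digital port
 *
 * Schedule the delayed work resetting the port's DP-alt mode link, with a 2
 * second delay, if such a reset is required.
 *
 * Returns %true if the reset work got scheduled, %false otherwise.
 */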
1688 bool intel_tc_port_link_reset(struct intel_digital_port *dig_port)
1689 {
1690 	if (!intel_tc_port_link_needs_reset(dig_port))
1691 		return false;
1692 
1693 	queue_delayed_work(system_unbound_wq,
1694 			   &to_tc_port(dig_port)->link_reset_work,
1695 			   msecs_to_jiffies(2000));
1696 
1697 	return true;
1698 }
1699 
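/**
 * intel_tc_port_link_cancel_reset_work - cancel a pending link reset work
 * @dig_port: digital port
 *
 * Cancel a pending delayed work resetting the port's link, without waiting
 * for the work to finish. A no-op for non-TypeC PHYs.
 */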
1700 void intel_tc_port_link_cancel_reset_work(struct intel_digital_port *dig_port)
1701 {
1702 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1703 	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
1704 	struct intel_tc_port *tc = to_tc_port(dig_port);
1705 
1706 	if (!intel_phy_is_tc(i915, phy))
1707 		return;
1708 
1709 	cancel_delayed_work(&tc->link_reset_work);
1710 }
1711 
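/*
 * Lock the TypeC port, cancelling any pending PHY disconnect work. Unless a
 * held link reference keeps the current mode fixed, the port mode is also
 * updated to match the PHY's live status. On return the port must be in a
 * connected mode, owning its PHY except in TBT-alt mode.
 */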
1712 static void __intel_tc_port_lock(struct intel_tc_port *tc,
1713 				 int required_lanes)
1714 {
1715 	struct drm_i915_private *i915 = tc_to_i915(tc);
1716 
1717 	mutex_lock(&tc->lock);
1718 
1719 	cancel_delayed_work(&tc->disconnect_phy_work);
1720 
1721 	if (!tc->link_refcount)
1722 		intel_tc_port_update_mode(tc, required_lanes,
1723 					  false);
1724 
1725 	drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_DISCONNECTED);
1726 	drm_WARN_ON(&i915->drm, tc->mode != TC_PORT_TBT_ALT &&
1727 				!tc_phy_is_owned(tc));
1728 }
1729 
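/*
 * Lock the TypeC port for access via the default, 1 lane wide configuration.
 *
 * A minimal usage sketch (hypothetical caller, not part of this file):
 *
 *	intel_tc_port_lock(dig_port);
 *	connected = intel_tc_port_connected_locked(&dig_port->base);
 *	intel_tc_port_unlock(dig_port);
 */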
1730 void intel_tc_port_lock(struct intel_digital_port *dig_port)
1731 {
1732 	__intel_tc_port_lock(to_tc_port(dig_port), 1);
1733 }
1734 
1735 /*
1736  * Disconnect the given digital port from its TypeC PHY (handing control of
1737  * the PHY back to the TypeC subsystem). This happens in a delayed manner,
1738  * after each AUX transaction and modeset disable.
1739  */
1740 static void intel_tc_port_disconnect_phy_work(struct work_struct *work)
1741 {
1742 	struct intel_tc_port *tc =
1743 		container_of(work, struct intel_tc_port, disconnect_phy_work.work);
1744 
1745 	mutex_lock(&tc->lock);
1746 
1747 	if (!tc->link_refcount)
1748 		intel_tc_port_update_mode(tc, 1, true);
1749 
1750 	mutex_unlock(&tc->lock);
1751 }
1752 
1753 /**
1754  * intel_tc_port_flush_work - flush the work disconnecting the PHY
1755  * @dig_port: digital port
1756  *
1757  * Flush the delayed work disconnecting an idle PHY.
1758  */
1759 static void intel_tc_port_flush_work(struct intel_digital_port *dig_port)
1760 {
1761 	flush_delayed_work(&to_tc_port(dig_port)->disconnect_phy_work);
1762 }
1763 
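/**
 * intel_tc_port_suspend - suspend a TypeC port
 * @dig_port: digital port
 *
 * Cancel any pending link reset work and flush the delayed work
 * disconnecting an idle PHY.
 */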
1764 void intel_tc_port_suspend(struct intel_digital_port *dig_port)
1765 {
1766 	struct intel_tc_port *tc = to_tc_port(dig_port);
1767 
1768 	cancel_delayed_work_sync(&tc->link_reset_work);
1769 	intel_tc_port_flush_work(dig_port);
1770 }
1771 
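/*
 * Unlock the TypeC port. If the port is idle (no link reference held) and in
 * a connected mode, schedule the delayed work disconnecting its PHY after a
 * 1 second grace period.
 */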
1772 void intel_tc_port_unlock(struct intel_digital_port *dig_port)
1773 {
1774 	struct intel_tc_port *tc = to_tc_port(dig_port);
1775 
1776 	if (!tc->link_refcount && tc->mode != TC_PORT_DISCONNECTED)
1777 		queue_delayed_work(system_unbound_wq, &tc->disconnect_phy_work,
1778 				   msecs_to_jiffies(1000));
1779 
1780 	mutex_unlock(&tc->lock);
1781 }
1782 
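/*
 * Return whether the port's mode is guarded against changes, i.e. whether
 * its lock or a link reference on it is held. Used in asserts, for instance
 * in intel_tc_port_connected_locked() above.
 */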
1783 bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
1784 {
1785 	struct intel_tc_port *tc = to_tc_port(dig_port);
1786 
1787 	return mutex_is_locked(&tc->lock) ||
1788 	       tc->link_refcount;
1789 }
1790 
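/*
 * Acquire a link reference on the port, connecting its PHY with
 * @required_lanes first if no reference was held yet. While a reference is
 * held the port's mode stays fixed and its PHY won't be disconnected. Must
 * be paired with intel_tc_port_put_link().
 */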
1791 void intel_tc_port_get_link(struct intel_digital_port *dig_port,
1792 			    int required_lanes)
1793 {
1794 	struct intel_tc_port *tc = to_tc_port(dig_port);
1795 
1796 	__intel_tc_port_lock(tc, required_lanes);
1797 	__intel_tc_port_get_link(tc);
1798 	intel_tc_port_unlock(dig_port);
1799 }
1800 
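/*
 * Drop a link reference acquired via intel_tc_port_get_link().
 */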
1801 void intel_tc_port_put_link(struct intel_digital_port *dig_port)
1802 {
1803 	struct intel_tc_port *tc = to_tc_port(dig_port);
1804 
1805 	intel_tc_port_lock(dig_port);
1806 	__intel_tc_port_put_link(tc);
1807 	intel_tc_port_unlock(dig_port);
1808 
1809 	/*
1810 	 * The firmware will not update the HPD status of other TypeC ports
1811 	 * that are active in DP-alt mode with their sink disconnected, until
1812 	 * this port is disabled and its PHY gets disconnected. Make sure this
1813 	 * happens in a timely manner by disconnecting the PHY synchronously.
1814 	 */
1815 	intel_tc_port_flush_work(dig_port);
1816 }
1817 
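/**
 * intel_tc_port_init - initialize the TypeC port state
 * @dig_port: digital port
 * @is_legacy: whether the port is wired to a legacy, static connector
 *
 * Allocate @dig_port's TypeC state, select the PHY ops matching the display
 * version and read out the port's initial mode.
 *
 * Returns 0 on success, a negative error code on failure.
 */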
1818 int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
1819 {
1820 	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
1821 	struct intel_tc_port *tc;
1822 	enum port port = dig_port->base.port;
1823 	enum tc_port tc_port = intel_port_to_tc(i915, port);
1824 
1825 	if (drm_WARN_ON(&i915->drm, tc_port == TC_PORT_NONE))
1826 		return -EINVAL;
1827 
1828 	tc = kzalloc(sizeof(*tc), GFP_KERNEL);
1829 	if (!tc)
1830 		return -ENOMEM;
1831 
1832 	dig_port->tc = tc;
1833 	tc->dig_port = dig_port;
1834 
1835 	if (DISPLAY_VER(i915) >= 14)
1836 		tc->phy_ops = &xelpdp_tc_phy_ops;
1837 	else if (DISPLAY_VER(i915) >= 13)
1838 		tc->phy_ops = &adlp_tc_phy_ops;
1839 	else if (DISPLAY_VER(i915) >= 12)
1840 		tc->phy_ops = &tgl_tc_phy_ops;
1841 	else
1842 		tc->phy_ops = &icl_tc_phy_ops;
1843 
1844 	tc->port_name = kasprintf(GFP_KERNEL, "%c/TC#%d", port_name(port),
1845 				  tc_port + 1);
1846 	if (!tc->port_name) {
1847 		kfree(tc);
1848 		return -ENOMEM;
1849 	}
1850 
1851 	rw_init(&tc->lock, "itcp");
1852 	/* TODO: Combine the two works */
1853 	INIT_DELAYED_WORK(&tc->disconnect_phy_work, intel_tc_port_disconnect_phy_work);
1854 	INIT_DELAYED_WORK(&tc->link_reset_work, intel_tc_port_link_reset_work);
1855 	tc->legacy_port = is_legacy;
1856 	tc->mode = TC_PORT_DISCONNECTED;
1857 	tc->link_refcount = 0;
1858 
1859 	tc_phy_init(tc);
1860 
1861 	intel_tc_port_init_mode(dig_port);
1862 
1863 	return 0;
1864 }
1865 
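/*
 * Free the TypeC port state of @dig_port, after cancelling and flushing its
 * pending delayed works.
 */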
1866 void intel_tc_port_cleanup(struct intel_digital_port *dig_port)
1867 {
1868 	intel_tc_port_suspend(dig_port);
1869 
1870 	kfree(dig_port->tc->port_name);
1871 	kfree(dig_port->tc);
1872 	dig_port->tc = NULL;
1873 }
1874