/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  The copyright holders make no representations
 * about the suitability of this software for any purpose.  It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/iopoll.h>

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stacktrace.h>
#include <linux/sort.h>
#include <linux/timekeeping.h>
#include <linux/math64.h>
#endif

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "drm_dp_helper_internal.h"
#include "drm_dp_mst_topology_internal.h"
/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband messages.
 */
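/*
 * An up request (such as CONNECTION_STATUS_NOTIFY) received from a branch
 * device, kept on a list so it can be handled outside of the receive path.
 */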
struct drm_dp_pending_up_req {
	struct drm_dp_sideband_msg_hdr hdr;
	struct drm_dp_sideband_msg_req_body msg;
	struct list_head next;
};

static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);

static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id, u8 start_slot, u8 num_slots);

static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size, u8 *bytes);
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb);

static void
drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_branch *mstb);

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);

static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
						 struct drm_dp_mst_branch *branch);

#define DBG_PREFIX "[dp_mst]"

#define DP_STR(x) [DP_ ## x] = #x
static const char *drm_dp_mst_req_type_str(u8 req_type)
{
	static const char * const req_type_str[] = {
		DP_STR(GET_MSG_TRANSACTION_VERSION),
		DP_STR(LINK_ADDRESS),
		DP_STR(CONNECTION_STATUS_NOTIFY),
		DP_STR(ENUM_PATH_RESOURCES),
		DP_STR(ALLOCATE_PAYLOAD),
		DP_STR(QUERY_PAYLOAD),
		DP_STR(RESOURCE_STATUS_NOTIFY),
		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
		DP_STR(REMOTE_DPCD_READ),
		DP_STR(REMOTE_DPCD_WRITE),
		DP_STR(REMOTE_I2C_READ),
		DP_STR(REMOTE_I2C_WRITE),
		DP_STR(POWER_UP_PHY),
		DP_STR(POWER_DOWN_PHY),
		DP_STR(SINK_EVENT_NOTIFY),
		DP_STR(QUERY_STREAM_ENC_STATUS),
	};

	if (req_type >= ARRAY_SIZE(req_type_str) ||
	    !req_type_str[req_type])
		return "unknown";

	return req_type_str[req_type];
}

#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x

static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
	static const char * const nak_reason_str[] = {
		DP_STR(WRITE_FAILURE),
		DP_STR(INVALID_READ),
		DP_STR(CRC_FAILURE),
		DP_STR(BAD_PARAM),
		DP_STR(DEFER),
		DP_STR(LINK_FAILURE),
		DP_STR(NO_RESOURCES),
		DP_STR(DPCD_FAIL),
		DP_STR(I2C_NAK),
		DP_STR(ALLOCATE_FAIL),
	};

	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
	    !nak_reason_str[nak_reason])
		return "unknown";

	return nak_reason_str[nak_reason];
}

#undef DP_STR
#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x

static const char *drm_dp_mst_sideband_tx_state_str(int state)
{
	static const char * const sideband_reason_str[] = {
		DP_STR(QUEUED),
		DP_STR(START_SEND),
		DP_STR(SENT),
		DP_STR(RX),
		DP_STR(TIMEOUT),
	};

	if (state >= ARRAY_SIZE(sideband_reason_str) ||
	    !sideband_reason_str[state])
		return "unknown";

	return sideband_reason_str[state];
}

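/*
 * The RAD packs one hop (port number) per nibble, two hops per byte; unpack
 * it to one hop per byte so the whole path can be printed with %ph.
 */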
static int
drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
{
	int i;
	u8 unpacked_rad[16];

	for (i = 0; i < lct; i++) {
		if (i % 2)
			unpacked_rad[i] = rad[i / 2] >> 4;
		else
			unpacked_rad[i] = rad[i / 2] & 0xf;
	}

	/* TODO: Eventually add something to printk so we can format the rad
	 * like this: 1.2.3
	 */
	return snprintf(out, len, "%*phC", lct, unpacked_rad);
}

/* sideband msg handling */
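/*
 * CRC-4 over the header nibbles, polynomial x^4 + x + 1 (0x13 including the
 * implicit high bit), computed bit-serially, MSB first.
 */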
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}

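/*
 * CRC-8 over the message body bytes, polynomial x^8 + x^7 + x^6 + x^4 +
 * x^2 + 1 (0xd5 including the implicit high bit), computed MSB first.
 */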
static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}
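
/*
 * Sideband header size: one LCT/LCR byte, lct / 2 RAD bytes, the
 * broadcast/path/length byte, and the SOMT/EOMT/seqno byte whose low
 * nibble carries the CRC-4 - i.e. 3 + lct / 2.
 */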
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;

	size += (hdr->lct / 2);
	return size;
}

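/*
 * Pack a sideband message header into @buf. The CRC-4 covers every header
 * nibble except the final one, hence the (idx * 2) - 1 nibble count; the
 * CRC itself then fills that last nibble.
 */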
static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;

	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}

static bool drm_dp_decode_sideband_msg_hdr(const struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;

	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		drm_dbg_kms(mgr->dev, "crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}

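/*
 * Encode a sideband request body into raw->msg. Field layouts follow the
 * DP sideband MSG transaction definitions; exported only for self-tests.
 */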
void
drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
			   struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;

	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;
	case DP_QUERY_STREAM_ENC_STATUS: {
		const struct drm_dp_query_stream_enc_status *msg;

		msg = &req->u.enc_status;
		buf[idx] = msg->stream_id;
		idx++;
		memcpy(&buf[idx], msg->client_id, sizeof(msg->client_id));
		idx += sizeof(msg->client_id);
		buf[idx] = 0;
		buf[idx] |= FIELD_PREP(GENMASK(1, 0), msg->stream_event);
		buf[idx] |= msg->valid_stream_event ? BIT(2) : 0;
		buf[idx] |= FIELD_PREP(GENMASK(4, 3), msg->stream_behavior);
		buf[idx] |= msg->valid_stream_behavior ? BIT(5) : 0;
		idx++;
		}
		break;
	}
	raw->cur_len = idx;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);

/* Decode a sideband request we've encoded, mainly used for debugging */
int
drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
			   struct drm_dp_sideband_msg_req_body *req)
{
	const u8 *buf = raw->msg;
	int i, idx = 0;

	req->req_type = buf[idx++] & 0x7f;
	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
		break;
	case DP_ALLOCATE_PAYLOAD:
		{
			struct drm_dp_allocate_payload *a =
				&req->u.allocate_payload;

			a->number_sdp_streams = buf[idx] & 0xf;
			a->port_number = (buf[idx] >> 4) & 0xf;

			WARN_ON(buf[++idx] & 0x80);
			a->vcpi = buf[idx] & 0x7f;

			a->pbn = buf[++idx] << 8;
			a->pbn |= buf[++idx];

			idx++;
			for (i = 0; i < a->number_sdp_streams; i++) {
				a->sdp_stream_sink[i] =
					(buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
			}
		}
		break;
	case DP_QUERY_PAYLOAD:
		req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
		WARN_ON(buf[++idx] & 0x80);
		req->u.query_payload.vcpi = buf[idx] & 0x7f;
		break;
	case DP_REMOTE_DPCD_READ:
		{
			struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;

			r->port_number = (buf[idx] >> 4) & 0xf;

			r->dpcd_address = (buf[idx] << 16) & 0xf0000;
			r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			r->dpcd_address |= buf[++idx] & 0xff;

			r->num_bytes = buf[++idx];
		}
		break;
	case DP_REMOTE_DPCD_WRITE:
		{
			struct drm_dp_remote_dpcd_write *w =
				&req->u.dpcd_write;

			w->port_number = (buf[idx] >> 4) & 0xf;

			w->dpcd_address = (buf[idx] << 16) & 0xf0000;
			w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			w->dpcd_address |= buf[++idx] & 0xff;

			w->num_bytes = buf[++idx];

			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	case DP_REMOTE_I2C_READ:
		{
			struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
			struct drm_dp_remote_i2c_read_tx *tx;
			bool failed = false;

			r->num_transactions = buf[idx] & 0x3;
			r->port_number = (buf[idx] >> 4) & 0xf;
			for (i = 0; i < r->num_transactions; i++) {
				tx = &r->transactions[i];

				tx->i2c_dev_id = buf[++idx] & 0x7f;
				tx->num_bytes = buf[++idx];
				tx->bytes = kmemdup(&buf[++idx],
						    tx->num_bytes,
						    GFP_KERNEL);
				if (!tx->bytes) {
					failed = true;
					break;
				}
				idx += tx->num_bytes;
				tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
				tx->i2c_transaction_delay = buf[idx] & 0xf;
			}

			if (failed) {
				for (i = 0; i < r->num_transactions; i++) {
					tx = &r->transactions[i];
					kfree(tx->bytes);
				}
				return -ENOMEM;
			}

			r->read_i2c_device_id = buf[++idx] & 0x7f;
			r->num_bytes_read = buf[++idx];
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		{
			struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;

			w->port_number = (buf[idx] >> 4) & 0xf;
			w->write_i2c_device_id = buf[++idx] & 0x7f;
			w->num_bytes = buf[++idx];
			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	case DP_QUERY_STREAM_ENC_STATUS:
		req->u.enc_status.stream_id = buf[idx++];
		for (i = 0; i < sizeof(req->u.enc_status.client_id); i++)
			req->u.enc_status.client_id[i] = buf[idx++];

		req->u.enc_status.stream_event = FIELD_GET(GENMASK(1, 0),
							   buf[idx]);
		req->u.enc_status.valid_stream_event = FIELD_GET(BIT(2),
								 buf[idx]);
		req->u.enc_status.stream_behavior = FIELD_GET(GENMASK(4, 3),
							      buf[idx]);
		req->u.enc_status.valid_stream_behavior = FIELD_GET(BIT(5),
								    buf[idx]);
		break;
	}

	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);

void
drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
				  int indent, struct drm_printer *printer)
{
	int i;

#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
	if (req->req_type == DP_LINK_ADDRESS) {
		/* No contents to print */
		P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
		return;
	}

	P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
	indent++;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		P("port=%d\n", req->u.port_num.port_number);
		break;
	case DP_ALLOCATE_PAYLOAD:
		P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
		  req->u.allocate_payload.port_number,
		  req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.sdp_stream_sink);
		break;
	case DP_QUERY_PAYLOAD:
		P("port=%d vcpi=%d\n",
		  req->u.query_payload.port_number,
		  req->u.query_payload.vcpi);
		break;
	case DP_REMOTE_DPCD_READ:
		P("port=%d dpcd_addr=%05x len=%d\n",
		  req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
		  req->u.dpcd_read.num_bytes);
		break;
	case DP_REMOTE_DPCD_WRITE:
		P("port=%d addr=%05x len=%d: %*ph\n",
		  req->u.dpcd_write.port_number,
		  req->u.dpcd_write.dpcd_address,
		  req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
		  req->u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		P("port=%d num_tx=%d id=%d size=%d:\n",
		  req->u.i2c_read.port_number,
		  req->u.i2c_read.num_transactions,
		  req->u.i2c_read.read_i2c_device_id,
		  req->u.i2c_read.num_bytes_read);

		indent++;
		for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
			const struct drm_dp_remote_i2c_read_tx *rtx =
				&req->u.i2c_read.transactions[i];

			P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
			  i, rtx->i2c_dev_id, rtx->num_bytes,
			  rtx->no_stop_bit, rtx->i2c_transaction_delay,
			  rtx->num_bytes, rtx->bytes);
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		P("port=%d id=%d size=%d: %*ph\n",
		  req->u.i2c_write.port_number,
		  req->u.i2c_write.write_i2c_device_id,
		  req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
		  req->u.i2c_write.bytes);
		break;
	case DP_QUERY_STREAM_ENC_STATUS:
		P("stream_id=%u client_id=%*ph stream_event=%x "
		  "valid_event=%d stream_behavior=%x valid_behavior=%d",
		  req->u.enc_status.stream_id,
		  (int)ARRAY_SIZE(req->u.enc_status.client_id),
		  req->u.enc_status.client_id, req->u.enc_status.stream_event,
		  req->u.enc_status.valid_stream_event,
		  req->u.enc_status.stream_behavior,
		  req->u.enc_status.valid_stream_behavior);
		break;
	default:
		P("???\n");
		break;
	}
#undef P
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);

static inline void
drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
				const struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_sideband_msg_req_body req;
	char buf[64];
	int ret;
	int i;

	drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
			      sizeof(buf));
	drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
		   txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
		   drm_dp_mst_sideband_tx_state_str(txmsg->state),
		   txmsg->path_msg, buf);

	ret = drm_dp_decode_sideband_req(txmsg, &req);
	if (ret) {
		drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
		return;
	}
	drm_dp_dump_sideband_msg_req_body(&req, 1, p);

	switch (req.req_type) {
	case DP_REMOTE_DPCD_WRITE:
		kfree(req.u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		for (i = 0; i < req.u.i2c_read.num_transactions; i++)
			kfree(req.u.i2c_read.transactions[i].bytes);
		break;
	case DP_REMOTE_I2C_WRITE:
		kfree(req.u.i2c_write.bytes);
		break;
	}
}

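/* Append the body CRC-8 after @len bytes of sideband chunk data. */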
static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;

	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

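/*
 * SOMT/EOMT mark the first and last chunk of a sideband message
 * transaction. A chunk that arrives with no transaction open (no SOMT seen
 * yet) or that tries to start a second transaction mid-stream is rejected.
 */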
static bool drm_dp_sideband_msg_set_header(struct drm_dp_sideband_msg_rx *msg,
					   struct drm_dp_sideband_msg_hdr *hdr,
					   u8 hdrlen)
{
	/*
	 * ignore out-of-order messages or messages that are part of a
	 * failed transaction
	 */
	if (!hdr->somt && !msg->have_somt)
		return false;

	/* get length contained in this portion */
	msg->curchunk_idx = 0;
	msg->curchunk_len = hdr->msg_len;
	msg->curchunk_hdrlen = hdrlen;

	/* we have already gotten an somt - don't bother parsing */
	if (hdr->somt && msg->have_somt)
		return false;

	if (hdr->somt) {
		memcpy(&msg->initial_hdr, hdr,
		       sizeof(struct drm_dp_sideband_msg_hdr));
		msg->have_somt = true;
	}
	if (hdr->eomt)
		msg->have_eomt = true;

	return true;
}

/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg,
					   u8 *replybuf, u8 replybuflen)
{
	u8 crc4;

	memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
	msg->curchunk_idx += replybuflen;

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		if (crc4 != msg->chunk[msg->curchunk_len - 1])
			print_hex_dump(KERN_DEBUG, "wrong crc",
				       DUMP_PREFIX_NONE, 16, 1,
				       msg->chunk, msg->curchunk_len, false);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}

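/*
 * Reply body parsers. Each one checks idx against raw->curlen as it
 * advances, so a truncated or corrupt reply fails gracefully instead of
 * reading past the received data.
 */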
static bool drm_dp_sideband_parse_link_address(const struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;

	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;
		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote dpcd read reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;
	/* TODO check */
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen) {
		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
			      idx, raw->curlen);
		return false;
	}
	return true;
}

static bool
drm_dp_sideband_parse_query_stream_enc_status(
				struct drm_dp_sideband_msg_rx *raw,
				struct drm_dp_sideband_msg_reply_body *repmsg)
{
	struct drm_dp_query_stream_enc_status_ack_reply *reply;

	reply = &repmsg->u.enc_status;

	reply->stream_id = raw->msg[3];

	reply->reply_signed = raw->msg[2] & BIT(0);

	/*
	 * NOTE: It's my impression from reading the spec that the below parsing
	 * is correct. However I noticed while testing with an HDCP 1.4 display
	 * through an HDCP 2.2 hub that only bit 3 was set. In that case, I
	 * would expect both bits to be set. So keep the parsing following the
	 * spec, but beware reality might not match the spec (at least for some
	 * configurations).
	 */
	reply->hdcp_1x_device_present = raw->msg[2] & BIT(4);
	reply->hdcp_2x_device_present = raw->msg[2] & BIT(3);

	reply->query_capable_device_present = raw->msg[2] & BIT(5);
	reply->legacy_device_present = raw->msg[2] & BIT(6);
	reply->unauthorizable_device_present = raw->msg[2] & BIT(7);

	reply->auth_completed = !!(raw->msg[1] & BIT(3));
	reply->encryption_enabled = !!(raw->msg[1] & BIT(4));
	reply->repeater_present = !!(raw->msg[1] & BIT(5));
	reply->state = (raw->msg[1] & GENMASK(7, 6)) >> 6;

	return true;
}

static bool drm_dp_sideband_parse_reply(const struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(mgr, raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_REMOTE_I2C_WRITE:
		return true; /* since there's nothing to parse */
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
	case DP_CLEAR_PAYLOAD_ID_TABLE:
		return true; /* since there's nothing to parse */
	case DP_QUERY_STREAM_ENC_STATUS:
		return drm_dp_sideband_parse_query_stream_enc_status(raw, msg);
	default:
		drm_err(mgr->dev, "Got unknown reply 0x%02x (%s)\n",
			msg->req_type, drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

static bool
drm_dp_sideband_parse_connection_status_notify(const struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	drm_dbg_kms(mgr->dev, "connection status reply parse length fail %d %d\n",
		    idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(const struct drm_dp_mst_topology_mgr *mgr,
							 struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	drm_dbg_kms(mgr->dev, "resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(const struct drm_dp_mst_topology_mgr *mgr,
				      struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(mgr, raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(mgr, raw, msg);
	default:
		drm_err(mgr->dev, "Got unknown request 0x%02x (%s)\n",
			msg->req_type, drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}

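/*
 * build_*() helpers: fill out the request body for one sideband transaction
 * type and encode it into @msg, flagging path messages where required.
 */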
build_dpcd_write(struct drm_dp_sideband_msg_tx * msg,u8 port_num,u32 offset,u8 num_bytes,u8 * bytes)11411bb76ff1Sjsg static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
11421bb76ff1Sjsg 			     u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
11431bb76ff1Sjsg {
11441bb76ff1Sjsg 	struct drm_dp_sideband_msg_req_body req;
11451bb76ff1Sjsg 
11461bb76ff1Sjsg 	req.req_type = DP_REMOTE_DPCD_WRITE;
11471bb76ff1Sjsg 	req.u.dpcd_write.port_number = port_num;
11481bb76ff1Sjsg 	req.u.dpcd_write.dpcd_address = offset;
11491bb76ff1Sjsg 	req.u.dpcd_write.num_bytes = num_bytes;
11501bb76ff1Sjsg 	req.u.dpcd_write.bytes = bytes;
11511bb76ff1Sjsg 	drm_dp_encode_sideband_req(&req, msg);
11521bb76ff1Sjsg }
11531bb76ff1Sjsg 
build_link_address(struct drm_dp_sideband_msg_tx * msg)11541bb76ff1Sjsg static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
11551bb76ff1Sjsg {
11561bb76ff1Sjsg 	struct drm_dp_sideband_msg_req_body req;
11571bb76ff1Sjsg 
11581bb76ff1Sjsg 	req.req_type = DP_LINK_ADDRESS;
11591bb76ff1Sjsg 	drm_dp_encode_sideband_req(&req, msg);
11601bb76ff1Sjsg }
11611bb76ff1Sjsg 
build_clear_payload_id_table(struct drm_dp_sideband_msg_tx * msg)11621bb76ff1Sjsg static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
11631bb76ff1Sjsg {
11641bb76ff1Sjsg 	struct drm_dp_sideband_msg_req_body req;
11651bb76ff1Sjsg 
11661bb76ff1Sjsg 	req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
11671bb76ff1Sjsg 	drm_dp_encode_sideband_req(&req, msg);
11681bb76ff1Sjsg 	msg->path_msg = true;
11691bb76ff1Sjsg }
11701bb76ff1Sjsg 
build_enum_path_resources(struct drm_dp_sideband_msg_tx * msg,int port_num)11711bb76ff1Sjsg static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
11721bb76ff1Sjsg 				     int port_num)
11731bb76ff1Sjsg {
11741bb76ff1Sjsg 	struct drm_dp_sideband_msg_req_body req;
11751bb76ff1Sjsg 
11761bb76ff1Sjsg 	req.req_type = DP_ENUM_PATH_RESOURCES;
11771bb76ff1Sjsg 	req.u.port_num.port_number = port_num;
11781bb76ff1Sjsg 	drm_dp_encode_sideband_req(&req, msg);
11791bb76ff1Sjsg 	msg->path_msg = true;
11801bb76ff1Sjsg 	return 0;
11811bb76ff1Sjsg }
11821bb76ff1Sjsg 
build_allocate_payload(struct drm_dp_sideband_msg_tx * msg,int port_num,u8 vcpi,uint16_t pbn,u8 number_sdp_streams,u8 * sdp_stream_sink)11831bb76ff1Sjsg static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg,
11841bb76ff1Sjsg 				   int port_num,
11851bb76ff1Sjsg 				   u8 vcpi, uint16_t pbn,
11861bb76ff1Sjsg 				   u8 number_sdp_streams,
11871bb76ff1Sjsg 				   u8 *sdp_stream_sink)
11881bb76ff1Sjsg {
11891bb76ff1Sjsg 	struct drm_dp_sideband_msg_req_body req;
11901bb76ff1Sjsg 
11911bb76ff1Sjsg 	memset(&req, 0, sizeof(req));
11921bb76ff1Sjsg 	req.req_type = DP_ALLOCATE_PAYLOAD;
11931bb76ff1Sjsg 	req.u.allocate_payload.port_number = port_num;
11941bb76ff1Sjsg 	req.u.allocate_payload.vcpi = vcpi;
11951bb76ff1Sjsg 	req.u.allocate_payload.pbn = pbn;
11961bb76ff1Sjsg 	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
11971bb76ff1Sjsg 	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
11981bb76ff1Sjsg 		   number_sdp_streams);
11991bb76ff1Sjsg 	drm_dp_encode_sideband_req(&req, msg);
12001bb76ff1Sjsg 	msg->path_msg = true;
12011bb76ff1Sjsg }
12021bb76ff1Sjsg 
12031bb76ff1Sjsg static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
12041bb76ff1Sjsg 				   int port_num, bool power_up)
12051bb76ff1Sjsg {
12061bb76ff1Sjsg 	struct drm_dp_sideband_msg_req_body req;
12071bb76ff1Sjsg 
12081bb76ff1Sjsg 	if (power_up)
12091bb76ff1Sjsg 		req.req_type = DP_POWER_UP_PHY;
12101bb76ff1Sjsg 	else
12111bb76ff1Sjsg 		req.req_type = DP_POWER_DOWN_PHY;
12121bb76ff1Sjsg 
12131bb76ff1Sjsg 	req.u.port_num.port_number = port_num;
12141bb76ff1Sjsg 	drm_dp_encode_sideband_req(&req, msg);
12151bb76ff1Sjsg 	msg->path_msg = true;
12161bb76ff1Sjsg }
12171bb76ff1Sjsg 
12181bb76ff1Sjsg static int
12191bb76ff1Sjsg build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id,
12201bb76ff1Sjsg 			      u8 *q_id)
12211bb76ff1Sjsg {
12221bb76ff1Sjsg 	struct drm_dp_sideband_msg_req_body req;
12231bb76ff1Sjsg 
12241bb76ff1Sjsg 	req.req_type = DP_QUERY_STREAM_ENC_STATUS;
12251bb76ff1Sjsg 	req.u.enc_status.stream_id = stream_id;
12261bb76ff1Sjsg 	memcpy(req.u.enc_status.client_id, q_id,
12271bb76ff1Sjsg 	       sizeof(req.u.enc_status.client_id));
12281bb76ff1Sjsg 	req.u.enc_status.stream_event = 0;
12291bb76ff1Sjsg 	req.u.enc_status.valid_stream_event = false;
12301bb76ff1Sjsg 	req.u.enc_status.stream_behavior = 0;
12311bb76ff1Sjsg 	req.u.enc_status.valid_stream_behavior = false;
12321bb76ff1Sjsg 
12331bb76ff1Sjsg 	drm_dp_encode_sideband_req(&req, msg);
12341bb76ff1Sjsg 	return 0;
12351bb76ff1Sjsg }
12361bb76ff1Sjsg 
12371bb76ff1Sjsg static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
12381bb76ff1Sjsg 			      struct drm_dp_sideband_msg_tx *txmsg)
12391bb76ff1Sjsg {
12401bb76ff1Sjsg 	unsigned int state;
12411bb76ff1Sjsg 
12421bb76ff1Sjsg 	/*
12431bb76ff1Sjsg 	 * All updates to txmsg->state are protected by mgr->qlock, and the two
12441bb76ff1Sjsg 	 * cases we check here are terminal states. For those the barriers
12451bb76ff1Sjsg 	 * provided by the wake_up/wait_event pair are enough.
12461bb76ff1Sjsg 	 */
12471bb76ff1Sjsg 	state = READ_ONCE(txmsg->state);
12481bb76ff1Sjsg 	return (state == DRM_DP_SIDEBAND_TX_RX ||
12491bb76ff1Sjsg 		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
12501bb76ff1Sjsg }
12511bb76ff1Sjsg 
12521bb76ff1Sjsg static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
12531bb76ff1Sjsg 				    struct drm_dp_sideband_msg_tx *txmsg)
12541bb76ff1Sjsg {
12551bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
12561bb76ff1Sjsg 	unsigned long wait_timeout = msecs_to_jiffies(4000);
12571bb76ff1Sjsg 	unsigned long wait_expires = jiffies + wait_timeout;
12581bb76ff1Sjsg 	int ret;
12591bb76ff1Sjsg 
12601bb76ff1Sjsg 	for (;;) {
12611bb76ff1Sjsg 		/*
12621bb76ff1Sjsg 		 * If the driver provides a way for this, change to
12631bb76ff1Sjsg 		 * poll-waiting for the MST reply interrupt if we didn't receive
12641bb76ff1Sjsg 		 * it for 50 msec. This would cater for cases where the HPD
12651bb76ff1Sjsg 		 * pulse signal got lost somewhere, even though the sink raised
12661bb76ff1Sjsg 		 * the corresponding MST interrupt correctly. One example is the
12671bb76ff1Sjsg 		 * Club 3D CAC-1557 TypeC -> DP adapter which for some reason
12681bb76ff1Sjsg 		 * filters out short pulses with a duration less than ~540 usec.
12691bb76ff1Sjsg 		 *
12701bb76ff1Sjsg 		 * The poll period is 50 msec to avoid missing an interrupt
12711bb76ff1Sjsg 		 * after the sink has cleared it (after a 110msec timeout
12721bb76ff1Sjsg 		 * since it raised the interrupt).
12731bb76ff1Sjsg 		 */
12741bb76ff1Sjsg 		ret = wait_event_timeout(mgr->tx_waitq,
12751bb76ff1Sjsg 					 check_txmsg_state(mgr, txmsg),
12761bb76ff1Sjsg 					 mgr->cbs->poll_hpd_irq ?
12771bb76ff1Sjsg 						msecs_to_jiffies(50) :
12781bb76ff1Sjsg 						wait_timeout);
12791bb76ff1Sjsg 
12801bb76ff1Sjsg 		if (ret || !mgr->cbs->poll_hpd_irq ||
12811bb76ff1Sjsg 		    time_after(jiffies, wait_expires))
12821bb76ff1Sjsg 			break;
12831bb76ff1Sjsg 
12841bb76ff1Sjsg 		mgr->cbs->poll_hpd_irq(mgr);
12851bb76ff1Sjsg 	}
12861bb76ff1Sjsg 
12871bb76ff1Sjsg 	mutex_lock(&mgr->qlock);
12881bb76ff1Sjsg 	if (ret > 0) {
12891bb76ff1Sjsg 		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
12901bb76ff1Sjsg 			ret = -EIO;
12911bb76ff1Sjsg 			goto out;
12921bb76ff1Sjsg 		}
12931bb76ff1Sjsg 	} else {
12941bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "timed out msg send %p %d %d\n",
12951bb76ff1Sjsg 			    txmsg, txmsg->state, txmsg->seqno);
12961bb76ff1Sjsg 
12971bb76ff1Sjsg 		/* dump some state */
12981bb76ff1Sjsg 		ret = -EIO;
12991bb76ff1Sjsg 
13001bb76ff1Sjsg 		/* remove from q */
13011bb76ff1Sjsg 		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
13021bb76ff1Sjsg 		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
13031bb76ff1Sjsg 		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
13041bb76ff1Sjsg 			list_del(&txmsg->next);
13051bb76ff1Sjsg 	}
13061bb76ff1Sjsg out:
13071bb76ff1Sjsg 	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
13081bb76ff1Sjsg 		struct drm_printer p = drm_debug_printer(DBG_PREFIX);
13091bb76ff1Sjsg 
13101bb76ff1Sjsg 		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
13111bb76ff1Sjsg 	}
13121bb76ff1Sjsg 	mutex_unlock(&mgr->qlock);
13131bb76ff1Sjsg 
13141bb76ff1Sjsg 	drm_dp_mst_kick_tx(mgr);
13151bb76ff1Sjsg 	return ret;
13161bb76ff1Sjsg }
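
/*
 * A minimal sketch (not part of this file) of the &drm_dp_mst_topology_cbs
 * .poll_hpd_irq hook that the wait loop above falls back to. All of the
 * example_* names are hypothetical driver code: the hook simply re-runs
 * whatever routine the driver's HPD interrupt path already uses to read the
 * DP_SINK_COUNT_ESI vector and service pending sideband replies, in case
 * the short HPD pulse itself was lost:
 *
 *	static void example_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
 *	{
 *		struct example_device *edev = example_mgr_to_dev(mgr);
 *
 *		example_handle_mst_short_pulse(edev);
 *	}
 */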
13171bb76ff1Sjsg 
13181bb76ff1Sjsg static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
13191bb76ff1Sjsg {
13201bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb;
13211bb76ff1Sjsg 
13221bb76ff1Sjsg 	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
13231bb76ff1Sjsg 	if (!mstb)
13241bb76ff1Sjsg 		return NULL;
13251bb76ff1Sjsg 
13261bb76ff1Sjsg 	mstb->lct = lct;
13271bb76ff1Sjsg 	if (lct > 1)
13281bb76ff1Sjsg 		memcpy(mstb->rad, rad, lct / 2);
13291bb76ff1Sjsg 	INIT_LIST_HEAD(&mstb->ports);
13301bb76ff1Sjsg 	kref_init(&mstb->topology_kref);
13311bb76ff1Sjsg 	kref_init(&mstb->malloc_kref);
13321bb76ff1Sjsg 	return mstb;
13331bb76ff1Sjsg }
13341bb76ff1Sjsg 
13351bb76ff1Sjsg static void drm_dp_free_mst_branch_device(struct kref *kref)
13361bb76ff1Sjsg {
13371bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb =
13381bb76ff1Sjsg 		container_of(kref, struct drm_dp_mst_branch, malloc_kref);
13391bb76ff1Sjsg 
13401bb76ff1Sjsg 	if (mstb->port_parent)
13411bb76ff1Sjsg 		drm_dp_mst_put_port_malloc(mstb->port_parent);
13421bb76ff1Sjsg 
13431bb76ff1Sjsg 	kfree(mstb);
13441bb76ff1Sjsg }
13451bb76ff1Sjsg 
13461bb76ff1Sjsg /**
13471bb76ff1Sjsg  * DOC: Branch device and port refcounting
13481bb76ff1Sjsg  *
13491bb76ff1Sjsg  * Topology refcount overview
13501bb76ff1Sjsg  * ~~~~~~~~~~~~~~~~~~~~~~~~~~
13511bb76ff1Sjsg  *
13521bb76ff1Sjsg  * The refcounting schemes for &struct drm_dp_mst_branch and &struct
13531bb76ff1Sjsg  * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
13541bb76ff1Sjsg  * two different kinds of refcounts: topology refcounts, and malloc refcounts.
13551bb76ff1Sjsg  *
13561bb76ff1Sjsg  * Topology refcounts are not exposed to drivers, and are handled internally
13571bb76ff1Sjsg  * by the DP MST helpers. The helpers use them in order to prevent the
13581bb76ff1Sjsg  * in-memory topology state from being changed in the middle of critical
13591bb76ff1Sjsg  * operations like changing the internal state of payload allocations. This
13601bb76ff1Sjsg  * means each branch and port will be considered to be connected to the rest
13611bb76ff1Sjsg  * of the topology until its topology refcount reaches zero. Additionally,
13621bb76ff1Sjsg  * for ports this means that their associated &struct drm_connector will stay
13631bb76ff1Sjsg  * registered with userspace until the port's refcount reaches 0.
13641bb76ff1Sjsg  *
13651bb76ff1Sjsg  * Malloc refcount overview
13661bb76ff1Sjsg  * ~~~~~~~~~~~~~~~~~~~~~~~~
13671bb76ff1Sjsg  *
13681bb76ff1Sjsg  * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
13691bb76ff1Sjsg  * drm_dp_mst_branch allocated even after all of its topology references have
13701bb76ff1Sjsg  * been dropped, so that the driver or MST helpers can safely access each
13711bb76ff1Sjsg  * branch's last known state before it was disconnected from the topology.
13721bb76ff1Sjsg  * When the malloc refcount of a port or branch reaches 0, the memory
13731bb76ff1Sjsg  * allocation containing the &struct drm_dp_mst_branch or &struct
13741bb76ff1Sjsg  * drm_dp_mst_port respectively will be freed.
13751bb76ff1Sjsg  *
13761bb76ff1Sjsg  * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
13771bb76ff1Sjsg  * to drivers. As of writing this documentation, there are no drivers that
13781bb76ff1Sjsg  * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
13791bb76ff1Sjsg  * helpers. Exposing this API to drivers in a race-free manner would take more
13801bb76ff1Sjsg  * tweaking of the refcounting scheme, however patches are welcome provided
13811bb76ff1Sjsg  * there is a legitimate driver usecase for this.
13821bb76ff1Sjsg  *
13831bb76ff1Sjsg  * Refcount relationships in a topology
13841bb76ff1Sjsg  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
13851bb76ff1Sjsg  *
13861bb76ff1Sjsg  * Let's take a look at why the relationship between topology and malloc
13871bb76ff1Sjsg  * refcounts is designed the way it is.
13881bb76ff1Sjsg  *
13891bb76ff1Sjsg  * .. kernel-figure:: dp-mst/topology-figure-1.dot
13901bb76ff1Sjsg  *
13911bb76ff1Sjsg  *    An example of topology and malloc refs in a DP MST topology with two
13921bb76ff1Sjsg  *    active payloads. Topology refcount increments are indicated by solid
13931bb76ff1Sjsg  *    lines, and malloc refcount increments are indicated by dashed lines.
13941bb76ff1Sjsg  *    Each starts from the branch which incremented the refcount, and ends at
13951bb76ff1Sjsg  *    the branch to which the refcount belongs, i.e. the arrow points the
13961bb76ff1Sjsg  *    same way as the C pointers used to reference a structure.
13971bb76ff1Sjsg  *
13981bb76ff1Sjsg  * As you can see in the above figure, every branch increments the topology
13991bb76ff1Sjsg  * refcount of its children, and increments the malloc refcount of its
14001bb76ff1Sjsg  * parent. Additionally, every payload increments the malloc refcount of its
14011bb76ff1Sjsg  * assigned port by 1.
14021bb76ff1Sjsg  *
14031bb76ff1Sjsg  * So, what would happen if MSTB #3 from the above figure was unplugged from
14041bb76ff1Sjsg  * the system, but the driver hadn't yet removed payload #2 from port #3? The
14051bb76ff1Sjsg  * topology would start to look like the figure below.
14061bb76ff1Sjsg  *
14071bb76ff1Sjsg  * .. kernel-figure:: dp-mst/topology-figure-2.dot
14081bb76ff1Sjsg  *
14091bb76ff1Sjsg  *    Ports and branch devices which have been released from memory are
14101bb76ff1Sjsg  *    colored grey, and references which have been removed are colored red.
14111bb76ff1Sjsg  *
14121bb76ff1Sjsg  * Whenever a port or branch device's topology refcount reaches zero, it will
14131bb76ff1Sjsg  * decrement the topology refcounts of all its children, the malloc refcount
14141bb76ff1Sjsg  * of its parent, and finally its own malloc refcount. For MSTB #4 and port
14151bb76ff1Sjsg  * #4, this means they both have been disconnected from the topology and freed
14161bb76ff1Sjsg  * from memory. But, because payload #2 is still holding a reference to port
14171bb76ff1Sjsg  * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
14181bb76ff1Sjsg  * is still accessible from memory. This also means port #3 has not yet
14191bb76ff1Sjsg  * decremented the malloc refcount of MSTB #3, so its &struct
14201bb76ff1Sjsg  * drm_dp_mst_branch will also stay allocated in memory until port #3's
14211bb76ff1Sjsg  * malloc refcount reaches 0.
14221bb76ff1Sjsg  *
14231bb76ff1Sjsg  * This relationship is necessary because in order to release payload #2, we
14241bb76ff1Sjsg  * need to be able to figure out the last relative of port #3 that's still
14251bb76ff1Sjsg  * connected to the topology. In this case, we would travel up the topology as
14261bb76ff1Sjsg  * shown below.
14271bb76ff1Sjsg  *
14281bb76ff1Sjsg  * .. kernel-figure:: dp-mst/topology-figure-3.dot
14291bb76ff1Sjsg  *
14301bb76ff1Sjsg  * And finally, remove payload #2 by communicating with port #2 through
14311bb76ff1Sjsg  * sideband transactions.
14321bb76ff1Sjsg  */
14331bb76ff1Sjsg 
14341bb76ff1Sjsg /**
14351bb76ff1Sjsg  * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
14361bb76ff1Sjsg  * device
14371bb76ff1Sjsg  * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
14381bb76ff1Sjsg  *
14391bb76ff1Sjsg  * Increments &drm_dp_mst_branch.malloc_kref. When
14401bb76ff1Sjsg  * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
14411bb76ff1Sjsg  * will be released and @mstb may no longer be used.
14421bb76ff1Sjsg  *
14431bb76ff1Sjsg  * See also: drm_dp_mst_put_mstb_malloc()
14441bb76ff1Sjsg  */
14451bb76ff1Sjsg static void
14461bb76ff1Sjsg drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
14471bb76ff1Sjsg {
14481bb76ff1Sjsg 	kref_get(&mstb->malloc_kref);
14491bb76ff1Sjsg 	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
14501bb76ff1Sjsg }
14511bb76ff1Sjsg 
14521bb76ff1Sjsg /**
14531bb76ff1Sjsg  * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
14541bb76ff1Sjsg  * device
14551bb76ff1Sjsg  * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
14561bb76ff1Sjsg  *
14571bb76ff1Sjsg  * Decrements &drm_dp_mst_branch.malloc_kref. When
14581bb76ff1Sjsg  * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
14591bb76ff1Sjsg  * will be released and @mstb may no longer be used.
14601bb76ff1Sjsg  *
14611bb76ff1Sjsg  * See also: drm_dp_mst_get_mstb_malloc()
14621bb76ff1Sjsg  */
14631bb76ff1Sjsg static void
14641bb76ff1Sjsg drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
14651bb76ff1Sjsg {
14661bb76ff1Sjsg 	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
14671bb76ff1Sjsg 	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
14681bb76ff1Sjsg }
14691bb76ff1Sjsg 
14701bb76ff1Sjsg static void drm_dp_free_mst_port(struct kref *kref)
14711bb76ff1Sjsg {
14721bb76ff1Sjsg 	struct drm_dp_mst_port *port =
14731bb76ff1Sjsg 		container_of(kref, struct drm_dp_mst_port, malloc_kref);
14741bb76ff1Sjsg 
14751bb76ff1Sjsg 	drm_dp_mst_put_mstb_malloc(port->parent);
14761bb76ff1Sjsg 	kfree(port);
14771bb76ff1Sjsg }
14781bb76ff1Sjsg 
14791bb76ff1Sjsg /**
14801bb76ff1Sjsg  * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
14811bb76ff1Sjsg  * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
14821bb76ff1Sjsg  *
14831bb76ff1Sjsg  * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
14841bb76ff1Sjsg  * reaches 0, the memory allocation for @port will be released and @port may
14851bb76ff1Sjsg  * no longer be used.
14861bb76ff1Sjsg  *
14871bb76ff1Sjsg  * Because @port could potentially be freed at any time by the DP MST helpers
14881bb76ff1Sjsg  * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
14891bb76ff1Sjsg  * function, drivers that wish to make use of &struct drm_dp_mst_port should
14901bb76ff1Sjsg  * ensure that they grab at least one main malloc reference to their MST ports
14911bb76ff1Sjsg  * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
14921bb76ff1Sjsg  * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
14931bb76ff1Sjsg  *
14941bb76ff1Sjsg  * See also: drm_dp_mst_put_port_malloc()
14951bb76ff1Sjsg  */
14961bb76ff1Sjsg void
14971bb76ff1Sjsg drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
14981bb76ff1Sjsg {
14991bb76ff1Sjsg 	kref_get(&port->malloc_kref);
15001bb76ff1Sjsg 	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref));
15011bb76ff1Sjsg }
15021bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
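
/*
 * An illustrative sketch of the rule described above, assuming a
 * hypothetical driver implementation of
 * &drm_dp_mst_topology_cbs.add_connector (only drm_dp_mst_get_port_malloc()
 * is a real helper here):
 *
 *	static struct drm_connector *
 *	example_add_connector(struct drm_dp_mst_topology_mgr *mgr,
 *			      struct drm_dp_mst_port *port, const char *path)
 *	{
 *		struct drm_connector *connector;
 *
 *		connector = example_create_mst_connector(mgr, port, path);
 *		if (!connector)
 *			return NULL;
 *
 *		drm_dp_mst_get_port_malloc(port);
 *		return connector;
 *	}
 *
 * The matching drm_dp_mst_put_port_malloc() would then live in the driver's
 * connector destroy path.
 */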
15031bb76ff1Sjsg 
15041bb76ff1Sjsg /**
15051bb76ff1Sjsg  * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
15061bb76ff1Sjsg  * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
15071bb76ff1Sjsg  *
15081bb76ff1Sjsg  * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
15091bb76ff1Sjsg  * reaches 0, the memory allocation for @port will be released and @port may
15101bb76ff1Sjsg  * no longer be used.
15111bb76ff1Sjsg  *
15121bb76ff1Sjsg  * See also: drm_dp_mst_get_port_malloc()
15131bb76ff1Sjsg  */
15141bb76ff1Sjsg void
15151bb76ff1Sjsg drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
15161bb76ff1Sjsg {
15171bb76ff1Sjsg 	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
15181bb76ff1Sjsg 	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
15191bb76ff1Sjsg }
15201bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
15211bb76ff1Sjsg 
15221bb76ff1Sjsg #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
15231bb76ff1Sjsg 
15241bb76ff1Sjsg #define STACK_DEPTH 8
15251bb76ff1Sjsg 
15261bb76ff1Sjsg static noinline void
15271bb76ff1Sjsg __topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
15281bb76ff1Sjsg 		    struct drm_dp_mst_topology_ref_history *history,
15291bb76ff1Sjsg 		    enum drm_dp_mst_topology_ref_type type)
15301bb76ff1Sjsg {
15311bb76ff1Sjsg 	struct drm_dp_mst_topology_ref_entry *entry = NULL;
15321bb76ff1Sjsg 	depot_stack_handle_t backtrace;
15331bb76ff1Sjsg 	ulong stack_entries[STACK_DEPTH];
15341bb76ff1Sjsg 	uint n;
15351bb76ff1Sjsg 	int i;
15361bb76ff1Sjsg 
15371bb76ff1Sjsg 	n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
15381bb76ff1Sjsg 	backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
15391bb76ff1Sjsg 	if (!backtrace)
15401bb76ff1Sjsg 		return;
15411bb76ff1Sjsg 
15421bb76ff1Sjsg 	/* Try to find an existing entry for this backtrace */
15431bb76ff1Sjsg 	for (i = 0; i < history->len; i++) {
15441bb76ff1Sjsg 		if (history->entries[i].backtrace == backtrace) {
15451bb76ff1Sjsg 			entry = &history->entries[i];
15461bb76ff1Sjsg 			break;
15471bb76ff1Sjsg 		}
15481bb76ff1Sjsg 	}
15491bb76ff1Sjsg 
15501bb76ff1Sjsg 	/* Otherwise add one */
15511bb76ff1Sjsg 	if (!entry) {
15521bb76ff1Sjsg 		struct drm_dp_mst_topology_ref_entry *new;
15531bb76ff1Sjsg 		int new_len = history->len + 1;
15541bb76ff1Sjsg 
15551bb76ff1Sjsg 		new = krealloc(history->entries, sizeof(*new) * new_len,
15561bb76ff1Sjsg 			       GFP_KERNEL);
15571bb76ff1Sjsg 		if (!new)
15581bb76ff1Sjsg 			return;
15591bb76ff1Sjsg 
15601bb76ff1Sjsg 		entry = &new[history->len];
15611bb76ff1Sjsg 		history->len = new_len;
15621bb76ff1Sjsg 		history->entries = new;
15631bb76ff1Sjsg 
15641bb76ff1Sjsg 		entry->backtrace = backtrace;
15651bb76ff1Sjsg 		entry->type = type;
15661bb76ff1Sjsg 		entry->count = 0;
15671bb76ff1Sjsg 	}
15681bb76ff1Sjsg 	entry->count++;
15691bb76ff1Sjsg 	entry->ts_nsec = ktime_get_ns();
15701bb76ff1Sjsg }
15711bb76ff1Sjsg 
15721bb76ff1Sjsg static int
15731bb76ff1Sjsg topology_ref_history_cmp(const void *a, const void *b)
15741bb76ff1Sjsg {
15751bb76ff1Sjsg 	const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;
15761bb76ff1Sjsg 
15771bb76ff1Sjsg 	if (entry_a->ts_nsec > entry_b->ts_nsec)
15781bb76ff1Sjsg 		return 1;
15791bb76ff1Sjsg 	else if (entry_a->ts_nsec < entry_b->ts_nsec)
15801bb76ff1Sjsg 		return -1;
15811bb76ff1Sjsg 	else
15821bb76ff1Sjsg 		return 0;
15831bb76ff1Sjsg }
15841bb76ff1Sjsg 
15851bb76ff1Sjsg static inline const char *
15861bb76ff1Sjsg topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
15871bb76ff1Sjsg {
15881bb76ff1Sjsg 	if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
15891bb76ff1Sjsg 		return "get";
15901bb76ff1Sjsg 	else
15911bb76ff1Sjsg 		return "put";
15921bb76ff1Sjsg }
15931bb76ff1Sjsg 
15941bb76ff1Sjsg static void
15951bb76ff1Sjsg __dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
15961bb76ff1Sjsg 			    void *ptr, const char *type_str)
15971bb76ff1Sjsg {
15981bb76ff1Sjsg 	struct drm_printer p = drm_debug_printer(DBG_PREFIX);
15991bb76ff1Sjsg 	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
16001bb76ff1Sjsg 	int i;
16011bb76ff1Sjsg 
16021bb76ff1Sjsg 	if (!buf)
16031bb76ff1Sjsg 		return;
16041bb76ff1Sjsg 
16051bb76ff1Sjsg 	if (!history->len)
16061bb76ff1Sjsg 		goto out;
16071bb76ff1Sjsg 
16081bb76ff1Sjsg 	/* First, sort the list so that it goes from oldest to newest
16091bb76ff1Sjsg 	 * reference entry
16101bb76ff1Sjsg 	 */
16111bb76ff1Sjsg 	sort(history->entries, history->len, sizeof(*history->entries),
16121bb76ff1Sjsg 	     topology_ref_history_cmp, NULL);
16131bb76ff1Sjsg 
16141bb76ff1Sjsg 	drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
16151bb76ff1Sjsg 		   type_str, ptr);
16161bb76ff1Sjsg 
16171bb76ff1Sjsg 	for (i = 0; i < history->len; i++) {
16181bb76ff1Sjsg 		const struct drm_dp_mst_topology_ref_entry *entry =
16191bb76ff1Sjsg 			&history->entries[i];
16201bb76ff1Sjsg 		u64 ts_nsec = entry->ts_nsec;
16211bb76ff1Sjsg 		u32 rem_nsec = do_div(ts_nsec, 1000000000);
16221bb76ff1Sjsg 
16231bb76ff1Sjsg 		stack_depot_snprint(entry->backtrace, buf, PAGE_SIZE, 4);
16241bb76ff1Sjsg 
16251bb76ff1Sjsg 		drm_printf(&p, "  %d %ss (last at %5llu.%06u):\n%s",
16261bb76ff1Sjsg 			   entry->count,
16271bb76ff1Sjsg 			   topology_ref_type_to_str(entry->type),
16281bb76ff1Sjsg 			   ts_nsec, rem_nsec / 1000, buf);
16291bb76ff1Sjsg 	}
16301bb76ff1Sjsg 
16311bb76ff1Sjsg 	/* Now free the history, since this is the only time we expose it */
16321bb76ff1Sjsg 	kfree(history->entries);
16331bb76ff1Sjsg out:
16341bb76ff1Sjsg 	kfree(buf);
16351bb76ff1Sjsg }
16361bb76ff1Sjsg 
16371bb76ff1Sjsg static __always_inline void
16381bb76ff1Sjsg drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
16391bb76ff1Sjsg {
16401bb76ff1Sjsg 	__dump_topology_ref_history(&mstb->topology_ref_history, mstb,
16411bb76ff1Sjsg 				    "MSTB");
16421bb76ff1Sjsg }
16431bb76ff1Sjsg 
16441bb76ff1Sjsg static __always_inline void
16451bb76ff1Sjsg drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
16461bb76ff1Sjsg {
16471bb76ff1Sjsg 	__dump_topology_ref_history(&port->topology_ref_history, port,
16481bb76ff1Sjsg 				    "Port");
16491bb76ff1Sjsg }
16501bb76ff1Sjsg 
16511bb76ff1Sjsg static __always_inline void
16521bb76ff1Sjsg save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
16531bb76ff1Sjsg 		       enum drm_dp_mst_topology_ref_type type)
16541bb76ff1Sjsg {
16551bb76ff1Sjsg 	__topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
16561bb76ff1Sjsg }
16571bb76ff1Sjsg 
16581bb76ff1Sjsg static __always_inline void
16591bb76ff1Sjsg save_port_topology_ref(struct drm_dp_mst_port *port,
16601bb76ff1Sjsg 		       enum drm_dp_mst_topology_ref_type type)
16611bb76ff1Sjsg {
16621bb76ff1Sjsg 	__topology_ref_save(port->mgr, &port->topology_ref_history, type);
16631bb76ff1Sjsg }
16641bb76ff1Sjsg 
16651bb76ff1Sjsg static inline void
16661bb76ff1Sjsg topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
16671bb76ff1Sjsg {
16681bb76ff1Sjsg 	mutex_lock(&mgr->topology_ref_history_lock);
16691bb76ff1Sjsg }
16701bb76ff1Sjsg 
16711bb76ff1Sjsg static inline void
16721bb76ff1Sjsg topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
16731bb76ff1Sjsg {
16741bb76ff1Sjsg 	mutex_unlock(&mgr->topology_ref_history_lock);
16751bb76ff1Sjsg }
16761bb76ff1Sjsg #else
16771bb76ff1Sjsg static inline void
16781bb76ff1Sjsg topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
16791bb76ff1Sjsg static inline void
16801bb76ff1Sjsg topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
16811bb76ff1Sjsg static inline void
16821bb76ff1Sjsg drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
16831bb76ff1Sjsg static inline void
16841bb76ff1Sjsg drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
16851bb76ff1Sjsg #define save_mstb_topology_ref(mstb, type)
16861bb76ff1Sjsg #define save_port_topology_ref(port, type)
16871bb76ff1Sjsg #endif
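
/*
 * Note: the reference history machinery above is only compiled in with
 * CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS=y; when a topology refcount drops
 * to zero, the recorded get/put backtraces are dumped through the DRM
 * debug printer. A hedged kconfig sketch for a test kernel chasing
 * topology refcount leaks (the option needs stack depot support):
 *
 *	CONFIG_STACKDEPOT=y
 *	CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS=y
 */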
16881bb76ff1Sjsg 
16891bb76ff1Sjsg struct drm_dp_mst_atomic_payload *
16901bb76ff1Sjsg drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
16911bb76ff1Sjsg 				 struct drm_dp_mst_port *port)
16921bb76ff1Sjsg {
16931bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *payload;
16941bb76ff1Sjsg 
16951bb76ff1Sjsg 	list_for_each_entry(payload, &state->payloads, next)
16961bb76ff1Sjsg 		if (payload->port == port)
16971bb76ff1Sjsg 			return payload;
16981bb76ff1Sjsg 
16991bb76ff1Sjsg 	return NULL;
17001bb76ff1Sjsg }
17011bb76ff1Sjsg EXPORT_SYMBOL(drm_atomic_get_mst_payload_state);
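
/*
 * Typical usage, sketched under the assumption that the caller is in an
 * atomic check/commit path with a &struct drm_atomic_state in hand
 * (example_program_payload() is hypothetical driver code):
 *
 *	struct drm_dp_mst_topology_state *mst_state;
 *	struct drm_dp_mst_atomic_payload *payload;
 *
 *	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
 *	if (IS_ERR(mst_state))
 *		return PTR_ERR(mst_state);
 *
 *	payload = drm_atomic_get_mst_payload_state(mst_state, port);
 *	if (payload)
 *		example_program_payload(payload->time_slots, payload->pbn);
 */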
17021bb76ff1Sjsg 
17031bb76ff1Sjsg static void drm_dp_destroy_mst_branch_device(struct kref *kref)
17041bb76ff1Sjsg {
17051bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb =
17061bb76ff1Sjsg 		container_of(kref, struct drm_dp_mst_branch, topology_kref);
17071bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
17081bb76ff1Sjsg 
17091bb76ff1Sjsg 	drm_dp_mst_dump_mstb_topology_history(mstb);
17101bb76ff1Sjsg 
17111bb76ff1Sjsg 	INIT_LIST_HEAD(&mstb->destroy_next);
17121bb76ff1Sjsg 
17131bb76ff1Sjsg 	/*
17141bb76ff1Sjsg 	 * This can get called under mgr->mutex, so we need to perform the
17151bb76ff1Sjsg 	 * actual destruction of the mstb in another worker
17161bb76ff1Sjsg 	 */
17171bb76ff1Sjsg 	mutex_lock(&mgr->delayed_destroy_lock);
17181bb76ff1Sjsg 	list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
17191bb76ff1Sjsg 	mutex_unlock(&mgr->delayed_destroy_lock);
17201bb76ff1Sjsg 	queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
17211bb76ff1Sjsg }
17221bb76ff1Sjsg 
17231bb76ff1Sjsg /**
17241bb76ff1Sjsg  * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
17251bb76ff1Sjsg  * branch device unless it's zero
17261bb76ff1Sjsg  * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
17271bb76ff1Sjsg  *
17281bb76ff1Sjsg  * Attempts to grab a topology reference to @mstb, if it hasn't yet been
17291bb76ff1Sjsg  * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
17301bb76ff1Sjsg  * reached 0). Holding a topology reference implies that a malloc reference
17311bb76ff1Sjsg  * will be held to @mstb as long as the user holds the topology reference.
17321bb76ff1Sjsg  *
17331bb76ff1Sjsg  * Care should be taken to ensure that the user has at least one malloc
17341bb76ff1Sjsg  * reference to @mstb. If you already have a topology reference to @mstb, you
17351bb76ff1Sjsg  * should use drm_dp_mst_topology_get_mstb() instead.
17361bb76ff1Sjsg  *
17371bb76ff1Sjsg  * See also:
17381bb76ff1Sjsg  * drm_dp_mst_topology_get_mstb()
17391bb76ff1Sjsg  * drm_dp_mst_topology_put_mstb()
17401bb76ff1Sjsg  *
17411bb76ff1Sjsg  * Returns:
17421bb76ff1Sjsg  * * 1: A topology reference was grabbed successfully
17431bb76ff1Sjsg  * * 0: @mstb is no longer in the topology, no reference was grabbed
17441bb76ff1Sjsg  */
17451bb76ff1Sjsg static int __must_check
17461bb76ff1Sjsg drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
17471bb76ff1Sjsg {
17481bb76ff1Sjsg 	int ret;
17491bb76ff1Sjsg 
17501bb76ff1Sjsg 	topology_ref_history_lock(mstb->mgr);
17511bb76ff1Sjsg 	ret = kref_get_unless_zero(&mstb->topology_kref);
17521bb76ff1Sjsg 	if (ret) {
17531bb76ff1Sjsg 		drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
17541bb76ff1Sjsg 		save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
17551bb76ff1Sjsg 	}
17561bb76ff1Sjsg 
17571bb76ff1Sjsg 	topology_ref_history_unlock(mstb->mgr);
17581bb76ff1Sjsg 
17591bb76ff1Sjsg 	return ret;
17601bb76ff1Sjsg }
17611bb76ff1Sjsg 
17621bb76ff1Sjsg /**
17631bb76ff1Sjsg  * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
17641bb76ff1Sjsg  * branch device
17651bb76ff1Sjsg  * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
17661bb76ff1Sjsg  *
17671bb76ff1Sjsg  * Increments &drm_dp_mst_branch.topology_kref without checking whether or
17681bb76ff1Sjsg  * not it's already reached 0. This is only valid to use in scenarios where
17691bb76ff1Sjsg  * you are already guaranteed to have at least one active topology reference
17701bb76ff1Sjsg  * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
17711bb76ff1Sjsg  *
17721bb76ff1Sjsg  * See also:
17731bb76ff1Sjsg  * drm_dp_mst_topology_try_get_mstb()
17741bb76ff1Sjsg  * drm_dp_mst_topology_put_mstb()
17751bb76ff1Sjsg  */
17761bb76ff1Sjsg static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
17771bb76ff1Sjsg {
17781bb76ff1Sjsg 	topology_ref_history_lock(mstb->mgr);
17791bb76ff1Sjsg 
17801bb76ff1Sjsg 	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
17811bb76ff1Sjsg 	WARN_ON(kref_read(&mstb->topology_kref) == 0);
17821bb76ff1Sjsg 	kref_get(&mstb->topology_kref);
17831bb76ff1Sjsg 	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
17841bb76ff1Sjsg 
17851bb76ff1Sjsg 	topology_ref_history_unlock(mstb->mgr);
17861bb76ff1Sjsg }
17871bb76ff1Sjsg 
17881bb76ff1Sjsg /**
17891bb76ff1Sjsg  * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
17901bb76ff1Sjsg  * device
17911bb76ff1Sjsg  * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
17921bb76ff1Sjsg  *
17931bb76ff1Sjsg  * Releases a topology reference from @mstb by decrementing
17941bb76ff1Sjsg  * &drm_dp_mst_branch.topology_kref.
17951bb76ff1Sjsg  *
17961bb76ff1Sjsg  * See also:
17971bb76ff1Sjsg  * drm_dp_mst_topology_try_get_mstb()
17981bb76ff1Sjsg  * drm_dp_mst_topology_get_mstb()
17991bb76ff1Sjsg  */
18001bb76ff1Sjsg static void
18011bb76ff1Sjsg drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
18021bb76ff1Sjsg {
18031bb76ff1Sjsg 	topology_ref_history_lock(mstb->mgr);
18041bb76ff1Sjsg 
18051bb76ff1Sjsg 	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref) - 1);
18061bb76ff1Sjsg 	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);
18071bb76ff1Sjsg 
18081bb76ff1Sjsg 	topology_ref_history_unlock(mstb->mgr);
18091bb76ff1Sjsg 	kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
18101bb76ff1Sjsg }
18111bb76ff1Sjsg 
18121bb76ff1Sjsg static void drm_dp_destroy_port(struct kref *kref)
18131bb76ff1Sjsg {
18141bb76ff1Sjsg 	struct drm_dp_mst_port *port =
18151bb76ff1Sjsg 		container_of(kref, struct drm_dp_mst_port, topology_kref);
18161bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
18171bb76ff1Sjsg 
18181bb76ff1Sjsg 	drm_dp_mst_dump_port_topology_history(port);
18191bb76ff1Sjsg 
18201bb76ff1Sjsg 	/* There's nothing that needs locking to destroy an input port yet */
18211bb76ff1Sjsg 	if (port->input) {
18221bb76ff1Sjsg 		drm_dp_mst_put_port_malloc(port);
18231bb76ff1Sjsg 		return;
18241bb76ff1Sjsg 	}
18251bb76ff1Sjsg 
1826f005ef32Sjsg 	drm_edid_free(port->cached_edid);
18271bb76ff1Sjsg 
18281bb76ff1Sjsg 	/*
18291bb76ff1Sjsg 	 * we can't destroy the connector here, as we might be holding the
18301bb76ff1Sjsg 	 * mode_config.mutex from an EDID retrieval
18311bb76ff1Sjsg 	 */
18321bb76ff1Sjsg 	mutex_lock(&mgr->delayed_destroy_lock);
18331bb76ff1Sjsg 	list_add(&port->next, &mgr->destroy_port_list);
18341bb76ff1Sjsg 	mutex_unlock(&mgr->delayed_destroy_lock);
18351bb76ff1Sjsg 	queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
18361bb76ff1Sjsg }
18371bb76ff1Sjsg 
18381bb76ff1Sjsg /**
18391bb76ff1Sjsg  * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
18401bb76ff1Sjsg  * port unless it's zero
18411bb76ff1Sjsg  * @port: &struct drm_dp_mst_port to increment the topology refcount of
18421bb76ff1Sjsg  *
18431bb76ff1Sjsg  * Attempts to grab a topology reference to @port, if it hasn't yet been
18441bb76ff1Sjsg  * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
18451bb76ff1Sjsg  * 0). Holding a topology reference implies that a malloc reference will be
18461bb76ff1Sjsg  * held to @port as long as the user holds the topology reference.
18471bb76ff1Sjsg  *
18481bb76ff1Sjsg  * Care should be taken to ensure that the user has at least one malloc
18491bb76ff1Sjsg  * reference to @port. If you already have a topology reference to @port, you
18501bb76ff1Sjsg  * should use drm_dp_mst_topology_get_port() instead.
18511bb76ff1Sjsg  *
18521bb76ff1Sjsg  * See also:
18531bb76ff1Sjsg  * drm_dp_mst_topology_get_port()
18541bb76ff1Sjsg  * drm_dp_mst_topology_put_port()
18551bb76ff1Sjsg  *
18561bb76ff1Sjsg  * Returns:
18571bb76ff1Sjsg  * * 1: A topology reference was grabbed successfully
18581bb76ff1Sjsg  * * 0: @port is no longer in the topology, no reference was grabbed
18591bb76ff1Sjsg  */
18601bb76ff1Sjsg static int __must_check
18611bb76ff1Sjsg drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
18621bb76ff1Sjsg {
18631bb76ff1Sjsg 	int ret;
18641bb76ff1Sjsg 
18651bb76ff1Sjsg 	topology_ref_history_lock(port->mgr);
18661bb76ff1Sjsg 	ret = kref_get_unless_zero(&port->topology_kref);
18671bb76ff1Sjsg 	if (ret) {
18681bb76ff1Sjsg 		drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref));
18691bb76ff1Sjsg 		save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
18701bb76ff1Sjsg 	}
18711bb76ff1Sjsg 
18721bb76ff1Sjsg 	topology_ref_history_unlock(port->mgr);
18731bb76ff1Sjsg 	return ret;
18741bb76ff1Sjsg }
18751bb76ff1Sjsg 
18761bb76ff1Sjsg /**
18771bb76ff1Sjsg  * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
18781bb76ff1Sjsg  * @port: The &struct drm_dp_mst_port to increment the topology refcount of
18791bb76ff1Sjsg  *
18801bb76ff1Sjsg  * Increments &drm_dp_mst_port.topology_kref without checking whether or
18811bb76ff1Sjsg  * not it's already reached 0. This is only valid to use in scenarios where
18821bb76ff1Sjsg  * you are already guaranteed to have at least one active topology reference
18831bb76ff1Sjsg  * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
18841bb76ff1Sjsg  *
18851bb76ff1Sjsg  * See also:
18861bb76ff1Sjsg  * drm_dp_mst_topology_try_get_port()
18871bb76ff1Sjsg  * drm_dp_mst_topology_put_port()
18881bb76ff1Sjsg  */
18891bb76ff1Sjsg static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
18901bb76ff1Sjsg {
18911bb76ff1Sjsg 	topology_ref_history_lock(port->mgr);
18921bb76ff1Sjsg 
18931bb76ff1Sjsg 	WARN_ON(kref_read(&port->topology_kref) == 0);
18941bb76ff1Sjsg 	kref_get(&port->topology_kref);
18951bb76ff1Sjsg 	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref));
18961bb76ff1Sjsg 	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
18971bb76ff1Sjsg 
18981bb76ff1Sjsg 	topology_ref_history_unlock(port->mgr);
18991bb76ff1Sjsg }
19001bb76ff1Sjsg 
19011bb76ff1Sjsg /**
19021bb76ff1Sjsg  * drm_dp_mst_topology_put_port() - release a topology reference to a port
19031bb76ff1Sjsg  * @port: The &struct drm_dp_mst_port to release the topology reference from
19041bb76ff1Sjsg  *
19051bb76ff1Sjsg  * Releases a topology reference from @port by decrementing
19061bb76ff1Sjsg  * &drm_dp_mst_port.topology_kref.
19071bb76ff1Sjsg  *
19081bb76ff1Sjsg  * See also:
19091bb76ff1Sjsg  * drm_dp_mst_topology_try_get_port()
19101bb76ff1Sjsg  * drm_dp_mst_topology_get_port()
19111bb76ff1Sjsg  */
19121bb76ff1Sjsg static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
19131bb76ff1Sjsg {
19141bb76ff1Sjsg 	topology_ref_history_lock(port->mgr);
19151bb76ff1Sjsg 
19161bb76ff1Sjsg 	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref) - 1);
19171bb76ff1Sjsg 	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);
19181bb76ff1Sjsg 
19191bb76ff1Sjsg 	topology_ref_history_unlock(port->mgr);
19201bb76ff1Sjsg 	kref_put(&port->topology_kref, drm_dp_destroy_port);
19211bb76ff1Sjsg }
19221bb76ff1Sjsg 
19231bb76ff1Sjsg static struct drm_dp_mst_branch *
19241bb76ff1Sjsg drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
19251bb76ff1Sjsg 					      struct drm_dp_mst_branch *to_find)
19261bb76ff1Sjsg {
19271bb76ff1Sjsg 	struct drm_dp_mst_port *port;
19281bb76ff1Sjsg 	struct drm_dp_mst_branch *rmstb;
19291bb76ff1Sjsg 
19301bb76ff1Sjsg 	if (to_find == mstb)
19311bb76ff1Sjsg 		return mstb;
19321bb76ff1Sjsg 
19331bb76ff1Sjsg 	list_for_each_entry(port, &mstb->ports, next) {
19341bb76ff1Sjsg 		if (port->mstb) {
19351bb76ff1Sjsg 			rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
19361bb76ff1Sjsg 			    port->mstb, to_find);
19371bb76ff1Sjsg 			if (rmstb)
19381bb76ff1Sjsg 				return rmstb;
19391bb76ff1Sjsg 		}
19401bb76ff1Sjsg 	}
19411bb76ff1Sjsg 	return NULL;
19421bb76ff1Sjsg }
19431bb76ff1Sjsg 
19441bb76ff1Sjsg static struct drm_dp_mst_branch *
19451bb76ff1Sjsg drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
19461bb76ff1Sjsg 				       struct drm_dp_mst_branch *mstb)
19471bb76ff1Sjsg {
19481bb76ff1Sjsg 	struct drm_dp_mst_branch *rmstb = NULL;
19491bb76ff1Sjsg 
19501bb76ff1Sjsg 	mutex_lock(&mgr->lock);
19511bb76ff1Sjsg 	if (mgr->mst_primary) {
19521bb76ff1Sjsg 		rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
19531bb76ff1Sjsg 		    mgr->mst_primary, mstb);
19541bb76ff1Sjsg 
19551bb76ff1Sjsg 		if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
19561bb76ff1Sjsg 			rmstb = NULL;
19571bb76ff1Sjsg 	}
19581bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
19591bb76ff1Sjsg 	return rmstb;
19601bb76ff1Sjsg }
19611bb76ff1Sjsg 
19621bb76ff1Sjsg static struct drm_dp_mst_port *
19631bb76ff1Sjsg drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
19641bb76ff1Sjsg 					      struct drm_dp_mst_port *to_find)
19651bb76ff1Sjsg {
19661bb76ff1Sjsg 	struct drm_dp_mst_port *port, *mport;
19671bb76ff1Sjsg 
19681bb76ff1Sjsg 	list_for_each_entry(port, &mstb->ports, next) {
19691bb76ff1Sjsg 		if (port == to_find)
19701bb76ff1Sjsg 			return port;
19711bb76ff1Sjsg 
19721bb76ff1Sjsg 		if (port->mstb) {
19731bb76ff1Sjsg 			mport = drm_dp_mst_topology_get_port_validated_locked(
19741bb76ff1Sjsg 			    port->mstb, to_find);
19751bb76ff1Sjsg 			if (mport)
19761bb76ff1Sjsg 				return mport;
19771bb76ff1Sjsg 		}
19781bb76ff1Sjsg 	}
19791bb76ff1Sjsg 	return NULL;
19801bb76ff1Sjsg }
19811bb76ff1Sjsg 
19821bb76ff1Sjsg static struct drm_dp_mst_port *
19831bb76ff1Sjsg drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
19841bb76ff1Sjsg 				       struct drm_dp_mst_port *port)
19851bb76ff1Sjsg {
19861bb76ff1Sjsg 	struct drm_dp_mst_port *rport = NULL;
19871bb76ff1Sjsg 
19881bb76ff1Sjsg 	mutex_lock(&mgr->lock);
19891bb76ff1Sjsg 	if (mgr->mst_primary) {
19901bb76ff1Sjsg 		rport = drm_dp_mst_topology_get_port_validated_locked(
19911bb76ff1Sjsg 		    mgr->mst_primary, port);
19921bb76ff1Sjsg 
19931bb76ff1Sjsg 		if (rport && !drm_dp_mst_topology_try_get_port(rport))
19941bb76ff1Sjsg 			rport = NULL;
19951bb76ff1Sjsg 	}
19961bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
19971bb76ff1Sjsg 	return rport;
19981bb76ff1Sjsg }
19991bb76ff1Sjsg 
20001bb76ff1Sjsg static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
20011bb76ff1Sjsg {
20021bb76ff1Sjsg 	struct drm_dp_mst_port *port;
20031bb76ff1Sjsg 	int ret;
20041bb76ff1Sjsg 
20051bb76ff1Sjsg 	list_for_each_entry(port, &mstb->ports, next) {
20061bb76ff1Sjsg 		if (port->port_num == port_num) {
20071bb76ff1Sjsg 			ret = drm_dp_mst_topology_try_get_port(port);
20081bb76ff1Sjsg 			return ret ? port : NULL;
20091bb76ff1Sjsg 		}
20101bb76ff1Sjsg 	}
20111bb76ff1Sjsg 
20121bb76ff1Sjsg 	return NULL;
20131bb76ff1Sjsg }
20141bb76ff1Sjsg 
20151bb76ff1Sjsg /*
20161bb76ff1Sjsg  * calculate a new RAD for this MST branch device
20171bb76ff1Sjsg  * if parent has an LCT of 2 then it has 1 nibble of RAD,
20181bb76ff1Sjsg  * if parent has an LCT of 3 then it has 2 nibbles of RAD,
20191bb76ff1Sjsg  */
20201bb76ff1Sjsg static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
20211bb76ff1Sjsg 				 u8 *rad)
20221bb76ff1Sjsg {
20231bb76ff1Sjsg 	int parent_lct = port->parent->lct;
20241bb76ff1Sjsg 	int shift = 4;
20251bb76ff1Sjsg 	int idx = (parent_lct - 1) / 2;
20261bb76ff1Sjsg 
20271bb76ff1Sjsg 	if (parent_lct > 1) {
20281bb76ff1Sjsg 		memcpy(rad, port->parent->rad, idx + 1);
20291bb76ff1Sjsg 		shift = (parent_lct % 2) ? 4 : 0;
20301bb76ff1Sjsg 	} else
20311bb76ff1Sjsg 		rad[0] = 0;
20321bb76ff1Sjsg 
20331bb76ff1Sjsg 	rad[idx] |= port->port_num << shift;
20341bb76ff1Sjsg 	return parent_lct + 1;
20351bb76ff1Sjsg }
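
/*
 * Worked example of the packing above: for a port with port_num 2 whose
 * parent branch has lct 2 and rad[0] 0x10 (i.e. the parent hangs off port 1
 * of the primary), idx is 0 and shift is 0, so rad[0] becomes 0x12 and the
 * returned lct is 3: go out port 1, then out port 2.
 */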
20361bb76ff1Sjsg 
20371bb76ff1Sjsg static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs)
20381bb76ff1Sjsg {
20391bb76ff1Sjsg 	switch (pdt) {
20401bb76ff1Sjsg 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
20411bb76ff1Sjsg 	case DP_PEER_DEVICE_SST_SINK:
20421bb76ff1Sjsg 		return true;
20431bb76ff1Sjsg 	case DP_PEER_DEVICE_MST_BRANCHING:
20441bb76ff1Sjsg 		/* For sst branch device */
20451bb76ff1Sjsg 		if (!mcs)
20461bb76ff1Sjsg 			return true;
20471bb76ff1Sjsg 
20481bb76ff1Sjsg 		return false;
20491bb76ff1Sjsg 	}
20501bb76ff1Sjsg 	return true;
20511bb76ff1Sjsg }
20521bb76ff1Sjsg 
20531bb76ff1Sjsg static int
20541bb76ff1Sjsg drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
20551bb76ff1Sjsg 		    bool new_mcs)
20561bb76ff1Sjsg {
20571bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
20581bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb;
20591bb76ff1Sjsg 	u8 rad[8], lct;
20601bb76ff1Sjsg 	int ret = 0;
20611bb76ff1Sjsg 
20621bb76ff1Sjsg 	if (port->pdt == new_pdt && port->mcs == new_mcs)
20631bb76ff1Sjsg 		return 0;
20641bb76ff1Sjsg 
20651bb76ff1Sjsg 	/* Teardown the old pdt, if there is one */
20661bb76ff1Sjsg 	if (port->pdt != DP_PEER_DEVICE_NONE) {
20671bb76ff1Sjsg 		if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
20681bb76ff1Sjsg 			/*
20691bb76ff1Sjsg 			 * If the new PDT would also have an i2c bus,
20701bb76ff1Sjsg 			 * don't bother with reregistering it
20711bb76ff1Sjsg 			 */
20721bb76ff1Sjsg 			if (new_pdt != DP_PEER_DEVICE_NONE &&
20731bb76ff1Sjsg 			    drm_dp_mst_is_end_device(new_pdt, new_mcs)) {
20741bb76ff1Sjsg 				port->pdt = new_pdt;
20751bb76ff1Sjsg 				port->mcs = new_mcs;
20761bb76ff1Sjsg 				return 0;
20771bb76ff1Sjsg 			}
20781bb76ff1Sjsg 
20791bb76ff1Sjsg 			/* remove i2c over sideband */
20801bb76ff1Sjsg 			drm_dp_mst_unregister_i2c_bus(port);
20811bb76ff1Sjsg 		} else {
20821bb76ff1Sjsg 			mutex_lock(&mgr->lock);
20831bb76ff1Sjsg 			drm_dp_mst_topology_put_mstb(port->mstb);
20841bb76ff1Sjsg 			port->mstb = NULL;
20851bb76ff1Sjsg 			mutex_unlock(&mgr->lock);
20861bb76ff1Sjsg 		}
20871bb76ff1Sjsg 	}
20881bb76ff1Sjsg 
20891bb76ff1Sjsg 	port->pdt = new_pdt;
20901bb76ff1Sjsg 	port->mcs = new_mcs;
20911bb76ff1Sjsg 
20921bb76ff1Sjsg 	if (port->pdt != DP_PEER_DEVICE_NONE) {
20931bb76ff1Sjsg 		if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
20941bb76ff1Sjsg 			/* add i2c over sideband */
20951bb76ff1Sjsg 			ret = drm_dp_mst_register_i2c_bus(port);
20961bb76ff1Sjsg 		} else {
20971bb76ff1Sjsg 			lct = drm_dp_calculate_rad(port, rad);
20981bb76ff1Sjsg 			mstb = drm_dp_add_mst_branch_device(lct, rad);
20991bb76ff1Sjsg 			if (!mstb) {
21001bb76ff1Sjsg 				ret = -ENOMEM;
21011bb76ff1Sjsg 				drm_err(mgr->dev, "Failed to create MSTB for port %p", port);
21021bb76ff1Sjsg 				goto out;
21031bb76ff1Sjsg 			}
21041bb76ff1Sjsg 
21051bb76ff1Sjsg 			mutex_lock(&mgr->lock);
21061bb76ff1Sjsg 			port->mstb = mstb;
21071bb76ff1Sjsg 			mstb->mgr = port->mgr;
21081bb76ff1Sjsg 			mstb->port_parent = port;
21091bb76ff1Sjsg 
21101bb76ff1Sjsg 			/*
21111bb76ff1Sjsg 			 * Make sure this port's memory allocation stays
21121bb76ff1Sjsg 			 * around until its child MSTB releases it
21131bb76ff1Sjsg 			 */
21141bb76ff1Sjsg 			drm_dp_mst_get_port_malloc(port);
21151bb76ff1Sjsg 			mutex_unlock(&mgr->lock);
21161bb76ff1Sjsg 
21171bb76ff1Sjsg 			/* And make sure we send a link address for this */
21181bb76ff1Sjsg 			ret = 1;
21191bb76ff1Sjsg 		}
21201bb76ff1Sjsg 	}
21211bb76ff1Sjsg 
21221bb76ff1Sjsg out:
21231bb76ff1Sjsg 	if (ret < 0)
21241bb76ff1Sjsg 		port->pdt = DP_PEER_DEVICE_NONE;
21251bb76ff1Sjsg 	return ret;
21261bb76ff1Sjsg }
21271bb76ff1Sjsg 
21281bb76ff1Sjsg /**
21291bb76ff1Sjsg  * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
21301bb76ff1Sjsg  * @aux: Fake sideband AUX CH
21311bb76ff1Sjsg  * @offset: address of the (first) register to read
21321bb76ff1Sjsg  * @buffer: buffer to store the register values
21331bb76ff1Sjsg  * @size: number of bytes in @buffer
21341bb76ff1Sjsg  *
21351bb76ff1Sjsg  * Performs the same functionality for remote devices via
21361bb76ff1Sjsg  * sideband messaging as drm_dp_dpcd_read() does for local
21371bb76ff1Sjsg  * devices via actual AUX CH.
21381bb76ff1Sjsg  *
21391bb76ff1Sjsg  * Return: Number of bytes read, or negative error code on failure.
21401bb76ff1Sjsg  */
21411bb76ff1Sjsg ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
21421bb76ff1Sjsg 			     unsigned int offset, void *buffer, size_t size)
21431bb76ff1Sjsg {
21441bb76ff1Sjsg 	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
21451bb76ff1Sjsg 						    aux);
21461bb76ff1Sjsg 
21471bb76ff1Sjsg 	return drm_dp_send_dpcd_read(port->mgr, port,
21481bb76ff1Sjsg 				     offset, size, buffer);
21491bb76ff1Sjsg }
21501bb76ff1Sjsg 
21511bb76ff1Sjsg /**
21521bb76ff1Sjsg  * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
21531bb76ff1Sjsg  * @aux: Fake sideband AUX CH
21541bb76ff1Sjsg  * @offset: address of the (first) register to write
21551bb76ff1Sjsg  * @buffer: buffer containing the values to write
21561bb76ff1Sjsg  * @size: number of bytes in @buffer
21571bb76ff1Sjsg  *
21581bb76ff1Sjsg  * Performs the same functionality for remote devices via
21591bb76ff1Sjsg  * sideband messaging as drm_dp_dpcd_write() does for local
21601bb76ff1Sjsg  * devices via actual AUX CH.
21611bb76ff1Sjsg  *
21621bb76ff1Sjsg  * Return: number of bytes written on success, negative error code on failure.
21631bb76ff1Sjsg  */
21641bb76ff1Sjsg ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
21651bb76ff1Sjsg 			      unsigned int offset, void *buffer, size_t size)
21661bb76ff1Sjsg {
21671bb76ff1Sjsg 	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
21681bb76ff1Sjsg 						    aux);
21691bb76ff1Sjsg 
21701bb76ff1Sjsg 	return drm_dp_send_dpcd_write(port->mgr, port,
21711bb76ff1Sjsg 				      offset, size, buffer);
21721bb76ff1Sjsg }
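
/*
 * Drivers rarely call these two helpers directly: since port->aux.is_remote
 * is set, an ordinary drm_dp_dpcd_read()/drm_dp_dpcd_write() on the port's
 * AUX channel is routed here by the DP helper core. A hedged sketch,
 * assuming a validated @port is already in scope:
 *
 *	u8 dpcd_rev;
 *
 *	if (drm_dp_dpcd_readb(&port->aux, DP_DPCD_REV, &dpcd_rev) == 1)
 *		drm_dbg(port->mgr->dev, "remote DPCD rev %#x\n", dpcd_rev);
 */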
21731bb76ff1Sjsg 
21741bb76ff1Sjsg static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
21751bb76ff1Sjsg {
21761bb76ff1Sjsg 	int ret = 0;
21771bb76ff1Sjsg 
21781bb76ff1Sjsg 	memcpy(mstb->guid, guid, 16);
21791bb76ff1Sjsg 
21801bb76ff1Sjsg 	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
21811bb76ff1Sjsg 		if (mstb->port_parent) {
21821bb76ff1Sjsg 			ret = drm_dp_send_dpcd_write(mstb->mgr,
21831bb76ff1Sjsg 						     mstb->port_parent,
21841bb76ff1Sjsg 						     DP_GUID, 16, mstb->guid);
21851bb76ff1Sjsg 		} else {
21861bb76ff1Sjsg 			ret = drm_dp_dpcd_write(mstb->mgr->aux,
21871bb76ff1Sjsg 						DP_GUID, mstb->guid, 16);
21881bb76ff1Sjsg 		}
21891bb76ff1Sjsg 	}
21901bb76ff1Sjsg 
21911bb76ff1Sjsg 	if (ret < 16 && ret > 0)
21921bb76ff1Sjsg 		return -EPROTO;
21931bb76ff1Sjsg 
21941bb76ff1Sjsg 	return ret == 16 ? 0 : ret;
21951bb76ff1Sjsg }
21961bb76ff1Sjsg 
21971bb76ff1Sjsg static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
21981bb76ff1Sjsg 				int pnum,
21991bb76ff1Sjsg 				char *proppath,
22001bb76ff1Sjsg 				size_t proppath_size)
22011bb76ff1Sjsg {
22021bb76ff1Sjsg 	int i;
22031bb76ff1Sjsg 	char temp[8];
22041bb76ff1Sjsg 
22051bb76ff1Sjsg 	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
22061bb76ff1Sjsg 	for (i = 0; i < (mstb->lct - 1); i++) {
22071bb76ff1Sjsg 		int shift = (i % 2) ? 0 : 4;
22081bb76ff1Sjsg 		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
22091bb76ff1Sjsg 
22101bb76ff1Sjsg 		snprintf(temp, sizeof(temp), "-%d", port_num);
22111bb76ff1Sjsg 		strlcat(proppath, temp, proppath_size);
22121bb76ff1Sjsg 	}
22131bb76ff1Sjsg 	snprintf(temp, sizeof(temp), "-%d", pnum);
22141bb76ff1Sjsg 	strlcat(proppath, temp, proppath_size);
22151bb76ff1Sjsg }
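
/*
 * Example of the resulting connector path property: with conn_base_id 32,
 * a branch at lct 2 with rad[0] 0x10, and pnum 8, the string built above is
 * "mst:32-1-8" (base connector 32, out port 1 of the primary, then port 8).
 */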
22161bb76ff1Sjsg 
22171bb76ff1Sjsg /**
22181bb76ff1Sjsg  * drm_dp_mst_connector_late_register() - Late MST connector registration
22191bb76ff1Sjsg  * @connector: The MST connector
22201bb76ff1Sjsg  * @port: The MST port for this connector
22211bb76ff1Sjsg  *
22221bb76ff1Sjsg  * Helper to register the remote aux device for this MST port. Drivers should
22231bb76ff1Sjsg  * call this from their mst connector's late_register hook to enable MST aux
22241bb76ff1Sjsg  * devices.
22251bb76ff1Sjsg  *
22261bb76ff1Sjsg  * Return: 0 on success, negative error code on failure.
22271bb76ff1Sjsg  */
22281bb76ff1Sjsg int drm_dp_mst_connector_late_register(struct drm_connector *connector,
22291bb76ff1Sjsg 				       struct drm_dp_mst_port *port)
22301bb76ff1Sjsg {
22311bb76ff1Sjsg #ifdef __linux__
22321bb76ff1Sjsg 	drm_dbg_kms(port->mgr->dev, "registering %s remote bus for %s\n",
22331bb76ff1Sjsg 		    port->aux.name, connector->kdev->kobj.name);
22341bb76ff1Sjsg #else
22351bb76ff1Sjsg 	drm_dbg_kms(port->mgr->dev, "registering %s remote bus\n",
22361bb76ff1Sjsg 		    port->aux.name);
22371bb76ff1Sjsg #endif
22381bb76ff1Sjsg 
22391bb76ff1Sjsg 	port->aux.dev = connector->kdev;
22401bb76ff1Sjsg 	return drm_dp_aux_register_devnode(&port->aux);
22411bb76ff1Sjsg }
22421bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
22431bb76ff1Sjsg 
22441bb76ff1Sjsg /**
22451bb76ff1Sjsg  * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
22461bb76ff1Sjsg  * @connector: The MST connector
22471bb76ff1Sjsg  * @port: The MST port for this connector
22481bb76ff1Sjsg  *
22491bb76ff1Sjsg  * Helper to unregister the remote aux device for this MST port, registered by
22501bb76ff1Sjsg  * drm_dp_mst_connector_late_register(). Drivers should call this from their mst
22511bb76ff1Sjsg  * connector's early_unregister hook.
22521bb76ff1Sjsg  */
22531bb76ff1Sjsg void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
22541bb76ff1Sjsg 					   struct drm_dp_mst_port *port)
22551bb76ff1Sjsg {
22561bb76ff1Sjsg #ifdef __linux__
22571bb76ff1Sjsg 	drm_dbg_kms(port->mgr->dev, "unregistering %s remote bus for %s\n",
22581bb76ff1Sjsg 		    port->aux.name, connector->kdev->kobj.name);
22591bb76ff1Sjsg #else
22601bb76ff1Sjsg 	drm_dbg_kms(port->mgr->dev, "unregistering %s remote bus\n",
22611bb76ff1Sjsg 		    port->aux.name);
22621bb76ff1Sjsg #endif
22631bb76ff1Sjsg 	drm_dp_aux_unregister_devnode(&port->aux);
22641bb76ff1Sjsg }
22651bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
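
/*
 * A minimal sketch of how a driver might wire the two helpers above into
 * its connector funcs; the example_mst_connector wrapper and
 * to_example_mst() are hypothetical:
 *
 *	static int example_mst_late_register(struct drm_connector *connector)
 *	{
 *		struct example_mst_connector *c = to_example_mst(connector);
 *
 *		return drm_dp_mst_connector_late_register(connector, c->port);
 *	}
 *
 *	static void example_mst_early_unregister(struct drm_connector *connector)
 *	{
 *		struct example_mst_connector *c = to_example_mst(connector);
 *
 *		drm_dp_mst_connector_early_unregister(connector, c->port);
 *	}
 *
 * These would be plugged into &drm_connector_funcs.late_register and
 * &drm_connector_funcs.early_unregister respectively.
 */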
22661bb76ff1Sjsg 
22671bb76ff1Sjsg static void
22681bb76ff1Sjsg drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
22691bb76ff1Sjsg 			      struct drm_dp_mst_port *port)
22701bb76ff1Sjsg {
22711bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
22721bb76ff1Sjsg 	char proppath[255];
22731bb76ff1Sjsg 	int ret;
22741bb76ff1Sjsg 
22751bb76ff1Sjsg 	build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
22761bb76ff1Sjsg 	port->connector = mgr->cbs->add_connector(mgr, port, proppath);
22771bb76ff1Sjsg 	if (!port->connector) {
22781bb76ff1Sjsg 		ret = -ENOMEM;
22791bb76ff1Sjsg 		goto error;
22801bb76ff1Sjsg 	}
22811bb76ff1Sjsg 
22821bb76ff1Sjsg 	if (port->pdt != DP_PEER_DEVICE_NONE &&
22831bb76ff1Sjsg 	    drm_dp_mst_is_end_device(port->pdt, port->mcs) &&
22841bb76ff1Sjsg 	    port->port_num >= DP_MST_LOGICAL_PORT_0)
2285f005ef32Sjsg 		port->cached_edid = drm_edid_read_ddc(port->connector,
22861bb76ff1Sjsg 						      &port->aux.ddc);
22871bb76ff1Sjsg 
22881bb76ff1Sjsg 	drm_connector_register(port->connector);
22891bb76ff1Sjsg 	return;
22901bb76ff1Sjsg 
22911bb76ff1Sjsg error:
22921bb76ff1Sjsg 	drm_err(mgr->dev, "Failed to create connector for port %p: %d\n", port, ret);
22931bb76ff1Sjsg }
22941bb76ff1Sjsg 
22951bb76ff1Sjsg /*
22961bb76ff1Sjsg  * Drop a topology reference, and unlink the port from the in-memory topology
22971bb76ff1Sjsg  * layout
22981bb76ff1Sjsg  */
22991bb76ff1Sjsg static void
23001bb76ff1Sjsg drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
23011bb76ff1Sjsg 				struct drm_dp_mst_port *port)
23021bb76ff1Sjsg {
23031bb76ff1Sjsg 	mutex_lock(&mgr->lock);
23041bb76ff1Sjsg 	port->parent->num_ports--;
23051bb76ff1Sjsg 	list_del(&port->next);
23061bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
23071bb76ff1Sjsg 	drm_dp_mst_topology_put_port(port);
23081bb76ff1Sjsg }
23091bb76ff1Sjsg 
23101bb76ff1Sjsg static struct drm_dp_mst_port *
23111bb76ff1Sjsg drm_dp_mst_add_port(struct drm_device *dev,
23121bb76ff1Sjsg 		    struct drm_dp_mst_topology_mgr *mgr,
23131bb76ff1Sjsg 		    struct drm_dp_mst_branch *mstb, u8 port_number)
23141bb76ff1Sjsg {
23151bb76ff1Sjsg 	struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
23161bb76ff1Sjsg 
23171bb76ff1Sjsg 	if (!port)
23181bb76ff1Sjsg 		return NULL;
23191bb76ff1Sjsg 
23201bb76ff1Sjsg 	kref_init(&port->topology_kref);
23211bb76ff1Sjsg 	kref_init(&port->malloc_kref);
23221bb76ff1Sjsg 	port->parent = mstb;
23231bb76ff1Sjsg 	port->port_num = port_number;
23241bb76ff1Sjsg 	port->mgr = mgr;
23251bb76ff1Sjsg 	port->aux.name = "DPMST";
23261bb76ff1Sjsg 	port->aux.dev = dev->dev;
23271bb76ff1Sjsg 	port->aux.is_remote = true;
23281bb76ff1Sjsg 
23291bb76ff1Sjsg 	/* initialize the MST downstream port's AUX crc work queue */
23301bb76ff1Sjsg 	port->aux.drm_dev = dev;
23311bb76ff1Sjsg 	drm_dp_remote_aux_init(&port->aux);
23321bb76ff1Sjsg 
23331bb76ff1Sjsg 	/*
23341bb76ff1Sjsg 	 * Make sure the memory allocation for our parent branch stays
23351bb76ff1Sjsg 	 * around until our own memory allocation is released
23361bb76ff1Sjsg 	 */
23371bb76ff1Sjsg 	drm_dp_mst_get_mstb_malloc(mstb);
23381bb76ff1Sjsg 
23391bb76ff1Sjsg 	return port;
23401bb76ff1Sjsg }
23411bb76ff1Sjsg 
23421bb76ff1Sjsg static int
23431bb76ff1Sjsg drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
23441bb76ff1Sjsg 				    struct drm_device *dev,
23451bb76ff1Sjsg 				    struct drm_dp_link_addr_reply_port *port_msg)
23461bb76ff1Sjsg {
23471bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
23481bb76ff1Sjsg 	struct drm_dp_mst_port *port;
23491bb76ff1Sjsg 	int old_ddps = 0, ret;
23501bb76ff1Sjsg 	u8 new_pdt = DP_PEER_DEVICE_NONE;
23511bb76ff1Sjsg 	bool new_mcs = false;
23521bb76ff1Sjsg 	bool created = false, send_link_addr = false, changed = false;
23531bb76ff1Sjsg 
23541bb76ff1Sjsg 	port = drm_dp_get_port(mstb, port_msg->port_number);
23551bb76ff1Sjsg 	if (!port) {
23561bb76ff1Sjsg 		port = drm_dp_mst_add_port(dev, mgr, mstb,
23571bb76ff1Sjsg 					   port_msg->port_number);
23581bb76ff1Sjsg 		if (!port)
23591bb76ff1Sjsg 			return -ENOMEM;
23601bb76ff1Sjsg 		created = true;
23611bb76ff1Sjsg 		changed = true;
23621bb76ff1Sjsg 	} else if (!port->input && port_msg->input_port && port->connector) {
23631bb76ff1Sjsg 		/* Since port->connector can't be changed here, we create a
23641bb76ff1Sjsg 		 * new port if input_port changes from 0 to 1
23651bb76ff1Sjsg 		 */
23661bb76ff1Sjsg 		drm_dp_mst_topology_unlink_port(mgr, port);
23671bb76ff1Sjsg 		drm_dp_mst_topology_put_port(port);
23681bb76ff1Sjsg 		port = drm_dp_mst_add_port(dev, mgr, mstb,
23691bb76ff1Sjsg 					   port_msg->port_number);
23701bb76ff1Sjsg 		if (!port)
23711bb76ff1Sjsg 			return -ENOMEM;
23721bb76ff1Sjsg 		changed = true;
23731bb76ff1Sjsg 		created = true;
23741bb76ff1Sjsg 	} else if (port->input && !port_msg->input_port) {
23751bb76ff1Sjsg 		changed = true;
23761bb76ff1Sjsg 	} else if (port->connector) {
23771bb76ff1Sjsg 		/* We're updating a port that's exposed to userspace, so do it
23781bb76ff1Sjsg 		 * under lock
23791bb76ff1Sjsg 		 */
23801bb76ff1Sjsg 		drm_modeset_lock(&mgr->base.lock, NULL);
23811bb76ff1Sjsg 
23821bb76ff1Sjsg 		old_ddps = port->ddps;
23831bb76ff1Sjsg 		changed = port->ddps != port_msg->ddps ||
23841bb76ff1Sjsg 			(port->ddps &&
23851bb76ff1Sjsg 			 (port->ldps != port_msg->legacy_device_plug_status ||
23861bb76ff1Sjsg 			  port->dpcd_rev != port_msg->dpcd_revision ||
23871bb76ff1Sjsg 			  port->mcs != port_msg->mcs ||
23881bb76ff1Sjsg 			  port->pdt != port_msg->peer_device_type ||
23891bb76ff1Sjsg 			  port->num_sdp_stream_sinks !=
23901bb76ff1Sjsg 			  port_msg->num_sdp_stream_sinks));
23911bb76ff1Sjsg 	}
23921bb76ff1Sjsg 
23931bb76ff1Sjsg 	port->input = port_msg->input_port;
23941bb76ff1Sjsg 	if (!port->input)
23951bb76ff1Sjsg 		new_pdt = port_msg->peer_device_type;
23961bb76ff1Sjsg 	new_mcs = port_msg->mcs;
23971bb76ff1Sjsg 	port->ddps = port_msg->ddps;
23981bb76ff1Sjsg 	port->ldps = port_msg->legacy_device_plug_status;
23991bb76ff1Sjsg 	port->dpcd_rev = port_msg->dpcd_revision;
24001bb76ff1Sjsg 	port->num_sdp_streams = port_msg->num_sdp_streams;
24011bb76ff1Sjsg 	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
24021bb76ff1Sjsg 
24031bb76ff1Sjsg 	/* manage mstb port lists with mgr lock - take a reference
24041bb76ff1Sjsg 	 * for this list */
24051bb76ff1Sjsg 	if (created) {
24061bb76ff1Sjsg 		mutex_lock(&mgr->lock);
24071bb76ff1Sjsg 		drm_dp_mst_topology_get_port(port);
24081bb76ff1Sjsg 		list_add(&port->next, &mstb->ports);
24091bb76ff1Sjsg 		mstb->num_ports++;
24101bb76ff1Sjsg 		mutex_unlock(&mgr->lock);
24111bb76ff1Sjsg 	}
24121bb76ff1Sjsg 
24131bb76ff1Sjsg 	/*
24141bb76ff1Sjsg 	 * Reprobe PBN caps both on hotplug and when re-probing the link
24151bb76ff1Sjsg 	 * for our parent mstb
24161bb76ff1Sjsg 	 */
24171bb76ff1Sjsg 	if (old_ddps != port->ddps || !created) {
24181bb76ff1Sjsg 		if (port->ddps && !port->input) {
24191bb76ff1Sjsg 			ret = drm_dp_send_enum_path_resources(mgr, mstb,
24201bb76ff1Sjsg 							      port);
24211bb76ff1Sjsg 			if (ret == 1)
24221bb76ff1Sjsg 				changed = true;
24231bb76ff1Sjsg 		} else {
24241bb76ff1Sjsg 			port->full_pbn = 0;
24251bb76ff1Sjsg 		}
24261bb76ff1Sjsg 	}
24271bb76ff1Sjsg 
24281bb76ff1Sjsg 	ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
24291bb76ff1Sjsg 	if (ret == 1) {
24301bb76ff1Sjsg 		send_link_addr = true;
24311bb76ff1Sjsg 	} else if (ret < 0) {
24321bb76ff1Sjsg 		drm_err(dev, "Failed to change PDT on port %p: %d\n", port, ret);
24331bb76ff1Sjsg 		goto fail;
24341bb76ff1Sjsg 	}
24351bb76ff1Sjsg 
24361bb76ff1Sjsg 	/*
24371bb76ff1Sjsg 	 * If this port wasn't just created, then we're reprobing because
24381bb76ff1Sjsg 	 * we're coming out of suspend. In this case, always resend the link
24391bb76ff1Sjsg 	 * address if there's an MSTB on this port
24401bb76ff1Sjsg 	 */
24411bb76ff1Sjsg 	if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
24421bb76ff1Sjsg 	    port->mcs)
24431bb76ff1Sjsg 		send_link_addr = true;
24441bb76ff1Sjsg 
24451bb76ff1Sjsg 	if (port->connector)
24461bb76ff1Sjsg 		drm_modeset_unlock(&mgr->base.lock);
24471bb76ff1Sjsg 	else if (!port->input)
24481bb76ff1Sjsg 		drm_dp_mst_port_add_connector(mstb, port);
24491bb76ff1Sjsg 
24501bb76ff1Sjsg 	if (send_link_addr && port->mstb) {
24511bb76ff1Sjsg 		ret = drm_dp_send_link_address(mgr, port->mstb);
24521bb76ff1Sjsg 		if (ret == 1) /* MSTB below us changed */
24531bb76ff1Sjsg 			changed = true;
24541bb76ff1Sjsg 		else if (ret < 0)
24551bb76ff1Sjsg 			goto fail_put;
24561bb76ff1Sjsg 	}
24571bb76ff1Sjsg 
24581bb76ff1Sjsg 	/* put reference to this port */
24591bb76ff1Sjsg 	drm_dp_mst_topology_put_port(port);
24601bb76ff1Sjsg 	return changed;
24611bb76ff1Sjsg 
24621bb76ff1Sjsg fail:
24631bb76ff1Sjsg 	drm_dp_mst_topology_unlink_port(mgr, port);
24641bb76ff1Sjsg 	if (port->connector)
24651bb76ff1Sjsg 		drm_modeset_unlock(&mgr->base.lock);
24661bb76ff1Sjsg fail_put:
24671bb76ff1Sjsg 	drm_dp_mst_topology_put_port(port);
24681bb76ff1Sjsg 	return ret;
24691bb76ff1Sjsg }
24701bb76ff1Sjsg 
24711bb76ff1Sjsg static int
24721bb76ff1Sjsg drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
24731bb76ff1Sjsg 			    struct drm_dp_connection_status_notify *conn_stat)
24741bb76ff1Sjsg {
24751bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
24761bb76ff1Sjsg 	struct drm_dp_mst_port *port;
24771bb76ff1Sjsg 	int old_ddps, ret;
24781bb76ff1Sjsg 	u8 new_pdt;
24791bb76ff1Sjsg 	bool new_mcs;
24801bb76ff1Sjsg 	bool dowork = false, create_connector = false;
24811bb76ff1Sjsg 
24821bb76ff1Sjsg 	port = drm_dp_get_port(mstb, conn_stat->port_number);
24831bb76ff1Sjsg 	if (!port)
24841bb76ff1Sjsg 		return 0;
24851bb76ff1Sjsg 
24861bb76ff1Sjsg 	if (port->connector) {
24871bb76ff1Sjsg 		if (!port->input && conn_stat->input_port) {
24881bb76ff1Sjsg 			/*
24891bb76ff1Sjsg 			 * We can't remove a connector from an already exposed
24901bb76ff1Sjsg 			 * port, so just throw the port out and make sure we
24911bb76ff1Sjsg 			 * reprobe the link address of its parent MSTB
24921bb76ff1Sjsg 			 */
24931bb76ff1Sjsg 			drm_dp_mst_topology_unlink_port(mgr, port);
24941bb76ff1Sjsg 			mstb->link_address_sent = false;
24951bb76ff1Sjsg 			dowork = true;
24961bb76ff1Sjsg 			goto out;
24971bb76ff1Sjsg 		}
24981bb76ff1Sjsg 
24991bb76ff1Sjsg 		/* Locking is only needed if the port's exposed to userspace */
25001bb76ff1Sjsg 		drm_modeset_lock(&mgr->base.lock, NULL);
25011bb76ff1Sjsg 	} else if (port->input && !conn_stat->input_port) {
25021bb76ff1Sjsg 		create_connector = true;
25031bb76ff1Sjsg 		/* Reprobe link address so we get num_sdp_streams */
25041bb76ff1Sjsg 		mstb->link_address_sent = false;
25051bb76ff1Sjsg 		dowork = true;
25061bb76ff1Sjsg 	}
25071bb76ff1Sjsg 
25081bb76ff1Sjsg 	old_ddps = port->ddps;
25091bb76ff1Sjsg 	port->input = conn_stat->input_port;
25101bb76ff1Sjsg 	port->ldps = conn_stat->legacy_device_plug_status;
25111bb76ff1Sjsg 	port->ddps = conn_stat->displayport_device_plug_status;
25121bb76ff1Sjsg 
25131bb76ff1Sjsg 	if (old_ddps != port->ddps) {
25141bb76ff1Sjsg 		if (port->ddps && !port->input)
25151bb76ff1Sjsg 			drm_dp_send_enum_path_resources(mgr, mstb, port);
25161bb76ff1Sjsg 		else
25171bb76ff1Sjsg 			port->full_pbn = 0;
25181bb76ff1Sjsg 	}
25191bb76ff1Sjsg 
25201bb76ff1Sjsg 	new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
25211bb76ff1Sjsg 	new_mcs = conn_stat->message_capability_status;
25221bb76ff1Sjsg 	ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
25231bb76ff1Sjsg 	if (ret == 1) {
25241bb76ff1Sjsg 		dowork = true;
25251bb76ff1Sjsg 	} else if (ret < 0) {
25261bb76ff1Sjsg 		drm_err(mgr->dev, "Failed to change PDT for port %p: %d\n", port, ret);
25271bb76ff1Sjsg 		dowork = false;
25281bb76ff1Sjsg 	}
25291bb76ff1Sjsg 
25301bb76ff1Sjsg 	if (port->connector)
25311bb76ff1Sjsg 		drm_modeset_unlock(&mgr->base.lock);
25321bb76ff1Sjsg 	else if (create_connector)
25331bb76ff1Sjsg 		drm_dp_mst_port_add_connector(mstb, port);
25341bb76ff1Sjsg 
25351bb76ff1Sjsg out:
25361bb76ff1Sjsg 	drm_dp_mst_topology_put_port(port);
25371bb76ff1Sjsg 	return dowork;
25381bb76ff1Sjsg }
25391bb76ff1Sjsg 
25401bb76ff1Sjsg static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
25411bb76ff1Sjsg 							       u8 lct, u8 *rad)
25421bb76ff1Sjsg {
25431bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb;
25441bb76ff1Sjsg 	struct drm_dp_mst_port *port;
25451bb76ff1Sjsg 	int i, ret;
25461bb76ff1Sjsg 	/* find the branch device by iterating down */
25471bb76ff1Sjsg 
25481bb76ff1Sjsg 	mutex_lock(&mgr->lock);
25491bb76ff1Sjsg 	mstb = mgr->mst_primary;
25501bb76ff1Sjsg 
25511bb76ff1Sjsg 	if (!mstb)
25521bb76ff1Sjsg 		goto out;
25531bb76ff1Sjsg 
25541bb76ff1Sjsg 	for (i = 0; i < lct - 1; i++) {
25551bb76ff1Sjsg 		int shift = (i % 2) ? 0 : 4;
25561bb76ff1Sjsg 		int port_num = (rad[i / 2] >> shift) & 0xf;
25571bb76ff1Sjsg 
25581bb76ff1Sjsg 		list_for_each_entry(port, &mstb->ports, next) {
25591bb76ff1Sjsg 			if (port->port_num == port_num) {
25601bb76ff1Sjsg 				mstb = port->mstb;
25611bb76ff1Sjsg 				if (!mstb) {
25621bb76ff1Sjsg 					drm_err(mgr->dev,
25631bb76ff1Sjsg 						"failed to lookup MSTB with lct %d, rad %02x\n",
25641bb76ff1Sjsg 						lct, rad[0]);
25651bb76ff1Sjsg 					goto out;
25661bb76ff1Sjsg 				}
25671bb76ff1Sjsg 
25681bb76ff1Sjsg 				break;
25691bb76ff1Sjsg 			}
25701bb76ff1Sjsg 		}
25711bb76ff1Sjsg 	}
25721bb76ff1Sjsg 	ret = drm_dp_mst_topology_try_get_mstb(mstb);
25731bb76ff1Sjsg 	if (!ret)
25741bb76ff1Sjsg 		mstb = NULL;
25751bb76ff1Sjsg out:
25761bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
25771bb76ff1Sjsg 	return mstb;
25781bb76ff1Sjsg }
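
/*
 * Worked example of the RAD decode above: each hop below the primary
 * branch is a 4-bit port number, two hops packed per byte with the first
 * hop in the high nibble. With lct = 3 and rad[0] = 0x21, the loop walks
 * port 2 of the primary branch, then port 1 of the MSTB found there.
 */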
25791bb76ff1Sjsg 
25801bb76ff1Sjsg static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
25811bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb,
25821bb76ff1Sjsg 	const uint8_t *guid)
25831bb76ff1Sjsg {
25841bb76ff1Sjsg 	struct drm_dp_mst_branch *found_mstb;
25851bb76ff1Sjsg 	struct drm_dp_mst_port *port;
25861bb76ff1Sjsg 
25872fb2b219Sjsg 	if (!mstb)
25882fb2b219Sjsg 		return NULL;
25892fb2b219Sjsg 
25901bb76ff1Sjsg 	if (memcmp(mstb->guid, guid, 16) == 0)
25911bb76ff1Sjsg 		return mstb;
25921bb76ff1Sjsg 
25931bb76ff1Sjsg 
25941bb76ff1Sjsg 	list_for_each_entry(port, &mstb->ports, next) {
25951bb76ff1Sjsg 		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
25961bb76ff1Sjsg 
25971bb76ff1Sjsg 		if (found_mstb)
25981bb76ff1Sjsg 			return found_mstb;
25991bb76ff1Sjsg 	}
26001bb76ff1Sjsg 
26011bb76ff1Sjsg 	return NULL;
26021bb76ff1Sjsg }
26031bb76ff1Sjsg 
26041bb76ff1Sjsg static struct drm_dp_mst_branch *
26051bb76ff1Sjsg drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
26061bb76ff1Sjsg 				     const uint8_t *guid)
26071bb76ff1Sjsg {
26081bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb;
26091bb76ff1Sjsg 	int ret;
26101bb76ff1Sjsg 
26111bb76ff1Sjsg 	/* find the branch device by iterating down */
26121bb76ff1Sjsg 	mutex_lock(&mgr->lock);
26131bb76ff1Sjsg 
26141bb76ff1Sjsg 	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
26151bb76ff1Sjsg 	if (mstb) {
26161bb76ff1Sjsg 		ret = drm_dp_mst_topology_try_get_mstb(mstb);
26171bb76ff1Sjsg 		if (!ret)
26181bb76ff1Sjsg 			mstb = NULL;
26191bb76ff1Sjsg 	}
26201bb76ff1Sjsg 
26211bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
26221bb76ff1Sjsg 	return mstb;
26231bb76ff1Sjsg }
26241bb76ff1Sjsg 
26251bb76ff1Sjsg static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
26261bb76ff1Sjsg 					       struct drm_dp_mst_branch *mstb)
26271bb76ff1Sjsg {
26281bb76ff1Sjsg 	struct drm_dp_mst_port *port;
26291bb76ff1Sjsg 	int ret;
26301bb76ff1Sjsg 	bool changed = false;
26311bb76ff1Sjsg 
26321bb76ff1Sjsg 	if (!mstb->link_address_sent) {
26331bb76ff1Sjsg 		ret = drm_dp_send_link_address(mgr, mstb);
26341bb76ff1Sjsg 		if (ret == 1)
26351bb76ff1Sjsg 			changed = true;
26361bb76ff1Sjsg 		else if (ret < 0)
26371bb76ff1Sjsg 			return ret;
26381bb76ff1Sjsg 	}
26391bb76ff1Sjsg 
26401bb76ff1Sjsg 	list_for_each_entry(port, &mstb->ports, next) {
26411bb76ff1Sjsg 		if (port->input || !port->ddps || !port->mstb)
26421bb76ff1Sjsg 			continue;
26431bb76ff1Sjsg 
26441bb76ff1Sjsg 		ret = drm_dp_check_and_send_link_address(mgr, port->mstb);
26451bb76ff1Sjsg 		if (ret == 1)
26461bb76ff1Sjsg 			changed = true;
26471bb76ff1Sjsg 		else if (ret < 0)
26481bb76ff1Sjsg 			return ret;
26491bb76ff1Sjsg 	}
26501bb76ff1Sjsg 
26511bb76ff1Sjsg 	return changed;
26521bb76ff1Sjsg }
26531bb76ff1Sjsg 
26541bb76ff1Sjsg static void drm_dp_mst_link_probe_work(struct work_struct *work)
26551bb76ff1Sjsg {
26561bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr =
26571bb76ff1Sjsg 		container_of(work, struct drm_dp_mst_topology_mgr, work);
26581bb76ff1Sjsg 	struct drm_device *dev = mgr->dev;
26591bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb;
26601bb76ff1Sjsg 	int ret;
26611bb76ff1Sjsg 	bool clear_payload_id_table;
26621bb76ff1Sjsg 
26631bb76ff1Sjsg 	mutex_lock(&mgr->probe_lock);
26641bb76ff1Sjsg 
26651bb76ff1Sjsg 	mutex_lock(&mgr->lock);
26661bb76ff1Sjsg 	clear_payload_id_table = !mgr->payload_id_table_cleared;
26671bb76ff1Sjsg 	mgr->payload_id_table_cleared = true;
26681bb76ff1Sjsg 
26691bb76ff1Sjsg 	mstb = mgr->mst_primary;
26701bb76ff1Sjsg 	if (mstb) {
26711bb76ff1Sjsg 		ret = drm_dp_mst_topology_try_get_mstb(mstb);
26721bb76ff1Sjsg 		if (!ret)
26731bb76ff1Sjsg 			mstb = NULL;
26741bb76ff1Sjsg 	}
26751bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
26761bb76ff1Sjsg 	if (!mstb) {
26771bb76ff1Sjsg 		mutex_unlock(&mgr->probe_lock);
26781bb76ff1Sjsg 		return;
26791bb76ff1Sjsg 	}
26801bb76ff1Sjsg 
26811bb76ff1Sjsg 	/*
26821bb76ff1Sjsg 	 * Certain branch devices seem to incorrectly report an available_pbn
26831bb76ff1Sjsg 	 * of 0 on downstream sinks, even after clearing the
26841bb76ff1Sjsg 	 * DP_PAYLOAD_ALLOCATE_* registers in
26851bb76ff1Sjsg 	 * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C
26861bb76ff1Sjsg 	 * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make
26871bb76ff1Sjsg 	 * things work again.
26881bb76ff1Sjsg 	 */
26891bb76ff1Sjsg 	if (clear_payload_id_table) {
26901bb76ff1Sjsg 		drm_dbg_kms(dev, "Clearing payload ID table\n");
26911bb76ff1Sjsg 		drm_dp_send_clear_payload_id_table(mgr, mstb);
26921bb76ff1Sjsg 	}
26931bb76ff1Sjsg 
26941bb76ff1Sjsg 	ret = drm_dp_check_and_send_link_address(mgr, mstb);
26951bb76ff1Sjsg 	drm_dp_mst_topology_put_mstb(mstb);
26961bb76ff1Sjsg 
26971bb76ff1Sjsg 	mutex_unlock(&mgr->probe_lock);
26981bb76ff1Sjsg 	if (ret > 0)
26991bb76ff1Sjsg 		drm_kms_helper_hotplug_event(dev);
27001bb76ff1Sjsg }
27011bb76ff1Sjsg 
27021bb76ff1Sjsg static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
27031bb76ff1Sjsg 				 u8 *guid)
27041bb76ff1Sjsg {
27051bb76ff1Sjsg 	u64 salt;
27061bb76ff1Sjsg 
27071bb76ff1Sjsg 	if (memchr_inv(guid, 0, 16))
27081bb76ff1Sjsg 		return true;
27091bb76ff1Sjsg 
27101bb76ff1Sjsg 	salt = get_jiffies_64();
27111bb76ff1Sjsg 
27121bb76ff1Sjsg 	memcpy(&guid[0], &salt, sizeof(u64));
27131bb76ff1Sjsg 	memcpy(&guid[8], &salt, sizeof(u64));
27141bb76ff1Sjsg 
27151bb76ff1Sjsg 	return false;
27161bb76ff1Sjsg }
27171bb76ff1Sjsg 
27181bb76ff1Sjsg static void build_dpcd_read(struct drm_dp_sideband_msg_tx *msg,
27191bb76ff1Sjsg 			    u8 port_num, u32 offset, u8 num_bytes)
27201bb76ff1Sjsg {
27211bb76ff1Sjsg 	struct drm_dp_sideband_msg_req_body req;
27221bb76ff1Sjsg 
27231bb76ff1Sjsg 	req.req_type = DP_REMOTE_DPCD_READ;
27241bb76ff1Sjsg 	req.u.dpcd_read.port_number = port_num;
27251bb76ff1Sjsg 	req.u.dpcd_read.dpcd_address = offset;
27261bb76ff1Sjsg 	req.u.dpcd_read.num_bytes = num_bytes;
27271bb76ff1Sjsg 	drm_dp_encode_sideband_req(&req, msg);
27281bb76ff1Sjsg }
27291bb76ff1Sjsg 
27301bb76ff1Sjsg static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
27311bb76ff1Sjsg 				    bool up, u8 *msg, int len)
27321bb76ff1Sjsg {
27331bb76ff1Sjsg 	int ret;
27341bb76ff1Sjsg 	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
27351bb76ff1Sjsg 	int tosend, total, offset;
27361bb76ff1Sjsg 	int retries = 0;
27371bb76ff1Sjsg 
27381bb76ff1Sjsg retry:
27391bb76ff1Sjsg 	total = len;
27401bb76ff1Sjsg 	offset = 0;
27411bb76ff1Sjsg 	do {
27421bb76ff1Sjsg 		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
27431bb76ff1Sjsg 
27441bb76ff1Sjsg 		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
27451bb76ff1Sjsg 					&msg[offset],
27461bb76ff1Sjsg 					tosend);
27471bb76ff1Sjsg 		if (ret != tosend) {
27481bb76ff1Sjsg 			if (ret == -EIO && retries < 5) {
27491bb76ff1Sjsg 				retries++;
27501bb76ff1Sjsg 				goto retry;
27511bb76ff1Sjsg 			}
27521bb76ff1Sjsg 			drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret);
27531bb76ff1Sjsg 
27541bb76ff1Sjsg 			return -EIO;
27551bb76ff1Sjsg 		}
27561bb76ff1Sjsg 		offset += tosend;
27571bb76ff1Sjsg 		total -= tosend;
27581bb76ff1Sjsg 	} while (total > 0);
27591bb76ff1Sjsg 	return 0;
27601bb76ff1Sjsg }
27611bb76ff1Sjsg 
27621bb76ff1Sjsg static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
27631bb76ff1Sjsg 				  struct drm_dp_sideband_msg_tx *txmsg)
27641bb76ff1Sjsg {
27651bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb = txmsg->dst;
27661bb76ff1Sjsg 	u8 req_type;
27671bb76ff1Sjsg 
27681bb76ff1Sjsg 	req_type = txmsg->msg[0] & 0x7f;
27691bb76ff1Sjsg 	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
27701bb76ff1Sjsg 		req_type == DP_RESOURCE_STATUS_NOTIFY ||
27711bb76ff1Sjsg 		req_type == DP_CLEAR_PAYLOAD_ID_TABLE)
27721bb76ff1Sjsg 		hdr->broadcast = 1;
27731bb76ff1Sjsg 	else
27741bb76ff1Sjsg 		hdr->broadcast = 0;
27751bb76ff1Sjsg 	hdr->path_msg = txmsg->path_msg;
27761bb76ff1Sjsg 	if (hdr->broadcast) {
27771bb76ff1Sjsg 		hdr->lct = 1;
27781bb76ff1Sjsg 		hdr->lcr = 6;
27791bb76ff1Sjsg 	} else {
27801bb76ff1Sjsg 		hdr->lct = mstb->lct;
27811bb76ff1Sjsg 		hdr->lcr = mstb->lct - 1;
27821bb76ff1Sjsg 	}
27831bb76ff1Sjsg 
27841bb76ff1Sjsg 	memcpy(hdr->rad, mstb->rad, hdr->lct / 2);
27851bb76ff1Sjsg 
27861bb76ff1Sjsg 	return 0;
27871bb76ff1Sjsg }
27881bb76ff1Sjsg /*
27891bb76ff1Sjsg  * process a single block of the next message in the sideband queue
27901bb76ff1Sjsg  */
27911bb76ff1Sjsg static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
27921bb76ff1Sjsg 				   struct drm_dp_sideband_msg_tx *txmsg,
27931bb76ff1Sjsg 				   bool up)
27941bb76ff1Sjsg {
27951bb76ff1Sjsg 	u8 chunk[48];
27961bb76ff1Sjsg 	struct drm_dp_sideband_msg_hdr hdr;
27971bb76ff1Sjsg 	int len, space, idx, tosend;
27981bb76ff1Sjsg 	int ret;
27991bb76ff1Sjsg 
28001bb76ff1Sjsg 	if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
28011bb76ff1Sjsg 		return 0;
28021bb76ff1Sjsg 
28031bb76ff1Sjsg 	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
28041bb76ff1Sjsg 
28051bb76ff1Sjsg 	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED)
28061bb76ff1Sjsg 		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
28071bb76ff1Sjsg 
28081bb76ff1Sjsg 	/* make hdr from dst mst */
28091bb76ff1Sjsg 	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
28101bb76ff1Sjsg 	if (ret < 0)
28111bb76ff1Sjsg 		return ret;
28121bb76ff1Sjsg 
28131bb76ff1Sjsg 	/* amount left to send in this message */
28141bb76ff1Sjsg 	len = txmsg->cur_len - txmsg->cur_offset;
28151bb76ff1Sjsg 
28161bb76ff1Sjsg 	/* 48 byte max sideband msg size, minus 1 byte for data CRC and the header bytes */
28171bb76ff1Sjsg 	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
28181bb76ff1Sjsg 
28191bb76ff1Sjsg 	tosend = min(len, space);
28201bb76ff1Sjsg 	if (len == txmsg->cur_len)
28211bb76ff1Sjsg 		hdr.somt = 1;
28221bb76ff1Sjsg 	if (space >= len)
28231bb76ff1Sjsg 		hdr.eomt = 1;
28241bb76ff1Sjsg 
28251bb76ff1Sjsg 
28261bb76ff1Sjsg 	hdr.msg_len = tosend + 1;
28271bb76ff1Sjsg 	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
28281bb76ff1Sjsg 	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
28291bb76ff1Sjsg 	/* add crc at end */
28301bb76ff1Sjsg 	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
28311bb76ff1Sjsg 	idx += tosend + 1;
28321bb76ff1Sjsg 
28331bb76ff1Sjsg 	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
28341bb76ff1Sjsg 	if (ret) {
28351bb76ff1Sjsg 		if (drm_debug_enabled(DRM_UT_DP)) {
28361bb76ff1Sjsg 			struct drm_printer p = drm_debug_printer(DBG_PREFIX);
28371bb76ff1Sjsg 
28381bb76ff1Sjsg 			drm_printf(&p, "sideband msg failed to send\n");
28391bb76ff1Sjsg 			drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
28401bb76ff1Sjsg 		}
28411bb76ff1Sjsg 		return ret;
28421bb76ff1Sjsg 	}
28431bb76ff1Sjsg 
28441bb76ff1Sjsg 	txmsg->cur_offset += tosend;
28451bb76ff1Sjsg 	if (txmsg->cur_offset == txmsg->cur_len) {
28461bb76ff1Sjsg 		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
28471bb76ff1Sjsg 		return 1;
28481bb76ff1Sjsg 	}
28491bb76ff1Sjsg 	return 0;
28501bb76ff1Sjsg }
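
/*
 * Chunking arithmetic, worked through: a sideband transaction holds at
 * most 48 bytes. drm_dp_calc_sb_hdr_size() is 3 bytes plus lct / 2 RAD
 * bytes, so a non-broadcast message to an lct = 1 branch has a 3-byte
 * header, and with the trailing CRC byte each chunk can carry up to
 * 48 - 1 - 3 = 44 bytes of message body.
 */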
28511bb76ff1Sjsg 
28521bb76ff1Sjsg static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
28531bb76ff1Sjsg {
28541bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg;
28551bb76ff1Sjsg 	int ret;
28561bb76ff1Sjsg 
28571bb76ff1Sjsg 	WARN_ON(!mutex_is_locked(&mgr->qlock));
28581bb76ff1Sjsg 
28591bb76ff1Sjsg 	/* construct a chunk from the first msg in the tx_msg queue */
28601bb76ff1Sjsg 	if (list_empty(&mgr->tx_msg_downq))
28611bb76ff1Sjsg 		return;
28621bb76ff1Sjsg 
28631bb76ff1Sjsg 	txmsg = list_first_entry(&mgr->tx_msg_downq,
28641bb76ff1Sjsg 				 struct drm_dp_sideband_msg_tx, next);
28651bb76ff1Sjsg 	ret = process_single_tx_qlock(mgr, txmsg, false);
28661bb76ff1Sjsg 	if (ret < 0) {
28671bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "failed to send msg in q %d\n", ret);
28681bb76ff1Sjsg 		list_del(&txmsg->next);
28691bb76ff1Sjsg 		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
28701bb76ff1Sjsg 		wake_up_all(&mgr->tx_waitq);
28711bb76ff1Sjsg 	}
28721bb76ff1Sjsg }
28731bb76ff1Sjsg 
28741bb76ff1Sjsg static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
28751bb76ff1Sjsg 				 struct drm_dp_sideband_msg_tx *txmsg)
28761bb76ff1Sjsg {
28771bb76ff1Sjsg 	mutex_lock(&mgr->qlock);
28781bb76ff1Sjsg 	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
28791bb76ff1Sjsg 
28801bb76ff1Sjsg 	if (drm_debug_enabled(DRM_UT_DP)) {
28811bb76ff1Sjsg 		struct drm_printer p = drm_debug_printer(DBG_PREFIX);
28821bb76ff1Sjsg 
28831bb76ff1Sjsg 		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
28841bb76ff1Sjsg 	}
28851bb76ff1Sjsg 
28861bb76ff1Sjsg 	if (list_is_singular(&mgr->tx_msg_downq))
28871bb76ff1Sjsg 		process_single_down_tx_qlock(mgr);
28881bb76ff1Sjsg 	mutex_unlock(&mgr->qlock);
28891bb76ff1Sjsg }
28901bb76ff1Sjsg 
28911bb76ff1Sjsg static void
28921bb76ff1Sjsg drm_dp_dump_link_address(const struct drm_dp_mst_topology_mgr *mgr,
28931bb76ff1Sjsg 			 struct drm_dp_link_address_ack_reply *reply)
28941bb76ff1Sjsg {
28951bb76ff1Sjsg 	struct drm_dp_link_addr_reply_port *port_reply;
28961bb76ff1Sjsg 	int i;
28971bb76ff1Sjsg 
28981bb76ff1Sjsg 	for (i = 0; i < reply->nports; i++) {
28991bb76ff1Sjsg 		port_reply = &reply->ports[i];
29001bb76ff1Sjsg 		drm_dbg_kms(mgr->dev,
29011bb76ff1Sjsg 			    "port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
29021bb76ff1Sjsg 			    i,
29031bb76ff1Sjsg 			    port_reply->input_port,
29041bb76ff1Sjsg 			    port_reply->peer_device_type,
29051bb76ff1Sjsg 			    port_reply->port_number,
29061bb76ff1Sjsg 			    port_reply->dpcd_revision,
29071bb76ff1Sjsg 			    port_reply->mcs,
29081bb76ff1Sjsg 			    port_reply->ddps,
29091bb76ff1Sjsg 			    port_reply->legacy_device_plug_status,
29101bb76ff1Sjsg 			    port_reply->num_sdp_streams,
29111bb76ff1Sjsg 			    port_reply->num_sdp_stream_sinks);
29121bb76ff1Sjsg 	}
29131bb76ff1Sjsg }
29141bb76ff1Sjsg 
29151bb76ff1Sjsg static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
29161bb76ff1Sjsg 				     struct drm_dp_mst_branch *mstb)
29171bb76ff1Sjsg {
29181bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg;
29191bb76ff1Sjsg 	struct drm_dp_link_address_ack_reply *reply;
29201bb76ff1Sjsg 	struct drm_dp_mst_port *port, *tmp;
29211bb76ff1Sjsg 	int i, ret, port_mask = 0;
29221bb76ff1Sjsg 	bool changed = false;
29231bb76ff1Sjsg 
29241bb76ff1Sjsg 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
29251bb76ff1Sjsg 	if (!txmsg)
29261bb76ff1Sjsg 		return -ENOMEM;
29271bb76ff1Sjsg 
29281bb76ff1Sjsg 	txmsg->dst = mstb;
29291bb76ff1Sjsg 	build_link_address(txmsg);
29301bb76ff1Sjsg 
29311bb76ff1Sjsg 	mstb->link_address_sent = true;
29321bb76ff1Sjsg 	drm_dp_queue_down_tx(mgr, txmsg);
29331bb76ff1Sjsg 
29341bb76ff1Sjsg 	/* FIXME: Actually do some real error handling here */
29351bb76ff1Sjsg 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2936cb2f7175Sjsg 	if (ret < 0) {
29371bb76ff1Sjsg 		drm_err(mgr->dev, "Sending link address failed with %d\n", ret);
29381bb76ff1Sjsg 		goto out;
29391bb76ff1Sjsg 	}
29401bb76ff1Sjsg 	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
29411bb76ff1Sjsg 		drm_err(mgr->dev, "link address NAK received\n");
29421bb76ff1Sjsg 		ret = -EIO;
29431bb76ff1Sjsg 		goto out;
29441bb76ff1Sjsg 	}
29451bb76ff1Sjsg 
29461bb76ff1Sjsg 	reply = &txmsg->reply.u.link_addr;
29471bb76ff1Sjsg 	drm_dbg_kms(mgr->dev, "link address reply: %d\n", reply->nports);
29481bb76ff1Sjsg 	drm_dp_dump_link_address(mgr, reply);
29491bb76ff1Sjsg 
29501bb76ff1Sjsg 	ret = drm_dp_check_mstb_guid(mstb, reply->guid);
29511bb76ff1Sjsg 	if (ret) {
29521bb76ff1Sjsg 		char buf[64];
29531bb76ff1Sjsg 
29541bb76ff1Sjsg 		drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf));
29551bb76ff1Sjsg 		drm_err(mgr->dev, "GUID check on %s failed: %d\n", buf, ret);
29561bb76ff1Sjsg 		goto out;
29571bb76ff1Sjsg 	}
29581bb76ff1Sjsg 
29591bb76ff1Sjsg 	for (i = 0; i < reply->nports; i++) {
29601bb76ff1Sjsg 		port_mask |= BIT(reply->ports[i].port_number);
29611bb76ff1Sjsg 		ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
29621bb76ff1Sjsg 							  &reply->ports[i]);
29631bb76ff1Sjsg 		if (ret == 1)
29641bb76ff1Sjsg 			changed = true;
29651bb76ff1Sjsg 		else if (ret < 0)
29661bb76ff1Sjsg 			goto out;
29671bb76ff1Sjsg 	}
29681bb76ff1Sjsg 
29691bb76ff1Sjsg 	/* Prune any ports that are currently a part of mstb in our in-memory
29701bb76ff1Sjsg 	 * topology, but were not seen in this link address. Usually this
29711bb76ff1Sjsg 	 * means that they were removed while the topology was out of sync,
29721bb76ff1Sjsg 	 * e.g. during suspend/resume
29731bb76ff1Sjsg 	 */
29741bb76ff1Sjsg 	mutex_lock(&mgr->lock);
29751bb76ff1Sjsg 	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
29761bb76ff1Sjsg 		if (port_mask & BIT(port->port_num))
29771bb76ff1Sjsg 			continue;
29781bb76ff1Sjsg 
29791bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "port %d was not in link address, removing\n",
29801bb76ff1Sjsg 			    port->port_num);
29811bb76ff1Sjsg 		list_del(&port->next);
29821bb76ff1Sjsg 		drm_dp_mst_topology_put_port(port);
29831bb76ff1Sjsg 		changed = true;
29841bb76ff1Sjsg 	}
29851bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
29861bb76ff1Sjsg 
29871bb76ff1Sjsg out:
2988cb2f7175Sjsg 	if (ret < 0)
29891bb76ff1Sjsg 		mstb->link_address_sent = false;
29901bb76ff1Sjsg 	kfree(txmsg);
29911bb76ff1Sjsg 	return ret < 0 ? ret : changed;
29921bb76ff1Sjsg }
29931bb76ff1Sjsg 
29941bb76ff1Sjsg static void
29951bb76ff1Sjsg drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
29961bb76ff1Sjsg 				   struct drm_dp_mst_branch *mstb)
29971bb76ff1Sjsg {
29981bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg;
29991bb76ff1Sjsg 	int ret;
30001bb76ff1Sjsg 
30011bb76ff1Sjsg 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
30021bb76ff1Sjsg 	if (!txmsg)
30031bb76ff1Sjsg 		return;
30041bb76ff1Sjsg 
30051bb76ff1Sjsg 	txmsg->dst = mstb;
30061bb76ff1Sjsg 	build_clear_payload_id_table(txmsg);
30071bb76ff1Sjsg 
30081bb76ff1Sjsg 	drm_dp_queue_down_tx(mgr, txmsg);
30091bb76ff1Sjsg 
30101bb76ff1Sjsg 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
30111bb76ff1Sjsg 	if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
30121bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "clear payload table id nak received\n");
30131bb76ff1Sjsg 
30141bb76ff1Sjsg 	kfree(txmsg);
30151bb76ff1Sjsg }
30161bb76ff1Sjsg 
30171bb76ff1Sjsg static int
30181bb76ff1Sjsg drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
30191bb76ff1Sjsg 				struct drm_dp_mst_branch *mstb,
30201bb76ff1Sjsg 				struct drm_dp_mst_port *port)
30211bb76ff1Sjsg {
30221bb76ff1Sjsg 	struct drm_dp_enum_path_resources_ack_reply *path_res;
30231bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg;
30241bb76ff1Sjsg 	int ret;
30251bb76ff1Sjsg 
30261bb76ff1Sjsg 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
30271bb76ff1Sjsg 	if (!txmsg)
30281bb76ff1Sjsg 		return -ENOMEM;
30291bb76ff1Sjsg 
30301bb76ff1Sjsg 	txmsg->dst = mstb;
30311bb76ff1Sjsg 	build_enum_path_resources(txmsg, port->port_num);
30321bb76ff1Sjsg 
30331bb76ff1Sjsg 	drm_dp_queue_down_tx(mgr, txmsg);
30341bb76ff1Sjsg 
30351bb76ff1Sjsg 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
30361bb76ff1Sjsg 	if (ret > 0) {
30371bb76ff1Sjsg 		ret = 0;
30381bb76ff1Sjsg 		path_res = &txmsg->reply.u.path_resources;
30391bb76ff1Sjsg 
30401bb76ff1Sjsg 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
30411bb76ff1Sjsg 			drm_dbg_kms(mgr->dev, "enum path resources nak received\n");
30421bb76ff1Sjsg 		} else {
30431bb76ff1Sjsg 			if (port->port_num != path_res->port_number)
30441bb76ff1Sjsg 				DRM_ERROR("got incorrect port in response\n");
30451bb76ff1Sjsg 
30461bb76ff1Sjsg 			drm_dbg_kms(mgr->dev, "enum path resources %d: %d %d\n",
30471bb76ff1Sjsg 				    path_res->port_number,
30481bb76ff1Sjsg 				    path_res->full_payload_bw_number,
30491bb76ff1Sjsg 				    path_res->avail_payload_bw_number);
30501bb76ff1Sjsg 
30511bb76ff1Sjsg 			/*
30521bb76ff1Sjsg 			 * If something changed, make sure we send a
30531bb76ff1Sjsg 			 * hotplug
30541bb76ff1Sjsg 			 */
30551bb76ff1Sjsg 			if (port->full_pbn != path_res->full_payload_bw_number ||
30561bb76ff1Sjsg 			    port->fec_capable != path_res->fec_capable)
30571bb76ff1Sjsg 				ret = 1;
30581bb76ff1Sjsg 
30591bb76ff1Sjsg 			port->full_pbn = path_res->full_payload_bw_number;
30601bb76ff1Sjsg 			port->fec_capable = path_res->fec_capable;
30611bb76ff1Sjsg 		}
30621bb76ff1Sjsg 	}
30631bb76ff1Sjsg 
30641bb76ff1Sjsg 	kfree(txmsg);
30651bb76ff1Sjsg 	return ret;
30661bb76ff1Sjsg }
30671bb76ff1Sjsg 
30681bb76ff1Sjsg static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
30691bb76ff1Sjsg {
30701bb76ff1Sjsg 	if (!mstb->port_parent)
30711bb76ff1Sjsg 		return NULL;
30721bb76ff1Sjsg 
30731bb76ff1Sjsg 	if (mstb->port_parent->mstb != mstb)
30741bb76ff1Sjsg 		return mstb->port_parent;
30751bb76ff1Sjsg 
30761bb76ff1Sjsg 	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
30771bb76ff1Sjsg }
30781bb76ff1Sjsg 
30791bb76ff1Sjsg /*
30801bb76ff1Sjsg  * Searches upwards in the topology starting from mstb to try to find the
30811bb76ff1Sjsg  * closest available parent of mstb that's still connected to the rest of the
30821bb76ff1Sjsg  * topology. This can be used in order to perform operations like releasing
30831bb76ff1Sjsg  * payloads, where the branch device which owned the payload may no longer be
30841bb76ff1Sjsg  * around and thus would require that the payload on the last living relative
30851bb76ff1Sjsg  * be freed instead.
30861bb76ff1Sjsg  */
30871bb76ff1Sjsg static struct drm_dp_mst_branch *
30881bb76ff1Sjsg drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
30891bb76ff1Sjsg 					struct drm_dp_mst_branch *mstb,
30901bb76ff1Sjsg 					int *port_num)
30911bb76ff1Sjsg {
30921bb76ff1Sjsg 	struct drm_dp_mst_branch *rmstb = NULL;
30931bb76ff1Sjsg 	struct drm_dp_mst_port *found_port;
30941bb76ff1Sjsg 
30951bb76ff1Sjsg 	mutex_lock(&mgr->lock);
30961bb76ff1Sjsg 	if (!mgr->mst_primary)
30971bb76ff1Sjsg 		goto out;
30981bb76ff1Sjsg 
30991bb76ff1Sjsg 	do {
31001bb76ff1Sjsg 		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
31011bb76ff1Sjsg 		if (!found_port)
31021bb76ff1Sjsg 			break;
31031bb76ff1Sjsg 
31041bb76ff1Sjsg 		if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
31051bb76ff1Sjsg 			rmstb = found_port->parent;
31061bb76ff1Sjsg 			*port_num = found_port->port_num;
31071bb76ff1Sjsg 		} else {
31081bb76ff1Sjsg 			/* Search again, starting from this parent */
31091bb76ff1Sjsg 			mstb = found_port->parent;
31101bb76ff1Sjsg 		}
31111bb76ff1Sjsg 	} while (!rmstb);
31121bb76ff1Sjsg out:
31131bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
31141bb76ff1Sjsg 	return rmstb;
31151bb76ff1Sjsg }
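
/*
 * For example, given a chain root -> A -> B with a payload behind a port
 * of B: if B has already dropped out of the topology, the search above
 * returns the still-connected port on A (or further up), which is where
 * the payload then gets released.
 */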
31161bb76ff1Sjsg 
31171bb76ff1Sjsg static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
31181bb76ff1Sjsg 				   struct drm_dp_mst_port *port,
31191bb76ff1Sjsg 				   int id,
31201bb76ff1Sjsg 				   int pbn)
31211bb76ff1Sjsg {
31221bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg;
31231bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb;
31241bb76ff1Sjsg 	int ret, port_num;
31251bb76ff1Sjsg 	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
31261bb76ff1Sjsg 	int i;
31271bb76ff1Sjsg 
31281bb76ff1Sjsg 	port_num = port->port_num;
31291bb76ff1Sjsg 	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
31301bb76ff1Sjsg 	if (!mstb) {
31311bb76ff1Sjsg 		mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
31321bb76ff1Sjsg 							       port->parent,
31331bb76ff1Sjsg 							       &port_num);
31341bb76ff1Sjsg 
31351bb76ff1Sjsg 		if (!mstb)
31361bb76ff1Sjsg 			return -EINVAL;
31371bb76ff1Sjsg 	}
31381bb76ff1Sjsg 
31391bb76ff1Sjsg 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
31401bb76ff1Sjsg 	if (!txmsg) {
31411bb76ff1Sjsg 		ret = -ENOMEM;
31421bb76ff1Sjsg 		goto fail_put;
31431bb76ff1Sjsg 	}
31441bb76ff1Sjsg 
31451bb76ff1Sjsg 	for (i = 0; i < port->num_sdp_streams; i++)
31461bb76ff1Sjsg 		sinks[i] = i;
31471bb76ff1Sjsg 
31481bb76ff1Sjsg 	txmsg->dst = mstb;
31491bb76ff1Sjsg 	build_allocate_payload(txmsg, port_num,
31501bb76ff1Sjsg 			       id,
31511bb76ff1Sjsg 			       pbn, port->num_sdp_streams, sinks);
31521bb76ff1Sjsg 
31531bb76ff1Sjsg 	drm_dp_queue_down_tx(mgr, txmsg);
31541bb76ff1Sjsg 
31551bb76ff1Sjsg 	/*
31561bb76ff1Sjsg 	 * FIXME: there is a small chance that between getting the last
31571bb76ff1Sjsg 	 * connected mstb and sending the payload message, the last connected
31581bb76ff1Sjsg 	 * mstb could also be removed from the topology. In the future, this
31591bb76ff1Sjsg 	 * needs to be fixed by restarting the
31601bb76ff1Sjsg 	 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
31611bb76ff1Sjsg 	 * timeout if the topology is still connected to the system.
31621bb76ff1Sjsg 	 */
31631bb76ff1Sjsg 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
31641bb76ff1Sjsg 	if (ret > 0) {
31651bb76ff1Sjsg 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
31661bb76ff1Sjsg 			ret = -EINVAL;
31671bb76ff1Sjsg 		else
31681bb76ff1Sjsg 			ret = 0;
31691bb76ff1Sjsg 	}
31701bb76ff1Sjsg 	kfree(txmsg);
31711bb76ff1Sjsg fail_put:
31721bb76ff1Sjsg 	drm_dp_mst_topology_put_mstb(mstb);
31731bb76ff1Sjsg 	return ret;
31741bb76ff1Sjsg }
31751bb76ff1Sjsg 
31761bb76ff1Sjsg int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
31771bb76ff1Sjsg 				 struct drm_dp_mst_port *port, bool power_up)
31781bb76ff1Sjsg {
31791bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg;
31801bb76ff1Sjsg 	int ret;
31811bb76ff1Sjsg 
31821bb76ff1Sjsg 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
31831bb76ff1Sjsg 	if (!port)
31841bb76ff1Sjsg 		return -EINVAL;
31851bb76ff1Sjsg 
31861bb76ff1Sjsg 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
31871bb76ff1Sjsg 	if (!txmsg) {
31881bb76ff1Sjsg 		drm_dp_mst_topology_put_port(port);
31891bb76ff1Sjsg 		return -ENOMEM;
31901bb76ff1Sjsg 	}
31911bb76ff1Sjsg 
31921bb76ff1Sjsg 	txmsg->dst = port->parent;
31931bb76ff1Sjsg 	build_power_updown_phy(txmsg, port->port_num, power_up);
31941bb76ff1Sjsg 	drm_dp_queue_down_tx(mgr, txmsg);
31951bb76ff1Sjsg 
31961bb76ff1Sjsg 	ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
31971bb76ff1Sjsg 	if (ret > 0) {
31981bb76ff1Sjsg 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
31991bb76ff1Sjsg 			ret = -EINVAL;
32001bb76ff1Sjsg 		else
32011bb76ff1Sjsg 			ret = 0;
32021bb76ff1Sjsg 	}
32031bb76ff1Sjsg 	kfree(txmsg);
32041bb76ff1Sjsg 	drm_dp_mst_topology_put_port(port);
32051bb76ff1Sjsg 
32061bb76ff1Sjsg 	return ret;
32071bb76ff1Sjsg }
32081bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
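
/*
 * Usage sketch (hedged, my_* names hypothetical): drivers typically power
 * the downstream link up before enabling a stream on the port and power
 * it back down again after disabling it.
 */
#if 0	/* illustrative only, not built */
static void my_mst_pre_enable(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_mst_port *port)
{
	drm_dp_send_power_updown_phy(mgr, port, true);
}

static void my_mst_post_disable(struct drm_dp_mst_topology_mgr *mgr,
				struct drm_dp_mst_port *port)
{
	drm_dp_send_power_updown_phy(mgr, port, false);
}
#endif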
32091bb76ff1Sjsg 
32101bb76ff1Sjsg int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
32111bb76ff1Sjsg 		struct drm_dp_mst_port *port,
32121bb76ff1Sjsg 		struct drm_dp_query_stream_enc_status_ack_reply *status)
32131bb76ff1Sjsg {
32141bb76ff1Sjsg 	struct drm_dp_mst_topology_state *state;
32151bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *payload;
32161bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg;
32171bb76ff1Sjsg 	u8 nonce[7];
32181bb76ff1Sjsg 	int ret;
32191bb76ff1Sjsg 
32201bb76ff1Sjsg 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
32211bb76ff1Sjsg 	if (!txmsg)
32221bb76ff1Sjsg 		return -ENOMEM;
32231bb76ff1Sjsg 
32241bb76ff1Sjsg 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
32251bb76ff1Sjsg 	if (!port) {
32261bb76ff1Sjsg 		ret = -EINVAL;
32271bb76ff1Sjsg 		goto out_get_port;
32281bb76ff1Sjsg 	}
32291bb76ff1Sjsg 
32301bb76ff1Sjsg 	get_random_bytes(nonce, sizeof(nonce));
32311bb76ff1Sjsg 
32321bb76ff1Sjsg 	drm_modeset_lock(&mgr->base.lock, NULL);
32331bb76ff1Sjsg 	state = to_drm_dp_mst_topology_state(mgr->base.state);
32341bb76ff1Sjsg 	payload = drm_atomic_get_mst_payload_state(state, port);
32351bb76ff1Sjsg 
32361bb76ff1Sjsg 	/*
32371bb76ff1Sjsg 	 * "Source device targets the QUERY_STREAM_ENCRYPTION_STATUS message
32381bb76ff1Sjsg 	 *  transaction at the MST Branch device directly connected to the
32391bb76ff1Sjsg 	 *  Source"
32401bb76ff1Sjsg 	 */
32411bb76ff1Sjsg 	txmsg->dst = mgr->mst_primary;
32421bb76ff1Sjsg 
32431bb76ff1Sjsg 	build_query_stream_enc_status(txmsg, payload->vcpi, nonce);
32441bb76ff1Sjsg 
32451bb76ff1Sjsg 	drm_dp_queue_down_tx(mgr, txmsg);
32461bb76ff1Sjsg 
32471bb76ff1Sjsg 	ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg);
32481bb76ff1Sjsg 	if (ret < 0) {
32491bb76ff1Sjsg 		goto out;
32501bb76ff1Sjsg 	} else if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
32511bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "query encryption status nak received\n");
32521bb76ff1Sjsg 		ret = -ENXIO;
32531bb76ff1Sjsg 		goto out;
32541bb76ff1Sjsg 	}
32551bb76ff1Sjsg 
32561bb76ff1Sjsg 	ret = 0;
32571bb76ff1Sjsg 	memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status));
32581bb76ff1Sjsg 
32591bb76ff1Sjsg out:
32601bb76ff1Sjsg 	drm_modeset_unlock(&mgr->base.lock);
32611bb76ff1Sjsg 	drm_dp_mst_topology_put_port(port);
32621bb76ff1Sjsg out_get_port:
32631bb76ff1Sjsg 	kfree(txmsg);
32641bb76ff1Sjsg 	return ret;
32651bb76ff1Sjsg }
32661bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
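
/*
 * Usage sketch (hedged): HDCP code can check the protection state of the
 * stream behind an MST port, e.g.:
 *
 *	struct drm_dp_query_stream_enc_status_ack_reply status;
 *
 *	if (!drm_dp_send_query_stream_enc_status(mgr, port, &status) &&
 *	    status.auth_completed && status.encryption_enabled)
 *		... the stream is authenticated and encrypted ...
 */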
32671bb76ff1Sjsg 
32681bb76ff1Sjsg static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
32691bb76ff1Sjsg 				       struct drm_dp_mst_atomic_payload *payload)
32701bb76ff1Sjsg {
32711bb76ff1Sjsg 	return drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot,
32721bb76ff1Sjsg 					 payload->time_slots);
32731bb76ff1Sjsg }
32741bb76ff1Sjsg 
32751bb76ff1Sjsg static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
32761bb76ff1Sjsg 				       struct drm_dp_mst_atomic_payload *payload)
32771bb76ff1Sjsg {
32781bb76ff1Sjsg 	int ret;
32791bb76ff1Sjsg 	struct drm_dp_mst_port *port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
32801bb76ff1Sjsg 
32811bb76ff1Sjsg 	if (!port)
32821bb76ff1Sjsg 		return -EIO;
32831bb76ff1Sjsg 
32841bb76ff1Sjsg 	ret = drm_dp_payload_send_msg(mgr, port, payload->vcpi, payload->pbn);
32851bb76ff1Sjsg 	drm_dp_mst_topology_put_port(port);
32861bb76ff1Sjsg 	return ret;
32871bb76ff1Sjsg }
32881bb76ff1Sjsg 
32891bb76ff1Sjsg static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
32901bb76ff1Sjsg 					struct drm_dp_mst_topology_state *mst_state,
32911bb76ff1Sjsg 					struct drm_dp_mst_atomic_payload *payload)
32921bb76ff1Sjsg {
32931bb76ff1Sjsg 	drm_dbg_kms(mgr->dev, "\n");
32941bb76ff1Sjsg 
32951bb76ff1Sjsg 	/* it's okay for these to fail */
32961bb76ff1Sjsg 	drm_dp_payload_send_msg(mgr, payload->port, payload->vcpi, 0);
32971bb76ff1Sjsg 	drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot, 0);
32981bb76ff1Sjsg 
32991bb76ff1Sjsg 	return 0;
33001bb76ff1Sjsg }
33011bb76ff1Sjsg 
33021bb76ff1Sjsg /**
33031bb76ff1Sjsg  * drm_dp_add_payload_part1() - Execute payload update part 1
33041bb76ff1Sjsg  * @mgr: Manager to use.
33051bb76ff1Sjsg  * @mst_state: The MST atomic state
33061bb76ff1Sjsg  * @payload: The payload to write
33071bb76ff1Sjsg  *
33081bb76ff1Sjsg  * Determines the starting time slot for the given payload, and programs the VCPI for this payload
33091bb76ff1Sjsg  * into hardware. After calling this, the driver should generate ACT and payload packets.
33101bb76ff1Sjsg  *
33111bb76ff1Sjsg  * Returns: 0 on success, error code on failure. In the event that this fails,
33121bb76ff1Sjsg  * @payload.vc_start_slot will also be set to -1.
33131bb76ff1Sjsg  */
33141bb76ff1Sjsg int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
33151bb76ff1Sjsg 			     struct drm_dp_mst_topology_state *mst_state,
33161bb76ff1Sjsg 			     struct drm_dp_mst_atomic_payload *payload)
33171bb76ff1Sjsg {
33181bb76ff1Sjsg 	struct drm_dp_mst_port *port;
33191bb76ff1Sjsg 	int ret;
33201bb76ff1Sjsg 
33211bb76ff1Sjsg 	port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
332294994154Sjsg 	if (!port) {
332394994154Sjsg 		drm_dbg_kms(mgr->dev,
332494994154Sjsg 			    "VCPI %d for port %p not in topology, not creating a payload\n",
332594994154Sjsg 			    payload->vcpi, payload->port);
332694994154Sjsg 		payload->vc_start_slot = -1;
33271bb76ff1Sjsg 		return 0;
332894994154Sjsg 	}
33291bb76ff1Sjsg 
33301bb76ff1Sjsg 	if (mgr->payload_count == 0)
33311bb76ff1Sjsg 		mgr->next_start_slot = mst_state->start_slot;
33321bb76ff1Sjsg 
33331bb76ff1Sjsg 	payload->vc_start_slot = mgr->next_start_slot;
33341bb76ff1Sjsg 
33351bb76ff1Sjsg 	ret = drm_dp_create_payload_step1(mgr, payload);
33361bb76ff1Sjsg 	drm_dp_mst_topology_put_port(port);
33371bb76ff1Sjsg 	if (ret < 0) {
33381bb76ff1Sjsg 		drm_warn(mgr->dev, "Failed to create MST payload for port %p: %d\n",
33391bb76ff1Sjsg 			 payload->port, ret);
33401bb76ff1Sjsg 		payload->vc_start_slot = -1;
33411bb76ff1Sjsg 		return ret;
33421bb76ff1Sjsg 	}
33431bb76ff1Sjsg 
33441bb76ff1Sjsg 	mgr->payload_count++;
33451bb76ff1Sjsg 	mgr->next_start_slot += payload->time_slots;
33461bb76ff1Sjsg 
33471bb76ff1Sjsg 	return 0;
33481bb76ff1Sjsg }
33491bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_add_payload_part1);
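
/*
 * A hedged sketch of the full two-part sequence from a driver's commit
 * path (the my_hw_* name is hypothetical, the drm_dp_* calls are real):
 * part 1 programs the VC table, the driver then starts the stream and
 * waits for ACT, and part 2 sends the ALLOCATE_PAYLOAD sideband message.
 */
#if 0	/* illustrative only, not built */
static void my_mst_enable(struct drm_atomic_state *state,
			  struct drm_dp_mst_topology_mgr *mgr,
			  struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_topology_state *mst_state =
		drm_atomic_get_new_mst_topology_state(state, mgr);
	struct drm_dp_mst_atomic_payload *payload =
		drm_atomic_get_mst_payload_state(mst_state, port);

	drm_dp_add_payload_part1(mgr, mst_state, payload);
	my_hw_enable_stream(port);	/* generate ACT and payload packets */
	drm_dp_check_act_status(mgr);
	drm_dp_add_payload_part2(mgr, state, payload);
}
#endif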
33501bb76ff1Sjsg 
33511bb76ff1Sjsg /**
33521bb76ff1Sjsg  * drm_dp_remove_payload() - Remove an MST payload
33531bb76ff1Sjsg  * @mgr: Manager to use.
33541bb76ff1Sjsg  * @mst_state: The MST atomic state
3355d0d157cdSjsg  * @old_payload: The payload with its old state
3356d0d157cdSjsg  * @new_payload: The payload to write
33571bb76ff1Sjsg  *
33581bb76ff1Sjsg  * Removes a payload from an MST topology if it was successfully assigned a start slot. Also updates
33591bb76ff1Sjsg  * the starting time slots of all other payloads which would have been shifted towards the start of
33601bb76ff1Sjsg  * the VC table as a result. After calling this, the driver should generate ACT and payload packets.
33611bb76ff1Sjsg  */
33621bb76ff1Sjsg void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
33631bb76ff1Sjsg 			   struct drm_dp_mst_topology_state *mst_state,
3364d0d157cdSjsg 			   const struct drm_dp_mst_atomic_payload *old_payload,
3365d0d157cdSjsg 			   struct drm_dp_mst_atomic_payload *new_payload)
33661bb76ff1Sjsg {
33671bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *pos;
33681bb76ff1Sjsg 	bool send_remove = false;
33691bb76ff1Sjsg 
33701bb76ff1Sjsg 	/* We failed to make the payload, so nothing to do */
3371d0d157cdSjsg 	if (new_payload->vc_start_slot == -1)
33721bb76ff1Sjsg 		return;
33731bb76ff1Sjsg 
33741bb76ff1Sjsg 	mutex_lock(&mgr->lock);
3375d0d157cdSjsg 	send_remove = drm_dp_mst_port_downstream_of_branch(new_payload->port, mgr->mst_primary);
33761bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
33771bb76ff1Sjsg 
33781bb76ff1Sjsg 	if (send_remove)
3379d0d157cdSjsg 		drm_dp_destroy_payload_step1(mgr, mst_state, new_payload);
33801bb76ff1Sjsg 	else
33811bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "Payload for VCPI %d not in topology, not sending remove\n",
3382d0d157cdSjsg 			    new_payload->vcpi);
33831bb76ff1Sjsg 
33841bb76ff1Sjsg 	list_for_each_entry(pos, &mst_state->payloads, next) {
3385d0d157cdSjsg 		if (pos != new_payload && pos->vc_start_slot > new_payload->vc_start_slot)
3386d0d157cdSjsg 			pos->vc_start_slot -= old_payload->time_slots;
33871bb76ff1Sjsg 	}
3388d0d157cdSjsg 	new_payload->vc_start_slot = -1;
33891bb76ff1Sjsg 
33901bb76ff1Sjsg 	mgr->payload_count--;
3391d0d157cdSjsg 	mgr->next_start_slot -= old_payload->time_slots;
3392f6f081b8Sjsg 
3393d0d157cdSjsg 	if (new_payload->delete)
3394d0d157cdSjsg 		drm_dp_mst_put_port_malloc(new_payload->port);
33951bb76ff1Sjsg }
33961bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_remove_payload);
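
/*
 * A hedged sketch of the matching disable path (my_* names hypothetical):
 * the old payload state supplies the time-slot count being released, the
 * new payload state is the one whose start slot gets invalidated.
 */
#if 0	/* illustrative only, not built */
static void my_mst_disable(struct drm_atomic_state *state,
			   struct drm_dp_mst_topology_mgr *mgr,
			   struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_topology_state *old_mst_state =
		drm_atomic_get_old_mst_topology_state(state, mgr);
	struct drm_dp_mst_topology_state *new_mst_state =
		drm_atomic_get_new_mst_topology_state(state, mgr);
	const struct drm_dp_mst_atomic_payload *old_payload =
		drm_atomic_get_mst_payload_state(old_mst_state, port);
	struct drm_dp_mst_atomic_payload *new_payload =
		drm_atomic_get_mst_payload_state(new_mst_state, port);

	drm_dp_remove_payload(mgr, new_mst_state, old_payload, new_payload);
}
#endif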
33971bb76ff1Sjsg 
33981bb76ff1Sjsg /**
33991bb76ff1Sjsg  * drm_dp_add_payload_part2() - Execute payload update part 2
34001bb76ff1Sjsg  * @mgr: Manager to use.
34011bb76ff1Sjsg  * @state: The global atomic state
34021bb76ff1Sjsg  * @payload: The payload to update
34031bb76ff1Sjsg  *
34041bb76ff1Sjsg  * If @payload was successfully assigned a starting time slot by drm_dp_add_payload_part1(), this
34051bb76ff1Sjsg  * function will send the sideband messages to finish allocating this payload.
34061bb76ff1Sjsg  *
34071bb76ff1Sjsg  * Returns: 0 on success, negative error code on failure.
34081bb76ff1Sjsg  */
34091bb76ff1Sjsg int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
34101bb76ff1Sjsg 			     struct drm_atomic_state *state,
34111bb76ff1Sjsg 			     struct drm_dp_mst_atomic_payload *payload)
34121bb76ff1Sjsg {
34131bb76ff1Sjsg 	int ret = 0;
34141bb76ff1Sjsg 
34151bb76ff1Sjsg 	/* Skip failed payloads */
34161bb76ff1Sjsg 	if (payload->vc_start_slot == -1) {
34178c294423Sjsg 		drm_dbg_kms(mgr->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
34181bb76ff1Sjsg 			    payload->port->connector->name);
34191bb76ff1Sjsg 		return -EIO;
34201bb76ff1Sjsg 	}
34211bb76ff1Sjsg 
34221bb76ff1Sjsg 	ret = drm_dp_create_payload_step2(mgr, payload);
34231bb76ff1Sjsg 	if (ret < 0) {
34241bb76ff1Sjsg 		if (!payload->delete)
34251bb76ff1Sjsg 			drm_err(mgr->dev, "Step 2 of creating MST payload for %p failed: %d\n",
34261bb76ff1Sjsg 				payload->port, ret);
34271bb76ff1Sjsg 		else
34281bb76ff1Sjsg 			drm_dbg_kms(mgr->dev, "Step 2 of removing MST payload for %p failed: %d\n",
34291bb76ff1Sjsg 				    payload->port, ret);
34301bb76ff1Sjsg 	}
34311bb76ff1Sjsg 
34321bb76ff1Sjsg 	return ret;
34331bb76ff1Sjsg }
34341bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_add_payload_part2);
34351bb76ff1Sjsg 
34361bb76ff1Sjsg static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
34371bb76ff1Sjsg 				 struct drm_dp_mst_port *port,
34381bb76ff1Sjsg 				 int offset, int size, u8 *bytes)
34391bb76ff1Sjsg {
34401bb76ff1Sjsg 	int ret = 0;
34411bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg;
34421bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb;
34431bb76ff1Sjsg 
34441bb76ff1Sjsg 	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
34451bb76ff1Sjsg 	if (!mstb)
34461bb76ff1Sjsg 		return -EINVAL;
34471bb76ff1Sjsg 
34481bb76ff1Sjsg 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
34491bb76ff1Sjsg 	if (!txmsg) {
34501bb76ff1Sjsg 		ret = -ENOMEM;
34511bb76ff1Sjsg 		goto fail_put;
34521bb76ff1Sjsg 	}
34531bb76ff1Sjsg 
34541bb76ff1Sjsg 	build_dpcd_read(txmsg, port->port_num, offset, size);
34551bb76ff1Sjsg 	txmsg->dst = port->parent;
34561bb76ff1Sjsg 
34571bb76ff1Sjsg 	drm_dp_queue_down_tx(mgr, txmsg);
34581bb76ff1Sjsg 
34591bb76ff1Sjsg 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
34601bb76ff1Sjsg 	if (ret < 0)
34611bb76ff1Sjsg 		goto fail_free;
34621bb76ff1Sjsg 
34631bb76ff1Sjsg 	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
34641bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
34651bb76ff1Sjsg 			    mstb, port->port_num, offset, size);
34661bb76ff1Sjsg 		ret = -EIO;
34671bb76ff1Sjsg 		goto fail_free;
34681bb76ff1Sjsg 	}
34691bb76ff1Sjsg 
34701bb76ff1Sjsg 	if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
34711bb76ff1Sjsg 		ret = -EPROTO;
34721bb76ff1Sjsg 		goto fail_free;
34731bb76ff1Sjsg 	}
34741bb76ff1Sjsg 
34751bb76ff1Sjsg 	ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
34761bb76ff1Sjsg 		    size);
34771bb76ff1Sjsg 	memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
34781bb76ff1Sjsg 
34791bb76ff1Sjsg fail_free:
34801bb76ff1Sjsg 	kfree(txmsg);
34811bb76ff1Sjsg fail_put:
34821bb76ff1Sjsg 	drm_dp_mst_topology_put_mstb(mstb);
34831bb76ff1Sjsg 
34841bb76ff1Sjsg 	return ret;
34851bb76ff1Sjsg }
34861bb76ff1Sjsg 
34871bb76ff1Sjsg static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
34881bb76ff1Sjsg 				  struct drm_dp_mst_port *port,
34891bb76ff1Sjsg 				  int offset, int size, u8 *bytes)
34901bb76ff1Sjsg {
34911bb76ff1Sjsg 	int ret;
34921bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg;
34931bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb;
34941bb76ff1Sjsg 
34951bb76ff1Sjsg 	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
34961bb76ff1Sjsg 	if (!mstb)
34971bb76ff1Sjsg 		return -EINVAL;
34981bb76ff1Sjsg 
34991bb76ff1Sjsg 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
35001bb76ff1Sjsg 	if (!txmsg) {
35011bb76ff1Sjsg 		ret = -ENOMEM;
35021bb76ff1Sjsg 		goto fail_put;
35031bb76ff1Sjsg 	}
35041bb76ff1Sjsg 
35051bb76ff1Sjsg 	build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
35061bb76ff1Sjsg 	txmsg->dst = mstb;
35071bb76ff1Sjsg 
35081bb76ff1Sjsg 	drm_dp_queue_down_tx(mgr, txmsg);
35091bb76ff1Sjsg 
35101bb76ff1Sjsg 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
35111bb76ff1Sjsg 	if (ret > 0) {
35121bb76ff1Sjsg 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
35131bb76ff1Sjsg 			ret = -EIO;
35141bb76ff1Sjsg 		else
35151bb76ff1Sjsg 			ret = size;
35161bb76ff1Sjsg 	}
35171bb76ff1Sjsg 
35181bb76ff1Sjsg 	kfree(txmsg);
35191bb76ff1Sjsg fail_put:
35201bb76ff1Sjsg 	drm_dp_mst_topology_put_mstb(mstb);
35211bb76ff1Sjsg 	return ret;
35221bb76ff1Sjsg }
35231bb76ff1Sjsg 
35241bb76ff1Sjsg static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
35251bb76ff1Sjsg {
35261bb76ff1Sjsg 	struct drm_dp_sideband_msg_reply_body reply;
35271bb76ff1Sjsg 
35281bb76ff1Sjsg 	reply.reply_type = DP_SIDEBAND_REPLY_ACK;
35291bb76ff1Sjsg 	reply.req_type = req_type;
35301bb76ff1Sjsg 	drm_dp_encode_sideband_reply(&reply, msg);
35311bb76ff1Sjsg 	return 0;
35321bb76ff1Sjsg }
35331bb76ff1Sjsg 
35341bb76ff1Sjsg static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
35351bb76ff1Sjsg 				    struct drm_dp_mst_branch *mstb,
35361bb76ff1Sjsg 				    int req_type, bool broadcast)
35371bb76ff1Sjsg {
35381bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg;
35391bb76ff1Sjsg 
35401bb76ff1Sjsg 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
35411bb76ff1Sjsg 	if (!txmsg)
35421bb76ff1Sjsg 		return -ENOMEM;
35431bb76ff1Sjsg 
35441bb76ff1Sjsg 	txmsg->dst = mstb;
35451bb76ff1Sjsg 	drm_dp_encode_up_ack_reply(txmsg, req_type);
35461bb76ff1Sjsg 
35471bb76ff1Sjsg 	mutex_lock(&mgr->qlock);
35481bb76ff1Sjsg 	/* construct a chunk from the first msg in the tx_msg queue */
35491bb76ff1Sjsg 	process_single_tx_qlock(mgr, txmsg, true);
35501bb76ff1Sjsg 	mutex_unlock(&mgr->qlock);
35511bb76ff1Sjsg 
35521bb76ff1Sjsg 	kfree(txmsg);
35531bb76ff1Sjsg 	return 0;
35541bb76ff1Sjsg }
35551bb76ff1Sjsg 
35561bb76ff1Sjsg /**
35571bb76ff1Sjsg  * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
35581bb76ff1Sjsg  * @mgr: The &drm_dp_mst_topology_mgr to use
35591bb76ff1Sjsg  * @link_rate: link rate in 10kbits/s units
35601bb76ff1Sjsg  * @link_lane_count: lane count
35611bb76ff1Sjsg  *
35621bb76ff1Sjsg  * Calculate the total bandwidth of a MultiStream Transport link. The returned
35631bb76ff1Sjsg  * value is in units of PBNs/(timeslots/1 MTP). This value can be used to
35641bb76ff1Sjsg  * convert the number of PBNs required for a given stream to the number of
35651bb76ff1Sjsg  * timeslots this stream requires in each MTP.
35661bb76ff1Sjsg  */
35671bb76ff1Sjsg int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
35681bb76ff1Sjsg 			     int link_rate, int link_lane_count)
35691bb76ff1Sjsg {
35701bb76ff1Sjsg 	if (link_rate == 0 || link_lane_count == 0)
35711bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "invalid link rate/lane count: (%d / %d)\n",
35721bb76ff1Sjsg 			    link_rate, link_lane_count);
35731bb76ff1Sjsg 
35741bb76ff1Sjsg 	/* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
35751bb76ff1Sjsg 	return link_rate * link_lane_count / 54000;
35761bb76ff1Sjsg }
35771bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);
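/*
 * Example (editor's sketch, not part of the original sources): converting a
 * stream's PBN requirement into timeslots with the divisor returned above.
 * For an HBR2 (5.4 Gbps, link_rate == 540000) 4-lane link:
 *
 *	int pbn_div = drm_dp_get_vc_payload_bw(mgr, 540000, 4);
 *	int slots;
 *
 *	// 540000 * 4 / 54000 == 40 PBN per timeslot
 *	slots = DIV_ROUND_UP(pbn, pbn_div);
 *
 * A 532 PBN stream (1080p60 at 24 bpp, see drm_dp_calc_pbn_mode() below)
 * would then need DIV_ROUND_UP(532, 40) == 14 timeslots per MTP.
 */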
35781bb76ff1Sjsg 
35791bb76ff1Sjsg /**
35801bb76ff1Sjsg  * drm_dp_read_mst_cap() - check whether or not a sink supports MST
35811bb76ff1Sjsg  * @aux: The DP AUX channel to use
35821bb76ff1Sjsg  * @dpcd: A cached copy of the DPCD capabilities for this sink
35831bb76ff1Sjsg  *
35841bb76ff1Sjsg  * Returns: %true if the sink supports MST, %false otherwise
35851bb76ff1Sjsg  */
35861bb76ff1Sjsg bool drm_dp_read_mst_cap(struct drm_dp_aux *aux,
35871bb76ff1Sjsg 			 const u8 dpcd[DP_RECEIVER_CAP_SIZE])
35881bb76ff1Sjsg {
35891bb76ff1Sjsg 	u8 mstm_cap;
35901bb76ff1Sjsg 
35911bb76ff1Sjsg 	if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
35921bb76ff1Sjsg 		return false;
35931bb76ff1Sjsg 
35941bb76ff1Sjsg 	if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
35951bb76ff1Sjsg 		return false;
35961bb76ff1Sjsg 
35971bb76ff1Sjsg 	return mstm_cap & DP_MST_CAP;
35981bb76ff1Sjsg }
35991bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_read_mst_cap);
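/*
 * Example (editor's sketch; "drv" and its members are hypothetical driver
 * state): a typical detect path reads back the DPCD and flips the topology
 * manager in or out of MST mode accordingly:
 *
 *	bool mst = drm_dp_read_dpcd_caps(drv->aux, drv->dpcd) == 0 &&
 *		   drm_dp_read_mst_cap(drv->aux, drv->dpcd);
 *
 *	if (mst != drv->is_mst) {
 *		drv->is_mst = mst;
 *		drm_dp_mst_topology_mgr_set_mst(&drv->mst_mgr, mst);
 *	}
 */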
36001bb76ff1Sjsg 
36011bb76ff1Sjsg /**
36021bb76ff1Sjsg  * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
36031bb76ff1Sjsg  * @mgr: manager to set state for
36041bb76ff1Sjsg  * @mst_state: true to enable MST on this connector - false to disable.
36051bb76ff1Sjsg  *
36061bb76ff1Sjsg  * This is called by the driver when it detects an MST capable device plugged
36071bb76ff1Sjsg  * into a DP MST capable port, or when a DP MST capable device is unplugged.
36081bb76ff1Sjsg  */
36091bb76ff1Sjsg int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
36101bb76ff1Sjsg {
36111bb76ff1Sjsg 	int ret = 0;
36121bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb = NULL;
36131bb76ff1Sjsg 
36141bb76ff1Sjsg 	mutex_lock(&mgr->lock);
36151bb76ff1Sjsg 	if (mst_state == mgr->mst_state)
36161bb76ff1Sjsg 		goto out_unlock;
36171bb76ff1Sjsg 
36181bb76ff1Sjsg 	mgr->mst_state = mst_state;
36191bb76ff1Sjsg 	/* set the device into MST mode */
36201bb76ff1Sjsg 	if (mst_state) {
36211bb76ff1Sjsg 		WARN_ON(mgr->mst_primary);
36221bb76ff1Sjsg 
36231bb76ff1Sjsg 		/* get dpcd info */
36241bb76ff1Sjsg 		ret = drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd);
36251bb76ff1Sjsg 		if (ret < 0) {
36261bb76ff1Sjsg 			drm_dbg_kms(mgr->dev, "%s: failed to read DPCD, ret %d\n",
36271bb76ff1Sjsg 				    mgr->aux->name, ret);
36281bb76ff1Sjsg 			goto out_unlock;
36291bb76ff1Sjsg 		}
36301bb76ff1Sjsg 
36311bb76ff1Sjsg 		/* add initial branch device at LCT 1 */
36321bb76ff1Sjsg 		mstb = drm_dp_add_mst_branch_device(1, NULL);
36331bb76ff1Sjsg 		if (mstb == NULL) {
36341bb76ff1Sjsg 			ret = -ENOMEM;
36351bb76ff1Sjsg 			goto out_unlock;
36361bb76ff1Sjsg 		}
36371bb76ff1Sjsg 		mstb->mgr = mgr;
36381bb76ff1Sjsg 
36391bb76ff1Sjsg 		/* give this the main reference */
36401bb76ff1Sjsg 		mgr->mst_primary = mstb;
36411bb76ff1Sjsg 		drm_dp_mst_topology_get_mstb(mgr->mst_primary);
36421bb76ff1Sjsg 
36431bb76ff1Sjsg 		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
36441bb76ff1Sjsg 					 DP_MST_EN |
36451bb76ff1Sjsg 					 DP_UP_REQ_EN |
36461bb76ff1Sjsg 					 DP_UPSTREAM_IS_SRC);
36471bb76ff1Sjsg 		if (ret < 0)
36481bb76ff1Sjsg 			goto out_unlock;
36491bb76ff1Sjsg 
36501bb76ff1Sjsg 		/* Write reset payload */
36511bb76ff1Sjsg 		drm_dp_dpcd_write_payload(mgr, 0, 0, 0x3f);
36521bb76ff1Sjsg 
36531bb76ff1Sjsg 		queue_work(system_long_wq, &mgr->work);
36541bb76ff1Sjsg 
36551bb76ff1Sjsg 		ret = 0;
36561bb76ff1Sjsg 	} else {
36571bb76ff1Sjsg 		/* disable MST on the device */
36581bb76ff1Sjsg 		mstb = mgr->mst_primary;
36591bb76ff1Sjsg 		mgr->mst_primary = NULL;
36601bb76ff1Sjsg 		/* this can fail if the device is gone */
36611bb76ff1Sjsg 		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
36621bb76ff1Sjsg 		ret = 0;
36631bb76ff1Sjsg 		mgr->payload_id_table_cleared = false;
36643b10c595Sjsg 
36653b10c595Sjsg 		memset(&mgr->down_rep_recv, 0, sizeof(mgr->down_rep_recv));
36663b10c595Sjsg 		memset(&mgr->up_req_recv, 0, sizeof(mgr->up_req_recv));
36671bb76ff1Sjsg 	}
36681bb76ff1Sjsg 
36691bb76ff1Sjsg out_unlock:
36701bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
36711bb76ff1Sjsg 	if (mstb)
36721bb76ff1Sjsg 		drm_dp_mst_topology_put_mstb(mstb);
36731bb76ff1Sjsg 	return ret;
36751bb76ff1Sjsg }
36761bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
36771bb76ff1Sjsg 
36781bb76ff1Sjsg static void
36791bb76ff1Sjsg drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
36801bb76ff1Sjsg {
36811bb76ff1Sjsg 	struct drm_dp_mst_port *port;
36821bb76ff1Sjsg 
36831bb76ff1Sjsg 	/* The link address will need to be re-sent on resume */
36841bb76ff1Sjsg 	mstb->link_address_sent = false;
36851bb76ff1Sjsg 
36861bb76ff1Sjsg 	list_for_each_entry(port, &mstb->ports, next)
36871bb76ff1Sjsg 		if (port->mstb)
36881bb76ff1Sjsg 			drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
36891bb76ff1Sjsg }
36901bb76ff1Sjsg 
36911bb76ff1Sjsg /**
36921bb76ff1Sjsg  * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
36931bb76ff1Sjsg  * @mgr: manager to suspend
36941bb76ff1Sjsg  *
36951bb76ff1Sjsg  * This function tells the MST device that we can't handle UP messages
36961bb76ff1Sjsg  * anymore. This should stop it from sending any since we are suspended.
36971bb76ff1Sjsg  */
36981bb76ff1Sjsg void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
36991bb76ff1Sjsg {
37001bb76ff1Sjsg 	mutex_lock(&mgr->lock);
37011bb76ff1Sjsg 	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
37021bb76ff1Sjsg 			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
37031bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
37041bb76ff1Sjsg 	flush_work(&mgr->up_req_work);
37051bb76ff1Sjsg 	flush_work(&mgr->work);
37061bb76ff1Sjsg 	flush_work(&mgr->delayed_destroy_work);
37071bb76ff1Sjsg 
37081bb76ff1Sjsg 	mutex_lock(&mgr->lock);
37091bb76ff1Sjsg 	if (mgr->mst_state && mgr->mst_primary)
37101bb76ff1Sjsg 		drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
37111bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
37121bb76ff1Sjsg }
37131bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
37141bb76ff1Sjsg 
37151bb76ff1Sjsg /**
37161bb76ff1Sjsg  * drm_dp_mst_topology_mgr_resume() - resume the MST manager
37171bb76ff1Sjsg  * @mgr: manager to resume
37181bb76ff1Sjsg  * @sync: whether or not to perform topology reprobing synchronously
37191bb76ff1Sjsg  *
37201bb76ff1Sjsg  * This will fetch DPCD and see if the device is still there,
37211bb76ff1Sjsg  * if it is, it will rewrite the MSTM control bits, and return.
37221bb76ff1Sjsg  *
37231bb76ff1Sjsg  * If the device fails the check, this returns -1, and the driver should
37241bb76ff1Sjsg  * do a full MST reprobe, in case we were undocked.
37251bb76ff1Sjsg  *
37261bb76ff1Sjsg  * During system resume (where it is assumed that the driver will be calling
37271bb76ff1Sjsg  * drm_atomic_helper_resume()) this function should be called beforehand with
37281bb76ff1Sjsg  * @sync set to true. In contexts like runtime resume where the driver is not
37291bb76ff1Sjsg  * expected to be calling drm_atomic_helper_resume(), this function should be
37301bb76ff1Sjsg  * called with @sync set to false in order to avoid deadlocking.
37311bb76ff1Sjsg  *
37321bb76ff1Sjsg  * Returns: -1 if the MST topology was removed while we were suspended, 0
37331bb76ff1Sjsg  * otherwise.
37341bb76ff1Sjsg  */
37351bb76ff1Sjsg int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
37361bb76ff1Sjsg 				   bool sync)
37371bb76ff1Sjsg {
37381bb76ff1Sjsg 	int ret;
37391bb76ff1Sjsg 	u8 guid[16];
37401bb76ff1Sjsg 
37411bb76ff1Sjsg 	mutex_lock(&mgr->lock);
37421bb76ff1Sjsg 	if (!mgr->mst_primary)
37431bb76ff1Sjsg 		goto out_fail;
37441bb76ff1Sjsg 
37451bb76ff1Sjsg 	if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
37461bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
37471bb76ff1Sjsg 		goto out_fail;
37481bb76ff1Sjsg 	}
37491bb76ff1Sjsg 
37501bb76ff1Sjsg 	ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
37511bb76ff1Sjsg 				 DP_MST_EN |
37521bb76ff1Sjsg 				 DP_UP_REQ_EN |
37531bb76ff1Sjsg 				 DP_UPSTREAM_IS_SRC);
37541bb76ff1Sjsg 	if (ret < 0) {
37551bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
37561bb76ff1Sjsg 		goto out_fail;
37571bb76ff1Sjsg 	}
37581bb76ff1Sjsg 
37591bb76ff1Sjsg 	/* Some hubs forget their guids after they resume */
37601bb76ff1Sjsg 	ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
37611bb76ff1Sjsg 	if (ret != 16) {
37621bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
37631bb76ff1Sjsg 		goto out_fail;
37641bb76ff1Sjsg 	}
37651bb76ff1Sjsg 
37661bb76ff1Sjsg 	ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid);
37671bb76ff1Sjsg 	if (ret) {
37681bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "check mstb failed - undocked during suspend?\n");
37691bb76ff1Sjsg 		goto out_fail;
37701bb76ff1Sjsg 	}
37711bb76ff1Sjsg 
37721bb76ff1Sjsg 	/*
37731bb76ff1Sjsg 	 * For the final step of resuming the topology, we need to bring the
37741bb76ff1Sjsg 	 * state of our in-memory topology back into sync with reality. So,
37751bb76ff1Sjsg 	 * restart the probing process as if we're probing a new hub
37761bb76ff1Sjsg 	 */
37771bb76ff1Sjsg 	queue_work(system_long_wq, &mgr->work);
37781bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
37791bb76ff1Sjsg 
37801bb76ff1Sjsg 	if (sync) {
37811bb76ff1Sjsg 		drm_dbg_kms(mgr->dev,
37821bb76ff1Sjsg 			    "Waiting for link probe work to finish re-syncing topology...\n");
37831bb76ff1Sjsg 		flush_work(&mgr->work);
37841bb76ff1Sjsg 	}
37851bb76ff1Sjsg 
37861bb76ff1Sjsg 	return 0;
37871bb76ff1Sjsg 
37881bb76ff1Sjsg out_fail:
37891bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
37901bb76ff1Sjsg 	return -1;
37911bb76ff1Sjsg }
37921bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
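/*
 * Example (editor's sketch; "drv" is hypothetical driver state): system
 * resume calls this with @sync == true before drm_atomic_helper_resume(),
 * and tears the topology down if the hub vanished while suspended:
 *
 *	if (drm_dp_mst_topology_mgr_resume(&drv->mst_mgr, true) < 0) {
 *		// Undocked during suspend: disable MST and let userspace
 *		// reprobe the (now SST) connector.
 *		drm_dp_mst_topology_mgr_set_mst(&drv->mst_mgr, false);
 *		drm_kms_helper_hotplug_event(drv->dev);
 *	}
 *
 * Runtime resume paths should pass @sync == false instead, per the
 * kernel-doc above.
 */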
37931bb76ff1Sjsg 
37941bb76ff1Sjsg static bool
37951bb76ff1Sjsg drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
37961bb76ff1Sjsg 		      struct drm_dp_mst_branch **mstb)
37971bb76ff1Sjsg {
37981bb76ff1Sjsg 	int len;
37991bb76ff1Sjsg 	u8 replyblock[32];
38001bb76ff1Sjsg 	int replylen, curreply;
38011bb76ff1Sjsg 	int ret;
38021bb76ff1Sjsg 	u8 hdrlen;
38031bb76ff1Sjsg 	struct drm_dp_sideband_msg_hdr hdr;
38041bb76ff1Sjsg 	struct drm_dp_sideband_msg_rx *msg =
38051bb76ff1Sjsg 		up ? &mgr->up_req_recv : &mgr->down_rep_recv;
38061bb76ff1Sjsg 	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE :
38071bb76ff1Sjsg 			   DP_SIDEBAND_MSG_DOWN_REP_BASE;
38081bb76ff1Sjsg 
38091bb76ff1Sjsg 	if (!up)
38101bb76ff1Sjsg 		*mstb = NULL;
38111bb76ff1Sjsg 
38121bb76ff1Sjsg 	len = min(mgr->max_dpcd_transaction_bytes, 16);
38131bb76ff1Sjsg 	ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
38141bb76ff1Sjsg 	if (ret != len) {
38151bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret);
38161bb76ff1Sjsg 		return false;
38171bb76ff1Sjsg 	}
38181bb76ff1Sjsg 
38191bb76ff1Sjsg 	ret = drm_dp_decode_sideband_msg_hdr(mgr, &hdr, replyblock, len, &hdrlen);
38201bb76ff1Sjsg 	if (!ret) {
38211bb76ff1Sjsg 		print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16,
38221bb76ff1Sjsg 			       1, replyblock, len, false);
38231bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "ERROR: failed header\n");
38241bb76ff1Sjsg 		return false;
38251bb76ff1Sjsg 	}
38261bb76ff1Sjsg 
38271bb76ff1Sjsg 	if (!up) {
38281bb76ff1Sjsg 		/* Caller is responsible for giving back this reference */
38291bb76ff1Sjsg 		*mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad);
38301bb76ff1Sjsg 		if (!*mstb) {
38311bb76ff1Sjsg 			drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr.lct);
38321bb76ff1Sjsg 			return false;
38331bb76ff1Sjsg 		}
38341bb76ff1Sjsg 	}
38351bb76ff1Sjsg 
38361bb76ff1Sjsg 	if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) {
38371bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "sideband msg set header failed %d\n", replyblock[0]);
38381bb76ff1Sjsg 		return false;
38391bb76ff1Sjsg 	}
38401bb76ff1Sjsg 
38411bb76ff1Sjsg 	replylen = min(msg->curchunk_len, (u8)(len - hdrlen));
38421bb76ff1Sjsg 	ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen);
38431bb76ff1Sjsg 	if (!ret) {
38441bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "sideband msg build failed %d\n", replyblock[0]);
38451bb76ff1Sjsg 		return false;
38461bb76ff1Sjsg 	}
38471bb76ff1Sjsg 
38481bb76ff1Sjsg 	replylen = msg->curchunk_len + msg->curchunk_hdrlen - len;
38491bb76ff1Sjsg 	curreply = len;
38501bb76ff1Sjsg 	while (replylen > 0) {
38511bb76ff1Sjsg 		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
38521bb76ff1Sjsg 		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
38531bb76ff1Sjsg 				    replyblock, len);
38541bb76ff1Sjsg 		if (ret != len) {
38551bb76ff1Sjsg 			drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n",
38561bb76ff1Sjsg 				    len, ret);
38571bb76ff1Sjsg 			return false;
38581bb76ff1Sjsg 		}
38591bb76ff1Sjsg 
38601bb76ff1Sjsg 		ret = drm_dp_sideband_append_payload(msg, replyblock, len);
38611bb76ff1Sjsg 		if (!ret) {
38621bb76ff1Sjsg 			drm_dbg_kms(mgr->dev, "failed to build sideband msg\n");
38631bb76ff1Sjsg 			return false;
38641bb76ff1Sjsg 		}
38651bb76ff1Sjsg 
38661bb76ff1Sjsg 		curreply += len;
38671bb76ff1Sjsg 		replylen -= len;
38681bb76ff1Sjsg 	}
38691bb76ff1Sjsg 	return true;
38701bb76ff1Sjsg }
38711bb76ff1Sjsg 
38721bb76ff1Sjsg static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
38731bb76ff1Sjsg {
38741bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg;
38751bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb = NULL;
38761bb76ff1Sjsg 	struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
38771bb76ff1Sjsg 
38781bb76ff1Sjsg 	if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
3879dcf8af2aSjsg 		goto out_clear_reply;
38801bb76ff1Sjsg 
38811bb76ff1Sjsg 	/* Multi-packet message transmission, don't clear the reply */
38821bb76ff1Sjsg 	if (!msg->have_eomt)
38831bb76ff1Sjsg 		goto out;
38841bb76ff1Sjsg 
38851bb76ff1Sjsg 	/* find the message */
38861bb76ff1Sjsg 	mutex_lock(&mgr->qlock);
38871bb76ff1Sjsg 	txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
38881bb76ff1Sjsg 					 struct drm_dp_sideband_msg_tx, next);
38891bb76ff1Sjsg 	mutex_unlock(&mgr->qlock);
38901bb76ff1Sjsg 
38911bb76ff1Sjsg 	/* Were we actually expecting a response, and from this mstb? */
38921bb76ff1Sjsg 	if (!txmsg || txmsg->dst != mstb) {
38931bb76ff1Sjsg 		struct drm_dp_sideband_msg_hdr *hdr;
38941bb76ff1Sjsg 
38951bb76ff1Sjsg 		hdr = &msg->initial_hdr;
38961bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "Got MST reply with no msg %p %d %d %02x %02x\n",
38971bb76ff1Sjsg 			    mstb, hdr->seqno, hdr->lct, hdr->rad[0], msg->msg[0]);
38981bb76ff1Sjsg 		goto out_clear_reply;
38991bb76ff1Sjsg 	}
39001bb76ff1Sjsg 
39011bb76ff1Sjsg 	drm_dp_sideband_parse_reply(mgr, msg, &txmsg->reply);
39021bb76ff1Sjsg 
39031bb76ff1Sjsg 	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
39041bb76ff1Sjsg 		drm_dbg_kms(mgr->dev,
39051bb76ff1Sjsg 			    "Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
39061bb76ff1Sjsg 			    txmsg->reply.req_type,
39071bb76ff1Sjsg 			    drm_dp_mst_req_type_str(txmsg->reply.req_type),
39081bb76ff1Sjsg 			    txmsg->reply.u.nak.reason,
39091bb76ff1Sjsg 			    drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
39101bb76ff1Sjsg 			    txmsg->reply.u.nak.nak_data);
39111bb76ff1Sjsg 	}
39121bb76ff1Sjsg 
39131bb76ff1Sjsg 	memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
39141bb76ff1Sjsg 	drm_dp_mst_topology_put_mstb(mstb);
39151bb76ff1Sjsg 
39161bb76ff1Sjsg 	mutex_lock(&mgr->qlock);
39171bb76ff1Sjsg 	txmsg->state = DRM_DP_SIDEBAND_TX_RX;
39181bb76ff1Sjsg 	list_del(&txmsg->next);
39191bb76ff1Sjsg 	mutex_unlock(&mgr->qlock);
39201bb76ff1Sjsg 
39211bb76ff1Sjsg 	wake_up_all(&mgr->tx_waitq);
39221bb76ff1Sjsg 
39231bb76ff1Sjsg 	return 0;
39241bb76ff1Sjsg 
39251bb76ff1Sjsg out_clear_reply:
39261bb76ff1Sjsg 	memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
39271bb76ff1Sjsg out:
39281bb76ff1Sjsg 	if (mstb)
39291bb76ff1Sjsg 		drm_dp_mst_topology_put_mstb(mstb);
39301bb76ff1Sjsg 
39311bb76ff1Sjsg 	return 0;
39321bb76ff1Sjsg }
39331bb76ff1Sjsg 
39341bb76ff1Sjsg static inline bool
39351bb76ff1Sjsg drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
39361bb76ff1Sjsg 			  struct drm_dp_pending_up_req *up_req)
39371bb76ff1Sjsg {
39381bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb = NULL;
39391bb76ff1Sjsg 	struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
39401bb76ff1Sjsg 	struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
39411bb76ff1Sjsg 	bool hotplug = false, dowork = false;
39421bb76ff1Sjsg 
39431bb76ff1Sjsg 	if (hdr->broadcast) {
39441bb76ff1Sjsg 		const u8 *guid = NULL;
39451bb76ff1Sjsg 
39461bb76ff1Sjsg 		if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
39471bb76ff1Sjsg 			guid = msg->u.conn_stat.guid;
39481bb76ff1Sjsg 		else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
39491bb76ff1Sjsg 			guid = msg->u.resource_stat.guid;
39501bb76ff1Sjsg 
39511bb76ff1Sjsg 		if (guid)
39521bb76ff1Sjsg 			mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
39531bb76ff1Sjsg 	} else {
39541bb76ff1Sjsg 		mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
39551bb76ff1Sjsg 	}
39561bb76ff1Sjsg 
39571bb76ff1Sjsg 	if (!mstb) {
39581bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr->lct);
39591bb76ff1Sjsg 		return false;
39601bb76ff1Sjsg 	}
39611bb76ff1Sjsg 
39621bb76ff1Sjsg 	/* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
39631bb76ff1Sjsg 	if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
39641bb76ff1Sjsg 		dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
39651bb76ff1Sjsg 		hotplug = true;
39661bb76ff1Sjsg 	}
39671bb76ff1Sjsg 
39681bb76ff1Sjsg 	drm_dp_mst_topology_put_mstb(mstb);
39691bb76ff1Sjsg 
39701bb76ff1Sjsg 	if (dowork)
39711bb76ff1Sjsg 		queue_work(system_long_wq, &mgr->work);
39721bb76ff1Sjsg 	return hotplug;
39731bb76ff1Sjsg }
39741bb76ff1Sjsg 
39751bb76ff1Sjsg static void drm_dp_mst_up_req_work(struct work_struct *work)
39761bb76ff1Sjsg {
39771bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr =
39781bb76ff1Sjsg 		container_of(work, struct drm_dp_mst_topology_mgr,
39791bb76ff1Sjsg 			     up_req_work);
39801bb76ff1Sjsg 	struct drm_dp_pending_up_req *up_req;
39811bb76ff1Sjsg 	bool send_hotplug = false;
39821bb76ff1Sjsg 
39831bb76ff1Sjsg 	mutex_lock(&mgr->probe_lock);
39841bb76ff1Sjsg 	while (true) {
39851bb76ff1Sjsg 		mutex_lock(&mgr->up_req_lock);
39861bb76ff1Sjsg 		up_req = list_first_entry_or_null(&mgr->up_req_list,
39871bb76ff1Sjsg 						  struct drm_dp_pending_up_req,
39881bb76ff1Sjsg 						  next);
39891bb76ff1Sjsg 		if (up_req)
39901bb76ff1Sjsg 			list_del(&up_req->next);
39911bb76ff1Sjsg 		mutex_unlock(&mgr->up_req_lock);
39921bb76ff1Sjsg 
39931bb76ff1Sjsg 		if (!up_req)
39941bb76ff1Sjsg 			break;
39951bb76ff1Sjsg 
39961bb76ff1Sjsg 		send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
39971bb76ff1Sjsg 		kfree(up_req);
39981bb76ff1Sjsg 	}
39991bb76ff1Sjsg 	mutex_unlock(&mgr->probe_lock);
40001bb76ff1Sjsg 
40011bb76ff1Sjsg 	if (send_hotplug)
40021bb76ff1Sjsg 		drm_kms_helper_hotplug_event(mgr->dev);
40031bb76ff1Sjsg }
40041bb76ff1Sjsg 
40051bb76ff1Sjsg static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
40061bb76ff1Sjsg {
40071bb76ff1Sjsg 	struct drm_dp_pending_up_req *up_req;
40081bb76ff1Sjsg 
40091bb76ff1Sjsg 	if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
40101bb76ff1Sjsg 		goto out;
40111bb76ff1Sjsg 
40121bb76ff1Sjsg 	if (!mgr->up_req_recv.have_eomt)
40131bb76ff1Sjsg 		return 0;
40141bb76ff1Sjsg 
40151bb76ff1Sjsg 	up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
40161bb76ff1Sjsg 	if (!up_req)
40171bb76ff1Sjsg 		return -ENOMEM;
40181bb76ff1Sjsg 
40191bb76ff1Sjsg 	INIT_LIST_HEAD(&up_req->next);
40201bb76ff1Sjsg 
40211bb76ff1Sjsg 	drm_dp_sideband_parse_req(mgr, &mgr->up_req_recv, &up_req->msg);
40221bb76ff1Sjsg 
40231bb76ff1Sjsg 	if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
40241bb76ff1Sjsg 	    up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
40251bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n",
40261bb76ff1Sjsg 			    up_req->msg.req_type);
40271bb76ff1Sjsg 		kfree(up_req);
40281bb76ff1Sjsg 		goto out;
40291bb76ff1Sjsg 	}
40301bb76ff1Sjsg 
40311bb76ff1Sjsg 	drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
40321bb76ff1Sjsg 				 false);
40331bb76ff1Sjsg 
40341bb76ff1Sjsg 	if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
40351bb76ff1Sjsg 		const struct drm_dp_connection_status_notify *conn_stat =
40361bb76ff1Sjsg 			&up_req->msg.u.conn_stat;
4037*0c230191Sjsg 		bool handle_csn;
40381bb76ff1Sjsg 
40391bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
40401bb76ff1Sjsg 			    conn_stat->port_number,
40411bb76ff1Sjsg 			    conn_stat->legacy_device_plug_status,
40421bb76ff1Sjsg 			    conn_stat->displayport_device_plug_status,
40431bb76ff1Sjsg 			    conn_stat->message_capability_status,
40441bb76ff1Sjsg 			    conn_stat->input_port,
40451bb76ff1Sjsg 			    conn_stat->peer_device_type);
4046*0c230191Sjsg 
4047*0c230191Sjsg 		mutex_lock(&mgr->probe_lock);
4048*0c230191Sjsg 		handle_csn = mgr->mst_primary->link_address_sent;
4049*0c230191Sjsg 		mutex_unlock(&mgr->probe_lock);
4050*0c230191Sjsg 
4051*0c230191Sjsg 		if (!handle_csn) {
4052*0c230191Sjsg 			drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.");
4053*0c230191Sjsg 			kfree(up_req);
4054*0c230191Sjsg 			goto out;
4055*0c230191Sjsg 		}
40561bb76ff1Sjsg 	} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
40571bb76ff1Sjsg 		const struct drm_dp_resource_status_notify *res_stat =
40581bb76ff1Sjsg 			&up_req->msg.u.resource_stat;
40591bb76ff1Sjsg 
40601bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "Got RSN: pn: %d avail_pbn %d\n",
40611bb76ff1Sjsg 			    res_stat->port_number,
40621bb76ff1Sjsg 			    res_stat->available_pbn);
40631bb76ff1Sjsg 	}
40641bb76ff1Sjsg 
40651bb76ff1Sjsg 	up_req->hdr = mgr->up_req_recv.initial_hdr;
40661bb76ff1Sjsg 	mutex_lock(&mgr->up_req_lock);
40671bb76ff1Sjsg 	list_add_tail(&up_req->next, &mgr->up_req_list);
40681bb76ff1Sjsg 	mutex_unlock(&mgr->up_req_lock);
40691bb76ff1Sjsg 	queue_work(system_long_wq, &mgr->up_req_work);
40701bb76ff1Sjsg 
40711bb76ff1Sjsg out:
40721bb76ff1Sjsg 	memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
40731bb76ff1Sjsg 	return 0;
40741bb76ff1Sjsg }
40751bb76ff1Sjsg 
40761bb76ff1Sjsg /**
407783f201fbSjsg  * drm_dp_mst_hpd_irq_handle_event() - MST hotplug IRQ handle MST event
40781bb76ff1Sjsg  * @mgr: manager to notify irq for.
40791bb76ff1Sjsg  * @esi: 4 bytes from SINK_COUNT_ESI
408083f201fbSjsg  * @ack: 4 bytes used to ack events starting from SINK_COUNT_ESI
40811bb76ff1Sjsg  * @handled: whether the hpd interrupt was consumed or not
40821bb76ff1Sjsg  *
408383f201fbSjsg  * This should be called from the driver when it detects an HPD IRQ,
40841bb76ff1Sjsg  * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
408583f201fbSjsg  * topology manager will process the sideband messages indicated in
408683f201fbSjsg  * DEVICE_SERVICE_IRQ_VECTOR_ESI0 and set the corresponding flags in
408783f201fbSjsg  * @ack that the driver has to use to ack the DP receiver later.
408883f201fbSjsg  *
408983f201fbSjsg  * Note that the driver shall also call
409083f201fbSjsg  * drm_dp_mst_hpd_irq_send_new_request() if @handled is set after
409183f201fbSjsg  * calling this function, to try to kick off a new request in the
409283f201fbSjsg  * queue once the previous message transaction has completed.
409383f201fbSjsg  *
409483f201fbSjsg  * See also:
409583f201fbSjsg  * drm_dp_mst_hpd_irq_send_new_request()
40961bb76ff1Sjsg  */
409783f201fbSjsg int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, const u8 *esi,
409883f201fbSjsg 				    u8 *ack, bool *handled)
40991bb76ff1Sjsg {
41001bb76ff1Sjsg 	int ret = 0;
41011bb76ff1Sjsg 	int sc;
41021bb76ff1Sjsg 	*handled = false;
41031bb76ff1Sjsg 	sc = DP_GET_SINK_COUNT(esi[0]);
41041bb76ff1Sjsg 
41051bb76ff1Sjsg 	if (sc != mgr->sink_count) {
41061bb76ff1Sjsg 		mgr->sink_count = sc;
41071bb76ff1Sjsg 		*handled = true;
41081bb76ff1Sjsg 	}
41091bb76ff1Sjsg 
41101bb76ff1Sjsg 	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
41111bb76ff1Sjsg 		ret = drm_dp_mst_handle_down_rep(mgr);
41121bb76ff1Sjsg 		*handled = true;
411383f201fbSjsg 		ack[1] |= DP_DOWN_REP_MSG_RDY;
41141bb76ff1Sjsg 	}
41151bb76ff1Sjsg 
41161bb76ff1Sjsg 	if (esi[1] & DP_UP_REQ_MSG_RDY) {
41171bb76ff1Sjsg 		ret |= drm_dp_mst_handle_up_req(mgr);
41181bb76ff1Sjsg 		*handled = true;
411983f201fbSjsg 		ack[1] |= DP_UP_REQ_MSG_RDY;
41201bb76ff1Sjsg 	}
41211bb76ff1Sjsg 
41221bb76ff1Sjsg 	return ret;
41231bb76ff1Sjsg }
412483f201fbSjsg EXPORT_SYMBOL(drm_dp_mst_hpd_irq_handle_event);
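/*
 * Example (editor's sketch; "drv" is hypothetical driver state): an HPD IRQ
 * handler reads the ESI bytes, lets the topology manager process them, acks
 * whatever it flagged (the ack registers start at DP_SINK_COUNT_ESI + 1) and
 * then kicks the next queued request:
 *
 *	u8 esi[4], ack[4] = {};
 *	bool handled;
 *
 *	if (drm_dp_dpcd_read(drv->aux, DP_SINK_COUNT_ESI, esi, 4) == 4) {
 *		drm_dp_mst_hpd_irq_handle_event(&drv->mst_mgr, esi, ack,
 *						&handled);
 *		if (handled) {
 *			drm_dp_dpcd_write(drv->aux, DP_SINK_COUNT_ESI + 1,
 *					  &ack[1], 3);
 *			drm_dp_mst_hpd_irq_send_new_request(&drv->mst_mgr);
 *		}
 *	}
 */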
41251bb76ff1Sjsg 
41261bb76ff1Sjsg /**
412783f201fbSjsg  * drm_dp_mst_hpd_irq_send_new_request() - MST hotplug IRQ kick off new request
412883f201fbSjsg  * @mgr: manager to notify irq for.
412983f201fbSjsg  *
413083f201fbSjsg  * This should be called from the driver when the MST IRQ event is
413183f201fbSjsg  * handled and acked. Note that a new down request should only be sent
413283f201fbSjsg  * when the previous message transaction is completed. The source is not
413383f201fbSjsg  * supposed to generate interleaved message transactions.
413483f201fbSjsg  */
413583f201fbSjsg void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr)
413683f201fbSjsg {
413783f201fbSjsg 	struct drm_dp_sideband_msg_tx *txmsg;
413883f201fbSjsg 	bool kick = true;
413983f201fbSjsg 
414083f201fbSjsg 	mutex_lock(&mgr->qlock);
414183f201fbSjsg 	txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
414283f201fbSjsg 					 struct drm_dp_sideband_msg_tx, next);
414383f201fbSjsg 	/* If the last transaction is not completed yet */
414483f201fbSjsg 	if (!txmsg ||
414583f201fbSjsg 	    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
414683f201fbSjsg 	    txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
414783f201fbSjsg 		kick = false;
414883f201fbSjsg 	mutex_unlock(&mgr->qlock);
414983f201fbSjsg 
415083f201fbSjsg 	if (kick)
415183f201fbSjsg 		drm_dp_mst_kick_tx(mgr);
415283f201fbSjsg }
415383f201fbSjsg EXPORT_SYMBOL(drm_dp_mst_hpd_irq_send_new_request);
415483f201fbSjsg /**
41551bb76ff1Sjsg  * drm_dp_mst_detect_port() - get connection status for an MST port
41561bb76ff1Sjsg  * @connector: DRM connector for this port
41571bb76ff1Sjsg  * @ctx: The acquisition context to use for grabbing locks
41581bb76ff1Sjsg  * @mgr: manager for this port
41591bb76ff1Sjsg  * @port: pointer to a port
41601bb76ff1Sjsg  *
41611bb76ff1Sjsg  * This returns the current connection state for a port.
41621bb76ff1Sjsg  */
41631bb76ff1Sjsg int
41641bb76ff1Sjsg drm_dp_mst_detect_port(struct drm_connector *connector,
41651bb76ff1Sjsg 		       struct drm_modeset_acquire_ctx *ctx,
41661bb76ff1Sjsg 		       struct drm_dp_mst_topology_mgr *mgr,
41671bb76ff1Sjsg 		       struct drm_dp_mst_port *port)
41681bb76ff1Sjsg {
41691bb76ff1Sjsg 	int ret;
41701bb76ff1Sjsg 
41711bb76ff1Sjsg 	/* we need to search for the port in the mgr in case it's gone */
41721bb76ff1Sjsg 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
41731bb76ff1Sjsg 	if (!port)
41741bb76ff1Sjsg 		return connector_status_disconnected;
41751bb76ff1Sjsg 
41761bb76ff1Sjsg 	ret = drm_modeset_lock(&mgr->base.lock, ctx);
41771bb76ff1Sjsg 	if (ret)
41781bb76ff1Sjsg 		goto out;
41791bb76ff1Sjsg 
41801bb76ff1Sjsg 	ret = connector_status_disconnected;
41811bb76ff1Sjsg 
41821bb76ff1Sjsg 	if (!port->ddps)
41831bb76ff1Sjsg 		goto out;
41841bb76ff1Sjsg 
41851bb76ff1Sjsg 	switch (port->pdt) {
41861bb76ff1Sjsg 	case DP_PEER_DEVICE_NONE:
41871bb76ff1Sjsg 		break;
41881bb76ff1Sjsg 	case DP_PEER_DEVICE_MST_BRANCHING:
41891bb76ff1Sjsg 		if (!port->mcs)
41901bb76ff1Sjsg 			ret = connector_status_connected;
41911bb76ff1Sjsg 		break;
41921bb76ff1Sjsg 
41931bb76ff1Sjsg 	case DP_PEER_DEVICE_SST_SINK:
41941bb76ff1Sjsg 		ret = connector_status_connected;
41951bb76ff1Sjsg 		/* for logical ports - cache the EDID */
41961bb76ff1Sjsg 		if (port->port_num >= DP_MST_LOGICAL_PORT_0 && !port->cached_edid)
4197f005ef32Sjsg 			port->cached_edid = drm_edid_read_ddc(connector, &port->aux.ddc);
41981bb76ff1Sjsg 		break;
41991bb76ff1Sjsg 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
42001bb76ff1Sjsg 		if (port->ldps)
42011bb76ff1Sjsg 			ret = connector_status_connected;
42021bb76ff1Sjsg 		break;
42031bb76ff1Sjsg 	}
42041bb76ff1Sjsg out:
42051bb76ff1Sjsg 	drm_dp_mst_topology_put_port(port);
42061bb76ff1Sjsg 	return ret;
42071bb76ff1Sjsg }
42081bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_detect_port);
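/*
 * Example (editor's sketch; the my_mst_* names are hypothetical): this maps
 * directly onto a connector's &drm_connector_helper_funcs.detect_ctx hook,
 * which already supplies the acquisition context:
 *
 *	static int my_mst_detect_ctx(struct drm_connector *connector,
 *				     struct drm_modeset_acquire_ctx *ctx,
 *				     bool force)
 *	{
 *		struct my_mst_connector *c = to_my_mst_connector(connector);
 *
 *		return drm_dp_mst_detect_port(connector, ctx, c->mgr, c->port);
 *	}
 */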
42091bb76ff1Sjsg 
42101bb76ff1Sjsg /**
4211f005ef32Sjsg  * drm_dp_mst_edid_read() - get EDID for an MST port
42121bb76ff1Sjsg  * @connector: toplevel connector to get EDID for
42131bb76ff1Sjsg  * @mgr: manager for this port
42141bb76ff1Sjsg  * @port: unverified pointer to a port.
42151bb76ff1Sjsg  *
42161bb76ff1Sjsg  * This returns an EDID for the port connected to a connector.
42171bb76ff1Sjsg  * It validates that the pointer still exists so the caller doesn't require a
42181bb76ff1Sjsg  * reference.
42191bb76ff1Sjsg  */
4220f005ef32Sjsg const struct drm_edid *drm_dp_mst_edid_read(struct drm_connector *connector,
4221f005ef32Sjsg 					    struct drm_dp_mst_topology_mgr *mgr,
4222f005ef32Sjsg 					    struct drm_dp_mst_port *port)
42231bb76ff1Sjsg {
4224f005ef32Sjsg 	const struct drm_edid *drm_edid;
42251bb76ff1Sjsg 
42261bb76ff1Sjsg 	/* we need to search for the port in the mgr in case it's gone */
42271bb76ff1Sjsg 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
42281bb76ff1Sjsg 	if (!port)
42291bb76ff1Sjsg 		return NULL;
42301bb76ff1Sjsg 
42311bb76ff1Sjsg 	if (port->cached_edid)
4232f005ef32Sjsg 		drm_edid = drm_edid_dup(port->cached_edid);
4233f005ef32Sjsg 	else
4234f005ef32Sjsg 		drm_edid = drm_edid_read_ddc(connector, &port->aux.ddc);
4235f005ef32Sjsg 
42361bb76ff1Sjsg 	drm_dp_mst_topology_put_port(port);
4237f005ef32Sjsg 
4238f005ef32Sjsg 	return drm_edid;
4239f005ef32Sjsg }
4240f005ef32Sjsg EXPORT_SYMBOL(drm_dp_mst_edid_read);
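/*
 * Example (editor's sketch; "c" is a hypothetical MST connector wrapper):
 * typical use from a connector's get_modes hook:
 *
 *	const struct drm_edid *drm_edid =
 *		drm_dp_mst_edid_read(connector, c->mgr, c->port);
 *	int num_modes;
 *
 *	drm_edid_connector_update(connector, drm_edid);
 *	num_modes = drm_edid_connector_add_modes(connector);
 *	drm_edid_free(drm_edid);
 */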
4241f005ef32Sjsg 
4242f005ef32Sjsg /**
4243f005ef32Sjsg  * drm_dp_mst_get_edid() - get EDID for an MST port
4244f005ef32Sjsg  * @connector: toplevel connector to get EDID for
4245f005ef32Sjsg  * @mgr: manager for this port
4246f005ef32Sjsg  * @port: unverified pointer to a port.
4247f005ef32Sjsg  *
4248f005ef32Sjsg  * This function is deprecated; please use drm_dp_mst_edid_read() instead.
4249f005ef32Sjsg  *
4250f005ef32Sjsg  * This returns an EDID for the port connected to a connector.
4251f005ef32Sjsg  * It validates that the pointer still exists so the caller doesn't require a
4252f005ef32Sjsg  * reference.
4253f005ef32Sjsg  */
4254f005ef32Sjsg struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
4255f005ef32Sjsg 				 struct drm_dp_mst_topology_mgr *mgr,
4256f005ef32Sjsg 				 struct drm_dp_mst_port *port)
4257f005ef32Sjsg {
4258f005ef32Sjsg 	const struct drm_edid *drm_edid;
4259f005ef32Sjsg 	struct edid *edid;
4260f005ef32Sjsg 
4261f005ef32Sjsg 	drm_edid = drm_dp_mst_edid_read(connector, mgr, port);
4262f005ef32Sjsg 
4263f005ef32Sjsg 	edid = drm_edid_duplicate(drm_edid_raw(drm_edid));
4264f005ef32Sjsg 
4265f005ef32Sjsg 	drm_edid_free(drm_edid);
4266f005ef32Sjsg 
42671bb76ff1Sjsg 	return edid;
42681bb76ff1Sjsg }
42691bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_get_edid);
42701bb76ff1Sjsg 
42711bb76ff1Sjsg /**
42721bb76ff1Sjsg  * drm_dp_atomic_find_time_slots() - Find and add time slots to the state
42731bb76ff1Sjsg  * @state: global atomic state
42741bb76ff1Sjsg  * @mgr: MST topology manager for the port
42751bb76ff1Sjsg  * @port: port to find time slots for
42761bb76ff1Sjsg  * @pbn: bandwidth required for the mode in PBN
42771bb76ff1Sjsg  *
42781bb76ff1Sjsg  * Allocates time slots to @port, replacing any previous time slot allocations it may
42791bb76ff1Sjsg  * have had. Any atomic drivers which support MST must call this function in
42801bb76ff1Sjsg  * their &drm_encoder_helper_funcs.atomic_check() callback unconditionally to
42811bb76ff1Sjsg  * change the current time slot allocation for the new state, and ensure the MST
42821bb76ff1Sjsg  * atomic state is added whenever the state of payloads in the topology changes.
42831bb76ff1Sjsg  *
42841bb76ff1Sjsg  * Allocations set by this function are not checked against the bandwidth
42851bb76ff1Sjsg  * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
42861bb76ff1Sjsg  *
42871bb76ff1Sjsg  * Additionally, it is OK to call this function multiple times on the same
42881bb76ff1Sjsg  * @port as needed. It is not OK however, to call this function and
42891bb76ff1Sjsg  * drm_dp_atomic_release_time_slots() in the same atomic check phase.
42901bb76ff1Sjsg  *
42911bb76ff1Sjsg  * See also:
42921bb76ff1Sjsg  * drm_dp_atomic_release_time_slots()
42931bb76ff1Sjsg  * drm_dp_mst_atomic_check()
42941bb76ff1Sjsg  *
42951bb76ff1Sjsg  * Returns:
42961bb76ff1Sjsg  * Total slots in the atomic state assigned for this port, or a negative error
42971bb76ff1Sjsg  * code if the port no longer exists
42981bb76ff1Sjsg  */
42991bb76ff1Sjsg int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
43001bb76ff1Sjsg 				  struct drm_dp_mst_topology_mgr *mgr,
43011bb76ff1Sjsg 				  struct drm_dp_mst_port *port, int pbn)
43021bb76ff1Sjsg {
43031bb76ff1Sjsg 	struct drm_dp_mst_topology_state *topology_state;
43041bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *payload = NULL;
43051bb76ff1Sjsg 	struct drm_connector_state *conn_state;
43061bb76ff1Sjsg 	int prev_slots = 0, prev_bw = 0, req_slots;
43071bb76ff1Sjsg 
43081bb76ff1Sjsg 	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
43091bb76ff1Sjsg 	if (IS_ERR(topology_state))
43101bb76ff1Sjsg 		return PTR_ERR(topology_state);
43111bb76ff1Sjsg 
43121bb76ff1Sjsg 	conn_state = drm_atomic_get_new_connector_state(state, port->connector);
43131bb76ff1Sjsg 	topology_state->pending_crtc_mask |= drm_crtc_mask(conn_state->crtc);
43141bb76ff1Sjsg 
43151bb76ff1Sjsg 	/* Find the current allocation for this port, if any */
43161bb76ff1Sjsg 	payload = drm_atomic_get_mst_payload_state(topology_state, port);
43171bb76ff1Sjsg 	if (payload) {
43181bb76ff1Sjsg 		prev_slots = payload->time_slots;
43191bb76ff1Sjsg 		prev_bw = payload->pbn;
43201bb76ff1Sjsg 
43211bb76ff1Sjsg 		/*
43221bb76ff1Sjsg 		 * This should never happen, unless the driver tries
43231bb76ff1Sjsg 		 * releasing and allocating the same timeslot allocation,
43241bb76ff1Sjsg 		 * which is an error
43251bb76ff1Sjsg 		 */
43261bb76ff1Sjsg 		if (drm_WARN_ON(mgr->dev, payload->delete)) {
43271bb76ff1Sjsg 			drm_err(mgr->dev,
43281bb76ff1Sjsg 				"cannot allocate and release time slots on [MST PORT:%p] in the same state\n",
43291bb76ff1Sjsg 				port);
43301bb76ff1Sjsg 			return -EINVAL;
43311bb76ff1Sjsg 		}
43321bb76ff1Sjsg 	}
43331bb76ff1Sjsg 
43341bb76ff1Sjsg 	req_slots = DIV_ROUND_UP(pbn, topology_state->pbn_div);
43351bb76ff1Sjsg 
43361bb76ff1Sjsg 	drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] TU %d -> %d\n",
43371bb76ff1Sjsg 		       port->connector->base.id, port->connector->name,
43381bb76ff1Sjsg 		       port, prev_slots, req_slots);
43391bb76ff1Sjsg 	drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
43401bb76ff1Sjsg 		       port->connector->base.id, port->connector->name,
43411bb76ff1Sjsg 		       port, prev_bw, pbn);
43421bb76ff1Sjsg 
43431bb76ff1Sjsg 	/* Add the new allocation to the state, note the VCPI isn't assigned until the end */
43441bb76ff1Sjsg 	if (!payload) {
43451bb76ff1Sjsg 		payload = kzalloc(sizeof(*payload), GFP_KERNEL);
43461bb76ff1Sjsg 		if (!payload)
43471bb76ff1Sjsg 			return -ENOMEM;
43481bb76ff1Sjsg 
43491bb76ff1Sjsg 		drm_dp_mst_get_port_malloc(port);
43501bb76ff1Sjsg 		payload->port = port;
43511bb76ff1Sjsg 		payload->vc_start_slot = -1;
43521bb76ff1Sjsg 		list_add(&payload->next, &topology_state->payloads);
43531bb76ff1Sjsg 	}
43541bb76ff1Sjsg 	payload->time_slots = req_slots;
43551bb76ff1Sjsg 	payload->pbn = pbn;
43561bb76ff1Sjsg 
43571bb76ff1Sjsg 	return req_slots;
43581bb76ff1Sjsg }
43591bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_atomic_find_time_slots);
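/*
 * Example (editor's sketch): from a driver's
 * &drm_encoder_helper_funcs.atomic_check, converting the adjusted mode to a
 * PBN value and allocating timeslots for it ("bpp" and "port" come from the
 * driver's own state):
 *
 *	int pbn, slots;
 *
 *	pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp << 4);
 *	slots = drm_dp_atomic_find_time_slots(state, mgr, port, pbn);
 *	if (slots < 0)
 *		return slots;
 *
 * The matching drm_dp_atomic_release_time_slots() call belongs in the
 * connector's atomic_check, as described below.
 */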
43601bb76ff1Sjsg 
43611bb76ff1Sjsg /**
43621bb76ff1Sjsg  * drm_dp_atomic_release_time_slots() - Release allocated time slots
43631bb76ff1Sjsg  * @state: global atomic state
43641bb76ff1Sjsg  * @mgr: MST topology manager for the port
43651bb76ff1Sjsg  * @port: The port to release the time slots from
43661bb76ff1Sjsg  *
43671bb76ff1Sjsg  * Releases any time slots that have been allocated to a port in the atomic
43681bb76ff1Sjsg  * state. Any atomic drivers which support MST must call this function
43691bb76ff1Sjsg  * unconditionally in their &drm_connector_helper_funcs.atomic_check() callback.
43701bb76ff1Sjsg  * This helper will check whether time slots would be released by the new state and
43711bb76ff1Sjsg  * respond accordingly, along with ensuring the MST state is always added to the
43721bb76ff1Sjsg  * atomic state whenever a new state would modify the state of payloads on the
43731bb76ff1Sjsg  * topology.
43741bb76ff1Sjsg  *
43751bb76ff1Sjsg  * It is OK to call this even if @port has been removed from the system.
43761bb76ff1Sjsg  * Additionally, it is OK to call this function multiple times on the same
43771bb76ff1Sjsg  * @port as needed. It is not OK however, to call this function and
43781bb76ff1Sjsg  * drm_dp_atomic_find_time_slots() on the same @port in a single atomic check
43791bb76ff1Sjsg  * phase.
43801bb76ff1Sjsg  *
43811bb76ff1Sjsg  * See also:
43821bb76ff1Sjsg  * drm_dp_atomic_find_time_slots()
43831bb76ff1Sjsg  * drm_dp_mst_atomic_check()
43841bb76ff1Sjsg  *
43851bb76ff1Sjsg  * Returns:
43861bb76ff1Sjsg  * 0 on success, negative error code otherwise
43871bb76ff1Sjsg  */
43881bb76ff1Sjsg int drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
43891bb76ff1Sjsg 				     struct drm_dp_mst_topology_mgr *mgr,
43901bb76ff1Sjsg 				     struct drm_dp_mst_port *port)
43911bb76ff1Sjsg {
43921bb76ff1Sjsg 	struct drm_dp_mst_topology_state *topology_state;
43931bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *payload;
43941bb76ff1Sjsg 	struct drm_connector_state *old_conn_state, *new_conn_state;
43951bb76ff1Sjsg 	bool update_payload = true;
43961bb76ff1Sjsg 
43971bb76ff1Sjsg 	old_conn_state = drm_atomic_get_old_connector_state(state, port->connector);
43981bb76ff1Sjsg 	if (!old_conn_state->crtc)
43991bb76ff1Sjsg 		return 0;
44001bb76ff1Sjsg 
44011bb76ff1Sjsg 	/* If the CRTC isn't disabled by this state, don't release its payload */
44021bb76ff1Sjsg 	new_conn_state = drm_atomic_get_new_connector_state(state, port->connector);
44031bb76ff1Sjsg 	if (new_conn_state->crtc) {
44041bb76ff1Sjsg 		struct drm_crtc_state *crtc_state =
44051bb76ff1Sjsg 			drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
44061bb76ff1Sjsg 
44071bb76ff1Sjsg 		/* No modeset means no payload changes, so it's safe to not pull in the MST state */
44081bb76ff1Sjsg 		if (!crtc_state || !drm_atomic_crtc_needs_modeset(crtc_state))
44091bb76ff1Sjsg 			return 0;
44101bb76ff1Sjsg 
44111bb76ff1Sjsg 		if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
44121bb76ff1Sjsg 			update_payload = false;
44131bb76ff1Sjsg 	}
44141bb76ff1Sjsg 
44151bb76ff1Sjsg 	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
44161bb76ff1Sjsg 	if (IS_ERR(topology_state))
44171bb76ff1Sjsg 		return PTR_ERR(topology_state);
44181bb76ff1Sjsg 
44191bb76ff1Sjsg 	topology_state->pending_crtc_mask |= drm_crtc_mask(old_conn_state->crtc);
44201bb76ff1Sjsg 	if (!update_payload)
44211bb76ff1Sjsg 		return 0;
44221bb76ff1Sjsg 
44231bb76ff1Sjsg 	payload = drm_atomic_get_mst_payload_state(topology_state, port);
44241bb76ff1Sjsg 	if (WARN_ON(!payload)) {
44251bb76ff1Sjsg 		drm_err(mgr->dev, "No payload for [MST PORT:%p] found in mst state %p\n",
44261bb76ff1Sjsg 			port, &topology_state->base);
44271bb76ff1Sjsg 		return -EINVAL;
44281bb76ff1Sjsg 	}
44291bb76ff1Sjsg 
44301bb76ff1Sjsg 	if (new_conn_state->crtc)
44311bb76ff1Sjsg 		return 0;
44321bb76ff1Sjsg 
44331bb76ff1Sjsg 	drm_dbg_atomic(mgr->dev, "[MST PORT:%p] TU %d -> 0\n", port, payload->time_slots);
44341bb76ff1Sjsg 	if (!payload->delete) {
44351bb76ff1Sjsg 		payload->pbn = 0;
44361bb76ff1Sjsg 		payload->delete = true;
44371bb76ff1Sjsg 		topology_state->payload_mask &= ~BIT(payload->vcpi - 1);
44381bb76ff1Sjsg 	}
44391bb76ff1Sjsg 
44401bb76ff1Sjsg 	return 0;
44411bb76ff1Sjsg }
44421bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_atomic_release_time_slots);
44431bb76ff1Sjsg 
44441bb76ff1Sjsg /**
44451bb76ff1Sjsg  * drm_dp_mst_atomic_setup_commit() - setup_commit hook for MST helpers
44461bb76ff1Sjsg  * @state: global atomic state
44471bb76ff1Sjsg  *
44481bb76ff1Sjsg  * This function saves all of the &drm_crtc_commit structs in an atomic state that touch any CRTCs
44491bb76ff1Sjsg  * currently assigned to an MST topology. Drivers must call this hook from their
44501bb76ff1Sjsg  * &drm_mode_config_helper_funcs.atomic_commit_setup hook.
44511bb76ff1Sjsg  *
44521bb76ff1Sjsg  * Returns:
44531bb76ff1Sjsg  * 0 if all CRTC commits were retrieved successfully, negative error code otherwise
44541bb76ff1Sjsg  */
44551bb76ff1Sjsg int drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state)
44561bb76ff1Sjsg {
44571bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr;
44581bb76ff1Sjsg 	struct drm_dp_mst_topology_state *mst_state;
44591bb76ff1Sjsg 	struct drm_crtc *crtc;
44601bb76ff1Sjsg 	struct drm_crtc_state *crtc_state;
44611bb76ff1Sjsg 	int i, j, commit_idx, num_commit_deps;
44621bb76ff1Sjsg 
44631bb76ff1Sjsg 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
44641bb76ff1Sjsg 		if (!mst_state->pending_crtc_mask)
44651bb76ff1Sjsg 			continue;
44661bb76ff1Sjsg 
44671bb76ff1Sjsg 		num_commit_deps = hweight32(mst_state->pending_crtc_mask);
44681bb76ff1Sjsg 		mst_state->commit_deps = kmalloc_array(num_commit_deps,
44691bb76ff1Sjsg 						       sizeof(*mst_state->commit_deps), GFP_KERNEL);
44701bb76ff1Sjsg 		if (!mst_state->commit_deps)
44711bb76ff1Sjsg 			return -ENOMEM;
44721bb76ff1Sjsg 		mst_state->num_commit_deps = num_commit_deps;
44731bb76ff1Sjsg 
44741bb76ff1Sjsg 		commit_idx = 0;
44751bb76ff1Sjsg 		for_each_new_crtc_in_state(state, crtc, crtc_state, j) {
44761bb76ff1Sjsg 			if (mst_state->pending_crtc_mask & drm_crtc_mask(crtc)) {
44771bb76ff1Sjsg 				mst_state->commit_deps[commit_idx++] =
44781bb76ff1Sjsg 					drm_crtc_commit_get(crtc_state->commit);
44791bb76ff1Sjsg 			}
44801bb76ff1Sjsg 		}
44811bb76ff1Sjsg 	}
44821bb76ff1Sjsg 
44831bb76ff1Sjsg 	return 0;
44841bb76ff1Sjsg }
44851bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_atomic_setup_commit);
44861bb76ff1Sjsg 
44871bb76ff1Sjsg /**
44881bb76ff1Sjsg  * drm_dp_mst_atomic_wait_for_dependencies() - Wait for all pending commits on MST topologies,
44891bb76ff1Sjsg  * prepare new MST state for commit
44901bb76ff1Sjsg  * @state: global atomic state
44911bb76ff1Sjsg  *
44921bb76ff1Sjsg  * Goes through any MST topologies in this atomic state, and waits for any pending commits which
44931bb76ff1Sjsg  * touched CRTCs that were/are on an MST topology to be programmed to hardware and flipped to before
44941bb76ff1Sjsg  * returning. This is to prevent multiple non-blocking commits affecting an MST topology from racing
44951bb76ff1Sjsg  * with each other by forcing them to be executed sequentially in situations where the only resources
44961bb76ff1Sjsg  * the modeset objects in these commits share are an MST topology.
44971bb76ff1Sjsg  *
44981bb76ff1Sjsg  * This function also prepares the new MST state for commit by performing some state preparation
44991bb76ff1Sjsg  * which can't be done until this point, such as reading back the final VC start slots (which are
45001bb76ff1Sjsg  * determined at commit-time) from the previous state.
45011bb76ff1Sjsg  *
45021bb76ff1Sjsg  * All MST drivers must call this function after calling drm_atomic_helper_wait_for_dependencies(),
45031bb76ff1Sjsg  * or whatever their equivalent of that is.
45041bb76ff1Sjsg  */
45051bb76ff1Sjsg void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state)
45061bb76ff1Sjsg {
45071bb76ff1Sjsg 	struct drm_dp_mst_topology_state *old_mst_state, *new_mst_state;
45081bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr;
45091bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *old_payload, *new_payload;
45101bb76ff1Sjsg 	int i, j, ret;
45111bb76ff1Sjsg 
45121bb76ff1Sjsg 	for_each_oldnew_mst_mgr_in_state(state, mgr, old_mst_state, new_mst_state, i) {
45131bb76ff1Sjsg 		for (j = 0; j < old_mst_state->num_commit_deps; j++) {
45141bb76ff1Sjsg 			ret = drm_crtc_commit_wait(old_mst_state->commit_deps[j]);
45151bb76ff1Sjsg 			if (ret < 0)
45161bb76ff1Sjsg 				drm_err(state->dev, "Failed to wait for %s: %d\n",
45171bb76ff1Sjsg 					old_mst_state->commit_deps[j]->crtc->name, ret);
45181bb76ff1Sjsg 		}
45191bb76ff1Sjsg 
45201bb76ff1Sjsg 		/* Now that previous state is committed, it's safe to copy over the start slot
45211bb76ff1Sjsg 		 * assignments
45221bb76ff1Sjsg 		 */
45231bb76ff1Sjsg 		list_for_each_entry(old_payload, &old_mst_state->payloads, next) {
45241bb76ff1Sjsg 			if (old_payload->delete)
45251bb76ff1Sjsg 				continue;
45261bb76ff1Sjsg 
45271bb76ff1Sjsg 			new_payload = drm_atomic_get_mst_payload_state(new_mst_state,
45281bb76ff1Sjsg 								       old_payload->port);
45291bb76ff1Sjsg 			new_payload->vc_start_slot = old_payload->vc_start_slot;
45301bb76ff1Sjsg 		}
45311bb76ff1Sjsg 	}
45321bb76ff1Sjsg }
45331bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_atomic_wait_for_dependencies);
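/*
 * Example (editor's sketch): the expected ordering in a driver's
 * commit_tail implementation:
 *
 *	drm_atomic_helper_wait_for_dependencies(state);
 *	drm_dp_mst_atomic_wait_for_dependencies(state);
 *	// ...program the hardware and update the MST payloads...
 */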
45341bb76ff1Sjsg 
45351bb76ff1Sjsg /**
45361bb76ff1Sjsg  * drm_dp_mst_root_conn_atomic_check() - Serialize CRTC commits on MST-capable connectors operating
45371bb76ff1Sjsg  * in SST mode
45381bb76ff1Sjsg  * @new_conn_state: The new connector state of the &drm_connector
45391bb76ff1Sjsg  * @mgr: The MST topology manager for the &drm_connector
45401bb76ff1Sjsg  *
45411bb76ff1Sjsg  * Since MST uses fake &drm_encoder structs, the generic atomic modesetting code isn't able to
45421bb76ff1Sjsg  * serialize non-blocking commits happening on the real DP connector of an MST topology switching
45431bb76ff1Sjsg  * into/away from MST mode - as the CRTC on the real DP connector and the CRTCs on the connector's
45441bb76ff1Sjsg  * MST topology will never share the same &drm_encoder.
45451bb76ff1Sjsg  *
45461bb76ff1Sjsg  * This function takes care of this serialization issue, by checking a root MST connector's atomic
45471bb76ff1Sjsg  * state to determine if it is about to have a modeset - and then pulling in the MST topology state
45481bb76ff1Sjsg  * if so, along with adding any relevant CRTCs to &drm_dp_mst_topology_state.pending_crtc_mask.
45491bb76ff1Sjsg  *
45501bb76ff1Sjsg  * Drivers implementing MST must call this function from the
45511bb76ff1Sjsg  * &drm_connector_helper_funcs.atomic_check hook of any physical DP &drm_connector capable of
45521bb76ff1Sjsg  * driving MST sinks.
45531bb76ff1Sjsg  *
45541bb76ff1Sjsg  * Returns:
45551bb76ff1Sjsg  * 0 on success, negative error code otherwise
45561bb76ff1Sjsg  */
45571bb76ff1Sjsg int drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state,
45581bb76ff1Sjsg 				      struct drm_dp_mst_topology_mgr *mgr)
45591bb76ff1Sjsg {
45601bb76ff1Sjsg 	struct drm_atomic_state *state = new_conn_state->state;
45611bb76ff1Sjsg 	struct drm_connector_state *old_conn_state =
45621bb76ff1Sjsg 		drm_atomic_get_old_connector_state(state, new_conn_state->connector);
45631bb76ff1Sjsg 	struct drm_crtc_state *crtc_state;
45641bb76ff1Sjsg 	struct drm_dp_mst_topology_state *mst_state = NULL;
45651bb76ff1Sjsg 
45661bb76ff1Sjsg 	if (new_conn_state->crtc) {
45671bb76ff1Sjsg 		crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
45681bb76ff1Sjsg 		if (crtc_state && drm_atomic_crtc_needs_modeset(crtc_state)) {
45691bb76ff1Sjsg 			mst_state = drm_atomic_get_mst_topology_state(state, mgr);
45701bb76ff1Sjsg 			if (IS_ERR(mst_state))
45711bb76ff1Sjsg 				return PTR_ERR(mst_state);
45721bb76ff1Sjsg 
45731bb76ff1Sjsg 			mst_state->pending_crtc_mask |= drm_crtc_mask(new_conn_state->crtc);
45741bb76ff1Sjsg 		}
45751bb76ff1Sjsg 	}
45761bb76ff1Sjsg 
45771bb76ff1Sjsg 	if (old_conn_state->crtc) {
45781bb76ff1Sjsg 		crtc_state = drm_atomic_get_new_crtc_state(state, old_conn_state->crtc);
45791bb76ff1Sjsg 		if (crtc_state && drm_atomic_crtc_needs_modeset(crtc_state)) {
45801bb76ff1Sjsg 			if (!mst_state) {
45811bb76ff1Sjsg 				mst_state = drm_atomic_get_mst_topology_state(state, mgr);
45821bb76ff1Sjsg 				if (IS_ERR(mst_state))
45831bb76ff1Sjsg 					return PTR_ERR(mst_state);
45841bb76ff1Sjsg 			}
45851bb76ff1Sjsg 
45861bb76ff1Sjsg 			mst_state->pending_crtc_mask |= drm_crtc_mask(old_conn_state->crtc);
45871bb76ff1Sjsg 		}
45881bb76ff1Sjsg 	}
45891bb76ff1Sjsg 
45901bb76ff1Sjsg 	return 0;
45911bb76ff1Sjsg }
45921bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_root_conn_atomic_check);
45931bb76ff1Sjsg 
45941bb76ff1Sjsg /**
45951bb76ff1Sjsg  * drm_dp_mst_update_slots() - updates the slot info depending on the DP encoding format
45961bb76ff1Sjsg  * @mst_state: mst_state to update
45971bb76ff1Sjsg  * @link_encoding_cap: the encoding format on the link
45981bb76ff1Sjsg  */
45991bb76ff1Sjsg void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap)
46001bb76ff1Sjsg {
46011bb76ff1Sjsg 	if (link_encoding_cap == DP_CAP_ANSI_128B132B) {
46021bb76ff1Sjsg 		mst_state->total_avail_slots = 64;
46031bb76ff1Sjsg 		mst_state->start_slot = 0;
46041bb76ff1Sjsg 	} else {
46051bb76ff1Sjsg 		mst_state->total_avail_slots = 63;
46061bb76ff1Sjsg 		mst_state->start_slot = 1;
46071bb76ff1Sjsg 	}
46081bb76ff1Sjsg 
46091bb76ff1Sjsg 	DRM_DEBUG_KMS("%s encoding format on mst_state 0x%p\n",
46101bb76ff1Sjsg 		      (link_encoding_cap == DP_CAP_ANSI_128B132B) ? "128b/132b":"8b/10b",
46111bb76ff1Sjsg 		      mst_state);
46121bb76ff1Sjsg }
46131bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_update_slots);
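/*
 * Example (editor's sketch; "link_is_uhbr" is the driver's own notion of
 * whether the link trained at a 128b/132b UHBR rate):
 *
 *	drm_dp_mst_update_slots(mst_state,
 *				link_is_uhbr ? DP_CAP_ANSI_128B132B :
 *					       DP_CAP_ANSI_8B10B);
 */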
46141bb76ff1Sjsg 
46151bb76ff1Sjsg static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
46161bb76ff1Sjsg 				     int id, u8 start_slot, u8 num_slots)
46171bb76ff1Sjsg {
46181bb76ff1Sjsg 	u8 payload_alloc[3], status;
46191bb76ff1Sjsg 	int ret;
46201bb76ff1Sjsg 	int retries = 0;
46211bb76ff1Sjsg 
46221bb76ff1Sjsg 	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
46231bb76ff1Sjsg 			   DP_PAYLOAD_TABLE_UPDATED);
46241bb76ff1Sjsg 
46251bb76ff1Sjsg 	payload_alloc[0] = id;
46261bb76ff1Sjsg 	payload_alloc[1] = start_slot;
46271bb76ff1Sjsg 	payload_alloc[2] = num_slots;
46281bb76ff1Sjsg 
46291bb76ff1Sjsg 	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
46301bb76ff1Sjsg 	if (ret != 3) {
46311bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "failed to write payload allocation %d\n", ret);
46321bb76ff1Sjsg 		goto fail;
46331bb76ff1Sjsg 	}
46341bb76ff1Sjsg 
46351bb76ff1Sjsg retry:
46361bb76ff1Sjsg 	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
46371bb76ff1Sjsg 	if (ret < 0) {
46381bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "failed to read payload table status %d\n", ret);
46391bb76ff1Sjsg 		goto fail;
46401bb76ff1Sjsg 	}
46411bb76ff1Sjsg 
46421bb76ff1Sjsg 	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
46431bb76ff1Sjsg 		retries++;
46441bb76ff1Sjsg 		if (retries < 20) {
46451bb76ff1Sjsg 			usleep_range(10000, 20000);
46461bb76ff1Sjsg 			goto retry;
46471bb76ff1Sjsg 		}
46481bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "status not set after read payload table status %d\n",
46491bb76ff1Sjsg 			    status);
46501bb76ff1Sjsg 		ret = -EINVAL;
46511bb76ff1Sjsg 		goto fail;
46521bb76ff1Sjsg 	}
46531bb76ff1Sjsg 	ret = 0;
46541bb76ff1Sjsg fail:
46551bb76ff1Sjsg 	return ret;
46561bb76ff1Sjsg }
46571bb76ff1Sjsg 
46581bb76ff1Sjsg static int do_get_act_status(struct drm_dp_aux *aux)
46591bb76ff1Sjsg {
46601bb76ff1Sjsg 	int ret;
46611bb76ff1Sjsg 	u8 status;
46621bb76ff1Sjsg 
46631bb76ff1Sjsg 	ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
46641bb76ff1Sjsg 	if (ret < 0)
46651bb76ff1Sjsg 		return ret;
46661bb76ff1Sjsg 
46671bb76ff1Sjsg 	return status;
46681bb76ff1Sjsg }
46691bb76ff1Sjsg 
46701bb76ff1Sjsg /**
46711bb76ff1Sjsg  * drm_dp_check_act_status() - Polls for ACT handled status.
46721bb76ff1Sjsg  * @mgr: manager to use
46731bb76ff1Sjsg  *
46741bb76ff1Sjsg  * Tries waiting for the MST hub to finish updating its payload table by
46751bb76ff1Sjsg  * polling for the ACT handled bit for up to 3 seconds (yes, some hubs really
46761bb76ff1Sjsg  * take that long).
46771bb76ff1Sjsg  *
46781bb76ff1Sjsg  * Returns:
46791bb76ff1Sjsg  * 0 if the ACT was handled in time, negative error code on failure.
46801bb76ff1Sjsg  */
46811bb76ff1Sjsg int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
46821bb76ff1Sjsg {
46831bb76ff1Sjsg 	/*
46841bb76ff1Sjsg 	 * There doesn't seem to be any recommended retry count or timeout in
46851bb76ff1Sjsg 	 * the MST specification. Since some hubs have been observed to take
46861bb76ff1Sjsg 	 * over 1 second to update their payload allocations under certain
46871bb76ff1Sjsg 	 * conditions, we use a rather large timeout value.
46881bb76ff1Sjsg 	 */
46891bb76ff1Sjsg 	const int timeout_ms = 3000;
46901bb76ff1Sjsg 	int ret, status;
46911bb76ff1Sjsg 
46921bb76ff1Sjsg 	ret = readx_poll_timeout(do_get_act_status, mgr->aux, status,
46931bb76ff1Sjsg 				 status & DP_PAYLOAD_ACT_HANDLED || status < 0,
46941bb76ff1Sjsg 				 200, timeout_ms * USEC_PER_MSEC);
46951bb76ff1Sjsg 	if (ret < 0 && status >= 0) {
46961bb76ff1Sjsg 		drm_err(mgr->dev, "Failed to get ACT after %dms, last status: %02x\n",
46971bb76ff1Sjsg 			timeout_ms, status);
46981bb76ff1Sjsg 		return -EINVAL;
46991bb76ff1Sjsg 	} else if (status < 0) {
47001bb76ff1Sjsg 		/*
47011bb76ff1Sjsg 		 * Failure here isn't unexpected - the hub may have
47021bb76ff1Sjsg 		 * just been unplugged
47031bb76ff1Sjsg 		 */
47041bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "Failed to read payload table status: %d\n", status);
47051bb76ff1Sjsg 		return status;
47061bb76ff1Sjsg 	}
47071bb76ff1Sjsg 
47081bb76ff1Sjsg 	return 0;
47091bb76ff1Sjsg }
47101bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_check_act_status);
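
/*
 * Hypothetical usage sketch (not part of this file): a driver that has
 * just reprogrammed the payload table would typically wait for the ACT
 * to be handled before enabling the pixel stream on the new time slots:
 *
 *	ret = drm_dp_check_act_status(mgr);
 *	if (ret)
 *		return ret;
 */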
47111bb76ff1Sjsg 
47121bb76ff1Sjsg /**
47131bb76ff1Sjsg  * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
4714065458a7Sjsg  * @clock: dot clock
4715065458a7Sjsg  * @bpp: bpp as .4 binary fixed point
47161bb76ff1Sjsg  *
47171bb76ff1Sjsg  * This uses the formula in the spec to calculate the PBN value for a mode.
47181bb76ff1Sjsg  */
4719065458a7Sjsg int drm_dp_calc_pbn_mode(int clock, int bpp)
47201bb76ff1Sjsg {
47211bb76ff1Sjsg 	/*
47221bb76ff1Sjsg 	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
47231bb76ff1Sjsg 	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on a
47241bb76ff1Sjsg 	 * common multiplier to render an integer PBN for all link rate/lane
47251bb76ff1Sjsg 	 * count combinations.
47261bb76ff1Sjsg 	 * calculate
47271bb76ff1Sjsg 	 * peak_kbps *= (1006/1000)
47281bb76ff1Sjsg 	 * peak_kbps *= (64/54)
47291bb76ff1Sjsg 	 * peak_kbps *= 8    convert to bytes
47301bb76ff1Sjsg 	 */
4731065458a7Sjsg 	return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006 >> 4),
4732065458a7Sjsg 				1000 * 8 * 54 * 1000);
47331bb76ff1Sjsg }
47341bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
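
/*
 * Worked example (illustrative only): for a 1920x1080@60 mode with a
 * 148500 kHz dot clock at 24 bpp, @bpp is passed as 24 << 4 = 384 in .4
 * fixed point, and the expression above evaluates to
 *
 *	DIV_ROUND_UP_ULL(148500 * 384 * 4024ULL, 432000000) = 532 PBN
 *
 * since (64 * 1006) >> 4 = 4024 and 1000 * 8 * 54 * 1000 = 432000000.
 */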
47351bb76ff1Sjsg 
47361bb76ff1Sjsg /* we want to kick the TX after we've acked the up/down IRQs. */
47371bb76ff1Sjsg static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
47381bb76ff1Sjsg {
47391bb76ff1Sjsg 	queue_work(system_long_wq, &mgr->tx_work);
47401bb76ff1Sjsg }
47411bb76ff1Sjsg 
47421bb76ff1Sjsg /*
47431bb76ff1Sjsg  * Helper function for parsing DP device types into convenient strings
47441bb76ff1Sjsg  * for use with dp_mst_topology
47451bb76ff1Sjsg  */
47461bb76ff1Sjsg static const char *pdt_to_string(u8 pdt)
47471bb76ff1Sjsg {
47481bb76ff1Sjsg 	switch (pdt) {
47491bb76ff1Sjsg 	case DP_PEER_DEVICE_NONE:
47501bb76ff1Sjsg 		return "NONE";
47511bb76ff1Sjsg 	case DP_PEER_DEVICE_SOURCE_OR_SST:
47521bb76ff1Sjsg 		return "SOURCE OR SST";
47531bb76ff1Sjsg 	case DP_PEER_DEVICE_MST_BRANCHING:
47541bb76ff1Sjsg 		return "MST BRANCHING";
47551bb76ff1Sjsg 	case DP_PEER_DEVICE_SST_SINK:
47561bb76ff1Sjsg 		return "SST SINK";
47571bb76ff1Sjsg 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
47581bb76ff1Sjsg 		return "DP LEGACY CONV";
47591bb76ff1Sjsg 	default:
47601bb76ff1Sjsg 		return "ERR";
47611bb76ff1Sjsg 	}
47621bb76ff1Sjsg }
47631bb76ff1Sjsg 
47641bb76ff1Sjsg static void drm_dp_mst_dump_mstb(struct seq_file *m,
47651bb76ff1Sjsg 				 struct drm_dp_mst_branch *mstb)
47661bb76ff1Sjsg {
47671bb76ff1Sjsg 	struct drm_dp_mst_port *port;
47681bb76ff1Sjsg 	int tabs = mstb->lct;
47691bb76ff1Sjsg 	char prefix[10];
47701bb76ff1Sjsg 	int i;
47711bb76ff1Sjsg 
47721bb76ff1Sjsg 	for (i = 0; i < tabs; i++)
47731bb76ff1Sjsg 		prefix[i] = '\t';
47741bb76ff1Sjsg 	prefix[i] = '\0';
47751bb76ff1Sjsg 
47761bb76ff1Sjsg 	seq_printf(m, "%smstb - [%p]: num_ports: %d\n", prefix, mstb, mstb->num_ports);
47771bb76ff1Sjsg 	list_for_each_entry(port, &mstb->ports, next) {
47781bb76ff1Sjsg 		seq_printf(m, "%sport %d - [%p] (%s - %s): ddps: %d, ldps: %d, sdp: %d/%d, fec: %s, conn: %p\n",
47791bb76ff1Sjsg 			   prefix,
47801bb76ff1Sjsg 			   port->port_num,
47811bb76ff1Sjsg 			   port,
47821bb76ff1Sjsg 			   port->input ? "input" : "output",
47831bb76ff1Sjsg 			   pdt_to_string(port->pdt),
47841bb76ff1Sjsg 			   port->ddps,
47851bb76ff1Sjsg 			   port->ldps,
47861bb76ff1Sjsg 			   port->num_sdp_streams,
47871bb76ff1Sjsg 			   port->num_sdp_stream_sinks,
47881bb76ff1Sjsg 			   port->fec_capable ? "true" : "false",
47891bb76ff1Sjsg 			   port->connector);
47901bb76ff1Sjsg 		if (port->mstb)
47911bb76ff1Sjsg 			drm_dp_mst_dump_mstb(m, port->mstb);
47921bb76ff1Sjsg 	}
47931bb76ff1Sjsg }
47941bb76ff1Sjsg 
47951bb76ff1Sjsg #define DP_PAYLOAD_TABLE_SIZE		64
47961bb76ff1Sjsg 
47971bb76ff1Sjsg static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
47981bb76ff1Sjsg 				  char *buf)
47991bb76ff1Sjsg {
48001bb76ff1Sjsg 	int i;
48011bb76ff1Sjsg 
48021bb76ff1Sjsg 	for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
48031bb76ff1Sjsg 		if (drm_dp_dpcd_read(mgr->aux,
48041bb76ff1Sjsg 				     DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
48051bb76ff1Sjsg 				     &buf[i], 16) != 16)
48061bb76ff1Sjsg 			return false;
48071bb76ff1Sjsg 	}
48081bb76ff1Sjsg 	return true;
48091bb76ff1Sjsg }
48101bb76ff1Sjsg 
48111bb76ff1Sjsg static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
48121bb76ff1Sjsg 			       struct drm_dp_mst_port *port, char *name,
48131bb76ff1Sjsg 			       int namelen)
48141bb76ff1Sjsg {
48151bb76ff1Sjsg 	struct edid *mst_edid;
48161bb76ff1Sjsg 
48171bb76ff1Sjsg 	mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
48181bb76ff1Sjsg 	drm_edid_get_monitor_name(mst_edid, name, namelen);
48191bb76ff1Sjsg 	kfree(mst_edid);
48201bb76ff1Sjsg }
48211bb76ff1Sjsg 
48221bb76ff1Sjsg /**
48231bb76ff1Sjsg  * drm_dp_mst_dump_topology(): dump topology to seq file.
48241bb76ff1Sjsg  * @m: seq_file to dump output to
48251bb76ff1Sjsg  * @mgr: manager to dump current topology for.
48261bb76ff1Sjsg  *
48271bb76ff1Sjsg  * Helper to dump the MST topology to a seq file for debugfs.
48281bb76ff1Sjsg  */
48291bb76ff1Sjsg void drm_dp_mst_dump_topology(struct seq_file *m,
48301bb76ff1Sjsg 			      struct drm_dp_mst_topology_mgr *mgr)
48311bb76ff1Sjsg {
48321bb76ff1Sjsg 	struct drm_dp_mst_topology_state *state;
48331bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *payload;
48341bb76ff1Sjsg 	int i, ret;
48351bb76ff1Sjsg 
48361bb76ff1Sjsg 	mutex_lock(&mgr->lock);
48371bb76ff1Sjsg 	if (mgr->mst_primary)
48381bb76ff1Sjsg 		drm_dp_mst_dump_mstb(m, mgr->mst_primary);
48391bb76ff1Sjsg 
48401bb76ff1Sjsg 	/* dump VCPIs */
48411bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
48421bb76ff1Sjsg 
48431bb76ff1Sjsg 	ret = drm_modeset_lock_single_interruptible(&mgr->base.lock);
48441bb76ff1Sjsg 	if (ret < 0)
48451bb76ff1Sjsg 		return;
48461bb76ff1Sjsg 
48471bb76ff1Sjsg 	state = to_drm_dp_mst_topology_state(mgr->base.state);
48481bb76ff1Sjsg 	seq_printf(m, "\n*** Atomic state info ***\n");
48491bb76ff1Sjsg 	seq_printf(m, "payload_mask: %x, max_payloads: %d, start_slot: %u, pbn_div: %d\n",
48501bb76ff1Sjsg 		   state->payload_mask, mgr->max_payloads, state->start_slot, state->pbn_div);
48511bb76ff1Sjsg 
48521bb76ff1Sjsg 	seq_printf(m, "\n| idx | port | vcpi | slots | pbn | dsc |     sink name     |\n");
48531bb76ff1Sjsg 	for (i = 0; i < mgr->max_payloads; i++) {
48541bb76ff1Sjsg 		list_for_each_entry(payload, &state->payloads, next) {
48551bb76ff1Sjsg 			char name[14];
48561bb76ff1Sjsg 
48571bb76ff1Sjsg 			if (payload->vcpi != i || payload->delete)
48581bb76ff1Sjsg 				continue;
48591bb76ff1Sjsg 
48601bb76ff1Sjsg 			fetch_monitor_name(mgr, payload->port, name, sizeof(name));
48611bb76ff1Sjsg 			seq_printf(m, " %5d %6d %6d %02d - %02d %5d %5s %19s\n",
48621bb76ff1Sjsg 				   i,
48631bb76ff1Sjsg 				   payload->port->port_num,
48641bb76ff1Sjsg 				   payload->vcpi,
48651bb76ff1Sjsg 				   payload->vc_start_slot,
48661bb76ff1Sjsg 				   payload->vc_start_slot + payload->time_slots - 1,
48671bb76ff1Sjsg 				   payload->pbn,
48681bb76ff1Sjsg 				   payload->dsc_enabled ? "Y" : "N",
48691bb76ff1Sjsg 				   (*name != 0) ? name : "Unknown");
48701bb76ff1Sjsg 		}
48711bb76ff1Sjsg 	}
48721bb76ff1Sjsg 
48731bb76ff1Sjsg 	seq_printf(m, "\n*** DPCD Info ***\n");
48741bb76ff1Sjsg 	mutex_lock(&mgr->lock);
48751bb76ff1Sjsg 	if (mgr->mst_primary) {
48761bb76ff1Sjsg 		u8 buf[DP_PAYLOAD_TABLE_SIZE];
48771bb76ff1Sjsg 		int ret;
48781bb76ff1Sjsg 
48791bb76ff1Sjsg 		if (drm_dp_read_dpcd_caps(mgr->aux, buf) < 0) {
48801bb76ff1Sjsg 			seq_printf(m, "dpcd read failed\n");
48811bb76ff1Sjsg 			goto out;
48821bb76ff1Sjsg 		}
48831bb76ff1Sjsg 		seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
48841bb76ff1Sjsg 
48851bb76ff1Sjsg 		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
48861bb76ff1Sjsg 		if (ret != 2) {
48871bb76ff1Sjsg 			seq_printf(m, "faux/mst read failed\n");
48881bb76ff1Sjsg 			goto out;
48891bb76ff1Sjsg 		}
48901bb76ff1Sjsg 		seq_printf(m, "faux/mst: %*ph\n", 2, buf);
48911bb76ff1Sjsg 
48921bb76ff1Sjsg 		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
48931bb76ff1Sjsg 		if (ret != 1) {
48941bb76ff1Sjsg 			seq_printf(m, "mst ctrl read failed\n");
48951bb76ff1Sjsg 			goto out;
48961bb76ff1Sjsg 		}
48971bb76ff1Sjsg 		seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
48981bb76ff1Sjsg 
48991bb76ff1Sjsg 		/* dump the standard OUI branch header */
49001bb76ff1Sjsg 		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
49011bb76ff1Sjsg 		if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
49021bb76ff1Sjsg 			seq_printf(m, "branch oui read failed\n");
49031bb76ff1Sjsg 			goto out;
49041bb76ff1Sjsg 		}
49051bb76ff1Sjsg 		seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
49061bb76ff1Sjsg 
49071bb76ff1Sjsg 		for (i = 0x3; i < 0x8 && buf[i]; i++)
49081bb76ff1Sjsg 			seq_printf(m, "%c", buf[i]);
49091bb76ff1Sjsg 		seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
49101bb76ff1Sjsg 			   buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
49111bb76ff1Sjsg 		if (dump_dp_payload_table(mgr, buf))
49121bb76ff1Sjsg 			seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
49131bb76ff1Sjsg 	}
49141bb76ff1Sjsg 
49151bb76ff1Sjsg out:
49161bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
49171bb76ff1Sjsg 	drm_modeset_unlock(&mgr->base.lock);
49181bb76ff1Sjsg }
49191bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_dump_topology);
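
/*
 * Hedged debugfs hookup sketch (names are hypothetical, not from this
 * file): drivers usually expose this dump through a seq_file based
 * debugfs entry along these lines:
 *
 *	static int mst_topology_show(struct seq_file *m, void *data)
 *	{
 *		struct drm_dp_mst_topology_mgr *mgr = m->private;
 *
 *		drm_dp_mst_dump_topology(m, mgr);
 *		return 0;
 *	}
 *	DEFINE_SHOW_ATTRIBUTE(mst_topology);
 */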
49201bb76ff1Sjsg 
49211bb76ff1Sjsg static void drm_dp_tx_work(struct work_struct *work)
49221bb76ff1Sjsg {
49231bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
49241bb76ff1Sjsg 
49251bb76ff1Sjsg 	mutex_lock(&mgr->qlock);
49261bb76ff1Sjsg 	if (!list_empty(&mgr->tx_msg_downq))
49271bb76ff1Sjsg 		process_single_down_tx_qlock(mgr);
49281bb76ff1Sjsg 	mutex_unlock(&mgr->qlock);
49291bb76ff1Sjsg }
49301bb76ff1Sjsg 
49311bb76ff1Sjsg static inline void
49321bb76ff1Sjsg drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
49331bb76ff1Sjsg {
49341bb76ff1Sjsg 	drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
49351bb76ff1Sjsg 
49361bb76ff1Sjsg 	if (port->connector) {
49371bb76ff1Sjsg 		drm_connector_unregister(port->connector);
49381bb76ff1Sjsg 		drm_connector_put(port->connector);
49391bb76ff1Sjsg 	}
49401bb76ff1Sjsg 
49411bb76ff1Sjsg 	drm_dp_mst_put_port_malloc(port);
49421bb76ff1Sjsg }
49431bb76ff1Sjsg 
49441bb76ff1Sjsg static inline void
49451bb76ff1Sjsg drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
49461bb76ff1Sjsg {
49471bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
49481bb76ff1Sjsg 	struct drm_dp_mst_port *port, *port_tmp;
49491bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg, *txmsg_tmp;
49501bb76ff1Sjsg 	bool wake_tx = false;
49511bb76ff1Sjsg 
49521bb76ff1Sjsg 	mutex_lock(&mgr->lock);
49531bb76ff1Sjsg 	list_for_each_entry_safe(port, port_tmp, &mstb->ports, next) {
49541bb76ff1Sjsg 		list_del(&port->next);
49551bb76ff1Sjsg 		drm_dp_mst_topology_put_port(port);
49561bb76ff1Sjsg 	}
49571bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
49581bb76ff1Sjsg 
49591bb76ff1Sjsg 	/* drop any tx slot msg */
49601bb76ff1Sjsg 	mutex_lock(&mstb->mgr->qlock);
49611bb76ff1Sjsg 	list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) {
49621bb76ff1Sjsg 		if (txmsg->dst != mstb)
49631bb76ff1Sjsg 			continue;
49641bb76ff1Sjsg 
49651bb76ff1Sjsg 		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
49661bb76ff1Sjsg 		list_del(&txmsg->next);
49671bb76ff1Sjsg 		wake_tx = true;
49681bb76ff1Sjsg 	}
49691bb76ff1Sjsg 	mutex_unlock(&mstb->mgr->qlock);
49701bb76ff1Sjsg 
49711bb76ff1Sjsg 	if (wake_tx)
49721bb76ff1Sjsg 		wake_up_all(&mstb->mgr->tx_waitq);
49731bb76ff1Sjsg 
49741bb76ff1Sjsg 	drm_dp_mst_put_mstb_malloc(mstb);
49751bb76ff1Sjsg }
49761bb76ff1Sjsg 
49771bb76ff1Sjsg static void drm_dp_delayed_destroy_work(struct work_struct *work)
49781bb76ff1Sjsg {
49791bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr =
49801bb76ff1Sjsg 		container_of(work, struct drm_dp_mst_topology_mgr,
49811bb76ff1Sjsg 			     delayed_destroy_work);
49821bb76ff1Sjsg 	bool send_hotplug = false, go_again;
49831bb76ff1Sjsg 
49841bb76ff1Sjsg 	/*
49851bb76ff1Sjsg 	 * Not a regular list traverse as we have to drop the destroy
49861bb76ff1Sjsg 	 * connector lock before destroying the mstb/port, to avoid AB->BA
49871bb76ff1Sjsg 	 * ordering between this lock and the config mutex.
49881bb76ff1Sjsg 	 */
49891bb76ff1Sjsg 	do {
49901bb76ff1Sjsg 		go_again = false;
49911bb76ff1Sjsg 
49921bb76ff1Sjsg 		for (;;) {
49931bb76ff1Sjsg 			struct drm_dp_mst_branch *mstb;
49941bb76ff1Sjsg 
49951bb76ff1Sjsg 			mutex_lock(&mgr->delayed_destroy_lock);
49961bb76ff1Sjsg 			mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
49971bb76ff1Sjsg 							struct drm_dp_mst_branch,
49981bb76ff1Sjsg 							destroy_next);
49991bb76ff1Sjsg 			if (mstb)
50001bb76ff1Sjsg 				list_del(&mstb->destroy_next);
50011bb76ff1Sjsg 			mutex_unlock(&mgr->delayed_destroy_lock);
50021bb76ff1Sjsg 
50031bb76ff1Sjsg 			if (!mstb)
50041bb76ff1Sjsg 				break;
50051bb76ff1Sjsg 
50061bb76ff1Sjsg 			drm_dp_delayed_destroy_mstb(mstb);
50071bb76ff1Sjsg 			go_again = true;
50081bb76ff1Sjsg 		}
50091bb76ff1Sjsg 
50101bb76ff1Sjsg 		for (;;) {
50111bb76ff1Sjsg 			struct drm_dp_mst_port *port;
50121bb76ff1Sjsg 
50131bb76ff1Sjsg 			mutex_lock(&mgr->delayed_destroy_lock);
50141bb76ff1Sjsg 			port = list_first_entry_or_null(&mgr->destroy_port_list,
50151bb76ff1Sjsg 							struct drm_dp_mst_port,
50161bb76ff1Sjsg 							next);
50171bb76ff1Sjsg 			if (port)
50181bb76ff1Sjsg 				list_del(&port->next);
50191bb76ff1Sjsg 			mutex_unlock(&mgr->delayed_destroy_lock);
50201bb76ff1Sjsg 
50211bb76ff1Sjsg 			if (!port)
50221bb76ff1Sjsg 				break;
50231bb76ff1Sjsg 
50241bb76ff1Sjsg 			drm_dp_delayed_destroy_port(port);
50251bb76ff1Sjsg 			send_hotplug = true;
50261bb76ff1Sjsg 			go_again = true;
50271bb76ff1Sjsg 		}
50281bb76ff1Sjsg 	} while (go_again);
50291bb76ff1Sjsg 
50301bb76ff1Sjsg 	if (send_hotplug)
50311bb76ff1Sjsg 		drm_kms_helper_hotplug_event(mgr->dev);
50321bb76ff1Sjsg }
50331bb76ff1Sjsg 
50341bb76ff1Sjsg static struct drm_private_state *
50351bb76ff1Sjsg drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
50361bb76ff1Sjsg {
50371bb76ff1Sjsg 	struct drm_dp_mst_topology_state *state, *old_state =
50381bb76ff1Sjsg 		to_dp_mst_topology_state(obj->state);
50391bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *pos, *payload;
50401bb76ff1Sjsg 
50411bb76ff1Sjsg 	state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
50421bb76ff1Sjsg 	if (!state)
50431bb76ff1Sjsg 		return NULL;
50441bb76ff1Sjsg 
50451bb76ff1Sjsg 	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
50461bb76ff1Sjsg 
50471bb76ff1Sjsg 	INIT_LIST_HEAD(&state->payloads);
50481bb76ff1Sjsg 	state->commit_deps = NULL;
50491bb76ff1Sjsg 	state->num_commit_deps = 0;
50501bb76ff1Sjsg 	state->pending_crtc_mask = 0;
50511bb76ff1Sjsg 
50521bb76ff1Sjsg 	list_for_each_entry(pos, &old_state->payloads, next) {
50531bb76ff1Sjsg 		/* Prune leftover freed timeslot allocations */
50541bb76ff1Sjsg 		if (pos->delete)
50551bb76ff1Sjsg 			continue;
50561bb76ff1Sjsg 
50571bb76ff1Sjsg 		payload = kmemdup(pos, sizeof(*payload), GFP_KERNEL);
50581bb76ff1Sjsg 		if (!payload)
50591bb76ff1Sjsg 			goto fail;
50601bb76ff1Sjsg 
50611bb76ff1Sjsg 		drm_dp_mst_get_port_malloc(payload->port);
50621bb76ff1Sjsg 		list_add(&payload->next, &state->payloads);
50631bb76ff1Sjsg 	}
50641bb76ff1Sjsg 
50651bb76ff1Sjsg 	return &state->base;
50661bb76ff1Sjsg 
50671bb76ff1Sjsg fail:
50681bb76ff1Sjsg 	list_for_each_entry_safe(pos, payload, &state->payloads, next) {
50691bb76ff1Sjsg 		drm_dp_mst_put_port_malloc(pos->port);
50701bb76ff1Sjsg 		kfree(pos);
50711bb76ff1Sjsg 	}
50721bb76ff1Sjsg 	kfree(state);
50731bb76ff1Sjsg 
50741bb76ff1Sjsg 	return NULL;
50751bb76ff1Sjsg }
50761bb76ff1Sjsg 
50771bb76ff1Sjsg static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
50781bb76ff1Sjsg 				     struct drm_private_state *state)
50791bb76ff1Sjsg {
50801bb76ff1Sjsg 	struct drm_dp_mst_topology_state *mst_state =
50811bb76ff1Sjsg 		to_dp_mst_topology_state(state);
50821bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *pos, *tmp;
50831bb76ff1Sjsg 	int i;
50841bb76ff1Sjsg 
50851bb76ff1Sjsg 	list_for_each_entry_safe(pos, tmp, &mst_state->payloads, next) {
50861bb76ff1Sjsg 		/* We only keep references to ports with active payloads */
50871bb76ff1Sjsg 		if (!pos->delete)
50881bb76ff1Sjsg 			drm_dp_mst_put_port_malloc(pos->port);
50891bb76ff1Sjsg 		kfree(pos);
50901bb76ff1Sjsg 	}
50911bb76ff1Sjsg 
50921bb76ff1Sjsg 	for (i = 0; i < mst_state->num_commit_deps; i++)
50931bb76ff1Sjsg 		drm_crtc_commit_put(mst_state->commit_deps[i]);
50941bb76ff1Sjsg 
50951bb76ff1Sjsg 	kfree(mst_state->commit_deps);
50961bb76ff1Sjsg 	kfree(mst_state);
50971bb76ff1Sjsg }
50981bb76ff1Sjsg 
50991bb76ff1Sjsg static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
51001bb76ff1Sjsg 						 struct drm_dp_mst_branch *branch)
51011bb76ff1Sjsg {
51021bb76ff1Sjsg 	while (port->parent) {
51031bb76ff1Sjsg 		if (port->parent == branch)
51041bb76ff1Sjsg 			return true;
51051bb76ff1Sjsg 
51061bb76ff1Sjsg 		if (port->parent->port_parent)
51071bb76ff1Sjsg 			port = port->parent->port_parent;
51081bb76ff1Sjsg 		else
51091bb76ff1Sjsg 			break;
51101bb76ff1Sjsg 	}
51111bb76ff1Sjsg 	return false;
51121bb76ff1Sjsg }
51131bb76ff1Sjsg 
51141bb76ff1Sjsg static int
51151bb76ff1Sjsg drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
51161bb76ff1Sjsg 				      struct drm_dp_mst_topology_state *state);
51171bb76ff1Sjsg 
51181bb76ff1Sjsg static int
51191bb76ff1Sjsg drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
51201bb76ff1Sjsg 				      struct drm_dp_mst_topology_state *state)
51211bb76ff1Sjsg {
51221bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *payload;
51231bb76ff1Sjsg 	struct drm_dp_mst_port *port;
51241bb76ff1Sjsg 	int pbn_used = 0, ret;
51251bb76ff1Sjsg 	bool found = false;
51261bb76ff1Sjsg 
51271bb76ff1Sjsg 	/* Check that we have at least one port in our state that's downstream
51281bb76ff1Sjsg 	 * of this branch, otherwise we can skip this branch
51291bb76ff1Sjsg 	 */
51301bb76ff1Sjsg 	list_for_each_entry(payload, &state->payloads, next) {
51311bb76ff1Sjsg 		if (!payload->pbn ||
51321bb76ff1Sjsg 		    !drm_dp_mst_port_downstream_of_branch(payload->port, mstb))
51331bb76ff1Sjsg 			continue;
51341bb76ff1Sjsg 
51351bb76ff1Sjsg 		found = true;
51361bb76ff1Sjsg 		break;
51371bb76ff1Sjsg 	}
51381bb76ff1Sjsg 	if (!found)
51391bb76ff1Sjsg 		return 0;
51401bb76ff1Sjsg 
51411bb76ff1Sjsg 	if (mstb->port_parent)
51421bb76ff1Sjsg 		drm_dbg_atomic(mstb->mgr->dev,
51431bb76ff1Sjsg 			       "[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n",
51441bb76ff1Sjsg 			       mstb->port_parent->parent, mstb->port_parent, mstb);
51451bb76ff1Sjsg 	else
51461bb76ff1Sjsg 		drm_dbg_atomic(mstb->mgr->dev, "[MSTB:%p] Checking bandwidth limits\n", mstb);
51471bb76ff1Sjsg 
51481bb76ff1Sjsg 	list_for_each_entry(port, &mstb->ports, next) {
51491bb76ff1Sjsg 		ret = drm_dp_mst_atomic_check_port_bw_limit(port, state);
51501bb76ff1Sjsg 		if (ret < 0)
51511bb76ff1Sjsg 			return ret;
51521bb76ff1Sjsg 
51531bb76ff1Sjsg 		pbn_used += ret;
51541bb76ff1Sjsg 	}
51551bb76ff1Sjsg 
51561bb76ff1Sjsg 	return pbn_used;
51571bb76ff1Sjsg }
51581bb76ff1Sjsg 
51591bb76ff1Sjsg static int
51601bb76ff1Sjsg drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
51611bb76ff1Sjsg 				      struct drm_dp_mst_topology_state *state)
51621bb76ff1Sjsg {
51631bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *payload;
51641bb76ff1Sjsg 	int pbn_used = 0;
51651bb76ff1Sjsg 
51661bb76ff1Sjsg 	if (port->pdt == DP_PEER_DEVICE_NONE)
51671bb76ff1Sjsg 		return 0;
51681bb76ff1Sjsg 
51691bb76ff1Sjsg 	if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
51701bb76ff1Sjsg 		payload = drm_atomic_get_mst_payload_state(state, port);
51711bb76ff1Sjsg 		if (!payload)
51721bb76ff1Sjsg 			return 0;
51731bb76ff1Sjsg 
51741bb76ff1Sjsg 		/*
51751bb76ff1Sjsg 		 * This could happen if the sink deasserted its HPD line, but
51761bb76ff1Sjsg 		 * the branch device still reports it as attached (PDT != NONE).
51771bb76ff1Sjsg 		 */
51781bb76ff1Sjsg 		if (!port->full_pbn) {
51791bb76ff1Sjsg 			drm_dbg_atomic(port->mgr->dev,
51801bb76ff1Sjsg 				       "[MSTB:%p] [MST PORT:%p] no BW available for the port\n",
51811bb76ff1Sjsg 				       port->parent, port);
51821bb76ff1Sjsg 			return -EINVAL;
51831bb76ff1Sjsg 		}
51841bb76ff1Sjsg 
51851bb76ff1Sjsg 		pbn_used = payload->pbn;
51861bb76ff1Sjsg 	} else {
51871bb76ff1Sjsg 		pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
51881bb76ff1Sjsg 								 state);
51891bb76ff1Sjsg 		if (pbn_used <= 0)
51901bb76ff1Sjsg 			return pbn_used;
51911bb76ff1Sjsg 	}
51921bb76ff1Sjsg 
51931bb76ff1Sjsg 	if (pbn_used > port->full_pbn) {
51941bb76ff1Sjsg 		drm_dbg_atomic(port->mgr->dev,
51951bb76ff1Sjsg 			       "[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
51961bb76ff1Sjsg 			       port->parent, port, pbn_used, port->full_pbn);
51971bb76ff1Sjsg 		return -ENOSPC;
51981bb76ff1Sjsg 	}
51991bb76ff1Sjsg 
52001bb76ff1Sjsg 	drm_dbg_atomic(port->mgr->dev, "[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",
52011bb76ff1Sjsg 		       port->parent, port, pbn_used, port->full_pbn);
52021bb76ff1Sjsg 
52031bb76ff1Sjsg 	return pbn_used;
52041bb76ff1Sjsg }
52051bb76ff1Sjsg 
52061bb76ff1Sjsg static inline int
52071bb76ff1Sjsg drm_dp_mst_atomic_check_payload_alloc_limits(struct drm_dp_mst_topology_mgr *mgr,
52081bb76ff1Sjsg 					     struct drm_dp_mst_topology_state *mst_state)
52091bb76ff1Sjsg {
52101bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *payload;
52111bb76ff1Sjsg 	int avail_slots = mst_state->total_avail_slots, payload_count = 0;
52121bb76ff1Sjsg 
52131bb76ff1Sjsg 	list_for_each_entry(payload, &mst_state->payloads, next) {
52141bb76ff1Sjsg 		/* Releasing payloads is always OK - even if the port is gone */
52151bb76ff1Sjsg 		if (payload->delete) {
52161bb76ff1Sjsg 			drm_dbg_atomic(mgr->dev, "[MST PORT:%p] releases all time slots\n",
52171bb76ff1Sjsg 				       payload->port);
52181bb76ff1Sjsg 			continue;
52191bb76ff1Sjsg 		}
52201bb76ff1Sjsg 
52211bb76ff1Sjsg 		drm_dbg_atomic(mgr->dev, "[MST PORT:%p] requires %d time slots\n",
52221bb76ff1Sjsg 			       payload->port, payload->time_slots);
52231bb76ff1Sjsg 
52241bb76ff1Sjsg 		avail_slots -= payload->time_slots;
52251bb76ff1Sjsg 		if (avail_slots < 0) {
52261bb76ff1Sjsg 			drm_dbg_atomic(mgr->dev,
52271bb76ff1Sjsg 				       "[MST PORT:%p] not enough time slots in mst state %p (avail=%d)\n",
52281bb76ff1Sjsg 				       payload->port, mst_state, avail_slots + payload->time_slots);
52291bb76ff1Sjsg 			return -ENOSPC;
52301bb76ff1Sjsg 		}
52311bb76ff1Sjsg 
52321bb76ff1Sjsg 		if (++payload_count > mgr->max_payloads) {
52331bb76ff1Sjsg 			drm_dbg_atomic(mgr->dev,
52341bb76ff1Sjsg 				       "[MST MGR:%p] state %p has too many payloads (max=%d)\n",
52351bb76ff1Sjsg 				       mgr, mst_state, mgr->max_payloads);
52361bb76ff1Sjsg 			return -EINVAL;
52371bb76ff1Sjsg 		}
52381bb76ff1Sjsg 
52391bb76ff1Sjsg 		/* Assign a VCPI */
52401bb76ff1Sjsg 		if (!payload->vcpi) {
52411bb76ff1Sjsg 			payload->vcpi = ffz(mst_state->payload_mask) + 1;
52421bb76ff1Sjsg 			drm_dbg_atomic(mgr->dev, "[MST PORT:%p] assigned VCPI #%d\n",
52431bb76ff1Sjsg 				       payload->port, payload->vcpi);
52441bb76ff1Sjsg 			mst_state->payload_mask |= BIT(payload->vcpi - 1);
52451bb76ff1Sjsg 		}
52461bb76ff1Sjsg 	}
52471bb76ff1Sjsg 
52481bb76ff1Sjsg 	if (!payload_count)
52491bb76ff1Sjsg 		mst_state->pbn_div = 0;
52501bb76ff1Sjsg 
52511bb76ff1Sjsg 	drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p TU pbn_div=%d avail=%d used=%d\n",
52521bb76ff1Sjsg 		       mgr, mst_state, mst_state->pbn_div, avail_slots,
52531bb76ff1Sjsg 		       mst_state->total_avail_slots - avail_slots);
52541bb76ff1Sjsg 
52551bb76ff1Sjsg 	return 0;
52561bb76ff1Sjsg }
52571bb76ff1Sjsg 
52581bb76ff1Sjsg /**
52591bb76ff1Sjsg  * drm_dp_mst_add_affected_dsc_crtcs
52601bb76ff1Sjsg  * @state: Pointer to the new struct drm_dp_mst_topology_state
52611bb76ff1Sjsg  * @mgr: MST topology manager
52621bb76ff1Sjsg  *
52631bb76ff1Sjsg  * Whenever there is a change in the MST topology, the DSC
52641bb76ff1Sjsg  * configuration has to be recalculated, and therefore we
52651bb76ff1Sjsg  * need to trigger a modeset on all affected CRTCs in that
52661bb76ff1Sjsg  * topology.
52671bb76ff1Sjsg  *
52681bb76ff1Sjsg  * See also:
52691bb76ff1Sjsg  * drm_dp_mst_atomic_enable_dsc()
52701bb76ff1Sjsg  */
52711bb76ff1Sjsg int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
52721bb76ff1Sjsg {
52731bb76ff1Sjsg 	struct drm_dp_mst_topology_state *mst_state;
52741bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *pos;
52751bb76ff1Sjsg 	struct drm_connector *connector;
52761bb76ff1Sjsg 	struct drm_connector_state *conn_state;
52771bb76ff1Sjsg 	struct drm_crtc *crtc;
52781bb76ff1Sjsg 	struct drm_crtc_state *crtc_state;
52791bb76ff1Sjsg 
52801bb76ff1Sjsg 	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
52811bb76ff1Sjsg 
52821bb76ff1Sjsg 	if (IS_ERR(mst_state))
52831bb76ff1Sjsg 		return PTR_ERR(mst_state);
52841bb76ff1Sjsg 
52851bb76ff1Sjsg 	list_for_each_entry(pos, &mst_state->payloads, next) {
52861bb76ff1Sjsg 
52871bb76ff1Sjsg 		connector = pos->port->connector;
52881bb76ff1Sjsg 
52891bb76ff1Sjsg 		if (!connector)
52901bb76ff1Sjsg 			return -EINVAL;
52911bb76ff1Sjsg 
52921bb76ff1Sjsg 		conn_state = drm_atomic_get_connector_state(state, connector);
52931bb76ff1Sjsg 
52941bb76ff1Sjsg 		if (IS_ERR(conn_state))
52951bb76ff1Sjsg 			return PTR_ERR(conn_state);
52961bb76ff1Sjsg 
52971bb76ff1Sjsg 		crtc = conn_state->crtc;
52981bb76ff1Sjsg 
52991bb76ff1Sjsg 		if (!crtc)
53001bb76ff1Sjsg 			continue;
53011bb76ff1Sjsg 
53021bb76ff1Sjsg 		if (!drm_dp_mst_dsc_aux_for_port(pos->port))
53031bb76ff1Sjsg 			continue;
53041bb76ff1Sjsg 
53051bb76ff1Sjsg 		crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
53061bb76ff1Sjsg 
53071bb76ff1Sjsg 		if (IS_ERR(crtc_state))
53081bb76ff1Sjsg 			return PTR_ERR(crtc_state);
53091bb76ff1Sjsg 
53101bb76ff1Sjsg 		drm_dbg_atomic(mgr->dev, "[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
53111bb76ff1Sjsg 			       mgr, crtc);
53121bb76ff1Sjsg 
53131bb76ff1Sjsg 		crtc_state->mode_changed = true;
53141bb76ff1Sjsg 	}
53151bb76ff1Sjsg 	return 0;
53161bb76ff1Sjsg }
53171bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
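
/*
 * Hypothetical call site sketch: a driver would invoke this from its
 * atomic check path once it notices a topology change, e.g.:
 *
 *	ret = drm_dp_mst_add_affected_dsc_crtcs(state, &dp->mst_mgr);
 *	if (ret)
 *		return ret;
 *
 * ("dp->mst_mgr" is an illustrative per-connector manager field, not a
 * name from this file.)
 */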
53181bb76ff1Sjsg 
53191bb76ff1Sjsg /**
53201bb76ff1Sjsg  * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
53211bb76ff1Sjsg  * @state: Pointer to the new drm_atomic_state
53221bb76ff1Sjsg  * @port: Pointer to the affected MST Port
53231bb76ff1Sjsg  * @pbn: Newly recalculated bw required for link with DSC enabled
53241bb76ff1Sjsg  * @enable: Boolean flag to enable or disable DSC on the port
53251bb76ff1Sjsg  *
53261bb76ff1Sjsg  * This function enables or disables DSC on the given port
53271bb76ff1Sjsg  * by recalculating its time slot allocation from the PBN
53281bb76ff1Sjsg  * provided, and sets the dsc_enabled flag to keep track of
53291bb76ff1Sjsg  * which ports have DSC enabled.
53301bb76ff1Sjsg  *
53311bb76ff1Sjsg  */
53321bb76ff1Sjsg int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
53331bb76ff1Sjsg 				 struct drm_dp_mst_port *port,
53341bb76ff1Sjsg 				 int pbn, bool enable)
53351bb76ff1Sjsg {
53361bb76ff1Sjsg 	struct drm_dp_mst_topology_state *mst_state;
53371bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *payload;
53381bb76ff1Sjsg 	int time_slots = 0;
53391bb76ff1Sjsg 
53401bb76ff1Sjsg 	mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
53411bb76ff1Sjsg 	if (IS_ERR(mst_state))
53421bb76ff1Sjsg 		return PTR_ERR(mst_state);
53431bb76ff1Sjsg 
53441bb76ff1Sjsg 	payload = drm_atomic_get_mst_payload_state(mst_state, port);
53451bb76ff1Sjsg 	if (!payload) {
53461bb76ff1Sjsg 		drm_dbg_atomic(state->dev,
53471bb76ff1Sjsg 			       "[MST PORT:%p] Couldn't find payload in mst state %p\n",
53481bb76ff1Sjsg 			       port, mst_state);
53491bb76ff1Sjsg 		return -EINVAL;
53501bb76ff1Sjsg 	}
53511bb76ff1Sjsg 
53521bb76ff1Sjsg 	if (payload->dsc_enabled == enable) {
53531bb76ff1Sjsg 		drm_dbg_atomic(state->dev,
53541bb76ff1Sjsg 			       "[MST PORT:%p] DSC flag is already set to %d, returning %d time slots\n",
53551bb76ff1Sjsg 			       port, enable, payload->time_slots);
53561bb76ff1Sjsg 		time_slots = payload->time_slots;
53571bb76ff1Sjsg 	}
53581bb76ff1Sjsg 
53591bb76ff1Sjsg 	if (enable) {
53601bb76ff1Sjsg 		time_slots = drm_dp_atomic_find_time_slots(state, port->mgr, port, pbn);
53611bb76ff1Sjsg 		drm_dbg_atomic(state->dev,
53621bb76ff1Sjsg 			       "[MST PORT:%p] Enabling DSC flag, reallocating %d time slots on the port\n",
53631bb76ff1Sjsg 			       port, time_slots);
53641bb76ff1Sjsg 		if (time_slots < 0)
53651bb76ff1Sjsg 			return -EINVAL;
53661bb76ff1Sjsg 	}
53671bb76ff1Sjsg 
53681bb76ff1Sjsg 	payload->dsc_enabled = enable;
53691bb76ff1Sjsg 
53701bb76ff1Sjsg 	return time_slots;
53711bb76ff1Sjsg }
53721bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
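
/*
 * Illustrative usage (assumed caller context): after computing the PBN
 * for the DSC-compressed stream, a driver might do
 *
 *	slots = drm_dp_mst_atomic_enable_dsc(state, port, dsc_pbn, true);
 *	if (slots < 0)
 *		return slots;
 *
 * where "dsc_pbn" stands for the recalculated bandwidth and is not a
 * value defined in this file.
 */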
53731bb76ff1Sjsg 
53741bb76ff1Sjsg /**
53751bb76ff1Sjsg  * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
53761bb76ff1Sjsg  * atomic update is valid
53771bb76ff1Sjsg  * @state: Pointer to the new &struct drm_dp_mst_topology_state
53781bb76ff1Sjsg  *
53791bb76ff1Sjsg  * Checks the given topology state for an atomic update to ensure that it's
53801bb76ff1Sjsg  * valid. This includes checking whether there's enough bandwidth to support
53811bb76ff1Sjsg  * the new timeslot allocations in the atomic update.
53821bb76ff1Sjsg  *
53831bb76ff1Sjsg  * Any atomic drivers supporting DP MST must make sure to call this after
53841bb76ff1Sjsg  * checking the rest of their state in their
53851bb76ff1Sjsg  * &drm_mode_config_funcs.atomic_check() callback.
53861bb76ff1Sjsg  *
53871bb76ff1Sjsg  * See also:
53881bb76ff1Sjsg  * drm_dp_atomic_find_time_slots()
53891bb76ff1Sjsg  * drm_dp_atomic_release_time_slots()
53901bb76ff1Sjsg  *
53911bb76ff1Sjsg  * Returns:
53921bb76ff1Sjsg  *
53931bb76ff1Sjsg  * 0 if the new state is valid, negative error code otherwise.
53941bb76ff1Sjsg  */
53951bb76ff1Sjsg int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
53961bb76ff1Sjsg {
53971bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr;
53981bb76ff1Sjsg 	struct drm_dp_mst_topology_state *mst_state;
53991bb76ff1Sjsg 	int i, ret = 0;
54001bb76ff1Sjsg 
54011bb76ff1Sjsg 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
54021bb76ff1Sjsg 		if (!mgr->mst_state)
54031bb76ff1Sjsg 			continue;
54041bb76ff1Sjsg 
54051bb76ff1Sjsg 		ret = drm_dp_mst_atomic_check_payload_alloc_limits(mgr, mst_state);
54061bb76ff1Sjsg 		if (ret)
54071bb76ff1Sjsg 			break;
54081bb76ff1Sjsg 
54091bb76ff1Sjsg 		mutex_lock(&mgr->lock);
54101bb76ff1Sjsg 		ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
54111bb76ff1Sjsg 							    mst_state);
54121bb76ff1Sjsg 		mutex_unlock(&mgr->lock);
54131bb76ff1Sjsg 		if (ret < 0)
54141bb76ff1Sjsg 			break;
54151bb76ff1Sjsg 		else
54161bb76ff1Sjsg 			ret = 0;
54171bb76ff1Sjsg 	}
54181bb76ff1Sjsg 
54191bb76ff1Sjsg 	return ret;
54201bb76ff1Sjsg }
54211bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_atomic_check);
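
/*
 * Minimal sketch of the expected call site (the function name is
 * hypothetical): drivers run this after the core helper checks in their
 * &drm_mode_config_funcs.atomic_check implementation:
 *
 *	static int example_atomic_check(struct drm_device *dev,
 *					struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		return drm_dp_mst_atomic_check(state);
 *	}
 */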
54221bb76ff1Sjsg 
54231bb76ff1Sjsg const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
54241bb76ff1Sjsg 	.atomic_duplicate_state = drm_dp_mst_duplicate_state,
54251bb76ff1Sjsg 	.atomic_destroy_state = drm_dp_mst_destroy_state,
54261bb76ff1Sjsg };
54271bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
54281bb76ff1Sjsg 
54291bb76ff1Sjsg /**
54301bb76ff1Sjsg  * drm_atomic_get_mst_topology_state: get MST topology state
54311bb76ff1Sjsg  * @state: global atomic state
54321bb76ff1Sjsg  * @mgr: MST topology manager, also the private object in this case
54331bb76ff1Sjsg  *
54341bb76ff1Sjsg  * This function wraps drm_atomic_get_priv_obj_state() passing in the MST atomic
54351bb76ff1Sjsg  * state vtable so that the private object state returned is that of an MST
54361bb76ff1Sjsg  * topology object.
54371bb76ff1Sjsg  *
54381bb76ff1Sjsg  * RETURNS:
54391bb76ff1Sjsg  *
54401bb76ff1Sjsg  * The MST topology state or error pointer.
54411bb76ff1Sjsg  */
54421bb76ff1Sjsg struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
54431bb76ff1Sjsg 								    struct drm_dp_mst_topology_mgr *mgr)
54441bb76ff1Sjsg {
54451bb76ff1Sjsg 	return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
54461bb76ff1Sjsg }
54471bb76ff1Sjsg EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
54481bb76ff1Sjsg 
54491bb76ff1Sjsg /**
545061b9d4c9Sjsg  * drm_atomic_get_old_mst_topology_state: get old MST topology state in atomic state, if any
54511bb76ff1Sjsg  * @state: global atomic state
54521bb76ff1Sjsg  * @mgr: MST topology manager, also the private object in this case
54531bb76ff1Sjsg  *
545461b9d4c9Sjsg  * This function wraps drm_atomic_get_old_private_obj_state() passing in the MST atomic
54551bb76ff1Sjsg  * state vtable so that the private object state returned is that of an MST
54561bb76ff1Sjsg  * topology object.
54571bb76ff1Sjsg  *
54581bb76ff1Sjsg  * Returns:
54591bb76ff1Sjsg  *
546061b9d4c9Sjsg  * The old MST topology state, or NULL if there's no topology state for this MST mgr
546161b9d4c9Sjsg  * in the global atomic state
546261b9d4c9Sjsg  */
546361b9d4c9Sjsg struct drm_dp_mst_topology_state *
546461b9d4c9Sjsg drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state,
546561b9d4c9Sjsg 				      struct drm_dp_mst_topology_mgr *mgr)
546661b9d4c9Sjsg {
546761b9d4c9Sjsg 	struct drm_private_state *old_priv_state =
546861b9d4c9Sjsg 		drm_atomic_get_old_private_obj_state(state, &mgr->base);
546961b9d4c9Sjsg 
547061b9d4c9Sjsg 	return old_priv_state ? to_dp_mst_topology_state(old_priv_state) : NULL;
547161b9d4c9Sjsg }
547261b9d4c9Sjsg EXPORT_SYMBOL(drm_atomic_get_old_mst_topology_state);
547361b9d4c9Sjsg 
547461b9d4c9Sjsg /**
547561b9d4c9Sjsg  * drm_atomic_get_new_mst_topology_state: get new MST topology state in atomic state, if any
547661b9d4c9Sjsg  * @state: global atomic state
547761b9d4c9Sjsg  * @mgr: MST topology manager, also the private object in this case
547861b9d4c9Sjsg  *
547961b9d4c9Sjsg  * This function wraps drm_atomic_get_new_private_obj_state() passing in the MST atomic
548061b9d4c9Sjsg  * state vtable so that the private object state returned is that of an MST
548161b9d4c9Sjsg  * topology object.
548261b9d4c9Sjsg  *
548361b9d4c9Sjsg  * Returns:
548461b9d4c9Sjsg  *
548561b9d4c9Sjsg  * The new MST topology state, or NULL if there's no topology state for this MST mgr
54861bb76ff1Sjsg  * in the global atomic state
54871bb76ff1Sjsg  */
54881bb76ff1Sjsg struct drm_dp_mst_topology_state *
54891bb76ff1Sjsg drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
54901bb76ff1Sjsg 				      struct drm_dp_mst_topology_mgr *mgr)
54911bb76ff1Sjsg {
549261b9d4c9Sjsg 	struct drm_private_state *new_priv_state =
54931bb76ff1Sjsg 		drm_atomic_get_new_private_obj_state(state, &mgr->base);
54941bb76ff1Sjsg 
549561b9d4c9Sjsg 	return new_priv_state ? to_dp_mst_topology_state(new_priv_state) : NULL;
54961bb76ff1Sjsg }
54971bb76ff1Sjsg EXPORT_SYMBOL(drm_atomic_get_new_mst_topology_state);
54981bb76ff1Sjsg 
54991bb76ff1Sjsg /**
55001bb76ff1Sjsg  * drm_dp_mst_topology_mgr_init - initialise a topology manager
55011bb76ff1Sjsg  * @mgr: manager struct to initialise
55021bb76ff1Sjsg  * @dev: device providing this structure - for i2c addition.
55031bb76ff1Sjsg  * @aux: DP helper aux channel to talk to this device
55041bb76ff1Sjsg  * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
55051bb76ff1Sjsg  * @max_payloads: maximum number of payloads this GPU can source
55061bb76ff1Sjsg  * @conn_base_id: the connector object ID the MST device is connected to.
55071bb76ff1Sjsg  *
55081bb76ff1Sjsg  * Returns 0 on success, or a negative error code on failure.
55091bb76ff1Sjsg  */
55101bb76ff1Sjsg int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
55111bb76ff1Sjsg 				 struct drm_device *dev, struct drm_dp_aux *aux,
55121bb76ff1Sjsg 				 int max_dpcd_transaction_bytes, int max_payloads,
55131bb76ff1Sjsg 				 int conn_base_id)
55141bb76ff1Sjsg {
55151bb76ff1Sjsg 	struct drm_dp_mst_topology_state *mst_state;
55161bb76ff1Sjsg 
55171bb76ff1Sjsg 	rw_init(&mgr->lock, "mst");
55181bb76ff1Sjsg 	rw_init(&mgr->qlock, "mstq");
55191bb76ff1Sjsg 	rw_init(&mgr->delayed_destroy_lock, "mstdc");
55201bb76ff1Sjsg 	rw_init(&mgr->up_req_lock, "mstup");
55211bb76ff1Sjsg 	rw_init(&mgr->probe_lock, "mstprb");
55221bb76ff1Sjsg #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
55231bb76ff1Sjsg 	rw_init(&mgr->topology_ref_history_lock, "msttr");
55241bb76ff1Sjsg 	stack_depot_init();
55251bb76ff1Sjsg #endif
55261bb76ff1Sjsg 	INIT_LIST_HEAD(&mgr->tx_msg_downq);
55271bb76ff1Sjsg 	INIT_LIST_HEAD(&mgr->destroy_port_list);
55281bb76ff1Sjsg 	INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
55291bb76ff1Sjsg 	INIT_LIST_HEAD(&mgr->up_req_list);
55301bb76ff1Sjsg 
55311bb76ff1Sjsg 	/*
55321bb76ff1Sjsg 	 * delayed_destroy_work will be queued on a dedicated WQ, so that any
55331bb76ff1Sjsg 	 * requeuing will also be flushed when deinitializing the topology manager.
55341bb76ff1Sjsg 	 */
55351bb76ff1Sjsg 	mgr->delayed_destroy_wq = alloc_ordered_workqueue("drm_dp_mst_wq", 0);
55361bb76ff1Sjsg 	if (mgr->delayed_destroy_wq == NULL)
55371bb76ff1Sjsg 		return -ENOMEM;
55381bb76ff1Sjsg 
55391bb76ff1Sjsg 	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
55401bb76ff1Sjsg 	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
55411bb76ff1Sjsg 	INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
55421bb76ff1Sjsg 	INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
55431bb76ff1Sjsg 	init_waitqueue_head(&mgr->tx_waitq);
55441bb76ff1Sjsg 	mgr->dev = dev;
55451bb76ff1Sjsg 	mgr->aux = aux;
55461bb76ff1Sjsg 	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
55471bb76ff1Sjsg 	mgr->max_payloads = max_payloads;
55481bb76ff1Sjsg 	mgr->conn_base_id = conn_base_id;
55491bb76ff1Sjsg 
55501bb76ff1Sjsg 	mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
55511bb76ff1Sjsg 	if (mst_state == NULL)
55521bb76ff1Sjsg 		return -ENOMEM;
55531bb76ff1Sjsg 
55541bb76ff1Sjsg 	mst_state->total_avail_slots = 63;
55551bb76ff1Sjsg 	mst_state->start_slot = 1;
55561bb76ff1Sjsg 
55571bb76ff1Sjsg 	mst_state->mgr = mgr;
55581bb76ff1Sjsg 	INIT_LIST_HEAD(&mst_state->payloads);
55591bb76ff1Sjsg 
55601bb76ff1Sjsg 	drm_atomic_private_obj_init(dev, &mgr->base,
55611bb76ff1Sjsg 				    &mst_state->base,
55621bb76ff1Sjsg 				    &drm_dp_mst_topology_state_funcs);
55631bb76ff1Sjsg 
55641bb76ff1Sjsg 	return 0;
55651bb76ff1Sjsg }
55661bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
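
/*
 * Illustrative init call (the transaction size, payload count, aux and
 * connector fields are hypothetical, per-driver values):
 *
 *	ret = drm_dp_mst_topology_mgr_init(&dp->mst_mgr, drm_dev,
 *					   &dp->aux, 16, 4,
 *					   connector->base.id);
 *	if (ret)
 *		return ret;
 */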
55671bb76ff1Sjsg 
55681bb76ff1Sjsg /**
55691bb76ff1Sjsg  * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
55701bb76ff1Sjsg  * @mgr: manager to destroy
55711bb76ff1Sjsg  */
55721bb76ff1Sjsg void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
55731bb76ff1Sjsg {
55741bb76ff1Sjsg 	drm_dp_mst_topology_mgr_set_mst(mgr, false);
55751bb76ff1Sjsg 	flush_work(&mgr->work);
55761bb76ff1Sjsg 	/* The following will also drain any requeued work on the WQ. */
55771bb76ff1Sjsg 	if (mgr->delayed_destroy_wq) {
55781bb76ff1Sjsg 		destroy_workqueue(mgr->delayed_destroy_wq);
55791bb76ff1Sjsg 		mgr->delayed_destroy_wq = NULL;
55801bb76ff1Sjsg 	}
55811bb76ff1Sjsg 	mgr->dev = NULL;
55821bb76ff1Sjsg 	mgr->aux = NULL;
55831bb76ff1Sjsg 	drm_atomic_private_obj_fini(&mgr->base);
55841bb76ff1Sjsg 	mgr->funcs = NULL;
55851bb76ff1Sjsg 
55861bb76ff1Sjsg 	mutex_destroy(&mgr->delayed_destroy_lock);
55871bb76ff1Sjsg 	mutex_destroy(&mgr->qlock);
55881bb76ff1Sjsg 	mutex_destroy(&mgr->lock);
55891bb76ff1Sjsg 	mutex_destroy(&mgr->up_req_lock);
55901bb76ff1Sjsg 	mutex_destroy(&mgr->probe_lock);
55911bb76ff1Sjsg #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
55921bb76ff1Sjsg 	mutex_destroy(&mgr->topology_ref_history_lock);
55931bb76ff1Sjsg #endif
55941bb76ff1Sjsg }
55951bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
55961bb76ff1Sjsg 
55971bb76ff1Sjsg static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
55981bb76ff1Sjsg {
55991bb76ff1Sjsg 	int i;
56001bb76ff1Sjsg 
56011bb76ff1Sjsg 	if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
56021bb76ff1Sjsg 		return false;
56031bb76ff1Sjsg 
56041bb76ff1Sjsg 	for (i = 0; i < num - 1; i++) {
56051bb76ff1Sjsg 		if (msgs[i].flags & I2C_M_RD ||
56061bb76ff1Sjsg 		    msgs[i].len > 0xff)
56071bb76ff1Sjsg 			return false;
56081bb76ff1Sjsg 	}
56091bb76ff1Sjsg 
56101bb76ff1Sjsg 	return msgs[num - 1].flags & I2C_M_RD &&
56111bb76ff1Sjsg 		msgs[num - 1].len <= 0xff;
56121bb76ff1Sjsg }
56131bb76ff1Sjsg 
56141bb76ff1Sjsg static bool remote_i2c_write_ok(const struct i2c_msg msgs[], int num)
56151bb76ff1Sjsg {
56161bb76ff1Sjsg 	int i;
56171bb76ff1Sjsg 
56181bb76ff1Sjsg 	for (i = 0; i < num - 1; i++) {
56191bb76ff1Sjsg 		if (msgs[i].flags & I2C_M_RD || !(msgs[i].flags & I2C_M_STOP) ||
56201bb76ff1Sjsg 		    msgs[i].len > 0xff)
56211bb76ff1Sjsg 			return false;
56221bb76ff1Sjsg 	}
56231bb76ff1Sjsg 
56241bb76ff1Sjsg 	return !(msgs[num - 1].flags & I2C_M_RD) && msgs[num - 1].len <= 0xff;
56251bb76ff1Sjsg }
56261bb76ff1Sjsg 
56271bb76ff1Sjsg static int drm_dp_mst_i2c_read(struct drm_dp_mst_branch *mstb,
56281bb76ff1Sjsg 			       struct drm_dp_mst_port *port,
56291bb76ff1Sjsg 			       struct i2c_msg *msgs, int num)
56301bb76ff1Sjsg {
56311bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
56321bb76ff1Sjsg 	unsigned int i;
56331bb76ff1Sjsg 	struct drm_dp_sideband_msg_req_body msg;
56341bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg = NULL;
56351bb76ff1Sjsg 	int ret;
56361bb76ff1Sjsg 
56371bb76ff1Sjsg 	memset(&msg, 0, sizeof(msg));
56381bb76ff1Sjsg 	msg.req_type = DP_REMOTE_I2C_READ;
56391bb76ff1Sjsg 	msg.u.i2c_read.num_transactions = num - 1;
56401bb76ff1Sjsg 	msg.u.i2c_read.port_number = port->port_num;
56411bb76ff1Sjsg 	for (i = 0; i < num - 1; i++) {
56421bb76ff1Sjsg 		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
56431bb76ff1Sjsg 		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
56441bb76ff1Sjsg 		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
56451bb76ff1Sjsg 		msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
56461bb76ff1Sjsg 	}
56471bb76ff1Sjsg 	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
56481bb76ff1Sjsg 	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
56491bb76ff1Sjsg 
56501bb76ff1Sjsg 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
56511bb76ff1Sjsg 	if (!txmsg) {
56521bb76ff1Sjsg 		ret = -ENOMEM;
56531bb76ff1Sjsg 		goto out;
56541bb76ff1Sjsg 	}
56551bb76ff1Sjsg 
56561bb76ff1Sjsg 	txmsg->dst = mstb;
56571bb76ff1Sjsg 	drm_dp_encode_sideband_req(&msg, txmsg);
56581bb76ff1Sjsg 
56591bb76ff1Sjsg 	drm_dp_queue_down_tx(mgr, txmsg);
56601bb76ff1Sjsg 
56611bb76ff1Sjsg 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
56621bb76ff1Sjsg 	if (ret > 0) {
56631bb76ff1Sjsg 
56641bb76ff1Sjsg 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
56651bb76ff1Sjsg 			ret = -EREMOTEIO;
56661bb76ff1Sjsg 			goto out;
56671bb76ff1Sjsg 		}
56681bb76ff1Sjsg 		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
56691bb76ff1Sjsg 			ret = -EIO;
56701bb76ff1Sjsg 			goto out;
56711bb76ff1Sjsg 		}
56721bb76ff1Sjsg 		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
56731bb76ff1Sjsg 		ret = num;
56741bb76ff1Sjsg 	}
56751bb76ff1Sjsg out:
56761bb76ff1Sjsg 	kfree(txmsg);
56771bb76ff1Sjsg 	return ret;
56781bb76ff1Sjsg }
56791bb76ff1Sjsg 
56801bb76ff1Sjsg static int drm_dp_mst_i2c_write(struct drm_dp_mst_branch *mstb,
56811bb76ff1Sjsg 				struct drm_dp_mst_port *port,
56821bb76ff1Sjsg 				struct i2c_msg *msgs, int num)
56831bb76ff1Sjsg {
56841bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
56851bb76ff1Sjsg 	unsigned int i;
56861bb76ff1Sjsg 	struct drm_dp_sideband_msg_req_body msg;
56871bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg = NULL;
56881bb76ff1Sjsg 	int ret;
56891bb76ff1Sjsg 
56901bb76ff1Sjsg 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
56911bb76ff1Sjsg 	if (!txmsg) {
56921bb76ff1Sjsg 		ret = -ENOMEM;
56931bb76ff1Sjsg 		goto out;
56941bb76ff1Sjsg 	}
56951bb76ff1Sjsg 	for (i = 0; i < num; i++) {
56961bb76ff1Sjsg 		memset(&msg, 0, sizeof(msg));
56971bb76ff1Sjsg 		msg.req_type = DP_REMOTE_I2C_WRITE;
56981bb76ff1Sjsg 		msg.u.i2c_write.port_number = port->port_num;
56991bb76ff1Sjsg 		msg.u.i2c_write.write_i2c_device_id = msgs[i].addr;
57001bb76ff1Sjsg 		msg.u.i2c_write.num_bytes = msgs[i].len;
57011bb76ff1Sjsg 		msg.u.i2c_write.bytes = msgs[i].buf;
57021bb76ff1Sjsg 
57031bb76ff1Sjsg 		memset(txmsg, 0, sizeof(*txmsg));
57041bb76ff1Sjsg 		txmsg->dst = mstb;
57051bb76ff1Sjsg 
57061bb76ff1Sjsg 		drm_dp_encode_sideband_req(&msg, txmsg);
57071bb76ff1Sjsg 		drm_dp_queue_down_tx(mgr, txmsg);
57081bb76ff1Sjsg 
57091bb76ff1Sjsg 		ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
57101bb76ff1Sjsg 		if (ret > 0) {
57111bb76ff1Sjsg 			if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
57121bb76ff1Sjsg 				ret = -EREMOTEIO;
57131bb76ff1Sjsg 				goto out;
57141bb76ff1Sjsg 			}
57151bb76ff1Sjsg 		} else {
57161bb76ff1Sjsg 			goto out;
57171bb76ff1Sjsg 		}
57181bb76ff1Sjsg 	}
57191bb76ff1Sjsg 	ret = num;
57201bb76ff1Sjsg out:
57211bb76ff1Sjsg 	kfree(txmsg);
57221bb76ff1Sjsg 	return ret;
57231bb76ff1Sjsg }
57241bb76ff1Sjsg 
57251bb76ff1Sjsg /* I2C device */
57261bb76ff1Sjsg static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter,
57271bb76ff1Sjsg 			       struct i2c_msg *msgs, int num)
57281bb76ff1Sjsg {
57291bb76ff1Sjsg 	struct drm_dp_aux *aux = adapter->algo_data;
57301bb76ff1Sjsg 	struct drm_dp_mst_port *port =
57311bb76ff1Sjsg 		container_of(aux, struct drm_dp_mst_port, aux);
57321bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb;
57331bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
57341bb76ff1Sjsg 	int ret;
57351bb76ff1Sjsg 
57361bb76ff1Sjsg 	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
57371bb76ff1Sjsg 	if (!mstb)
57381bb76ff1Sjsg 		return -EREMOTEIO;
57391bb76ff1Sjsg 
57401bb76ff1Sjsg 	if (remote_i2c_read_ok(msgs, num)) {
57411bb76ff1Sjsg 		ret = drm_dp_mst_i2c_read(mstb, port, msgs, num);
57421bb76ff1Sjsg 	} else if (remote_i2c_write_ok(msgs, num)) {
57431bb76ff1Sjsg 		ret = drm_dp_mst_i2c_write(mstb, port, msgs, num);
57441bb76ff1Sjsg 	} else {
57451bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "Unsupported I2C transaction for MST device\n");
57461bb76ff1Sjsg 		ret = -EIO;
57471bb76ff1Sjsg 	}
57481bb76ff1Sjsg 
57491bb76ff1Sjsg 	drm_dp_mst_topology_put_mstb(mstb);
57501bb76ff1Sjsg 	return ret;
57511bb76ff1Sjsg }
57521bb76ff1Sjsg 
57531bb76ff1Sjsg static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
57541bb76ff1Sjsg {
57551bb76ff1Sjsg 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
57561bb76ff1Sjsg 	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
57571bb76ff1Sjsg 	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
57581bb76ff1Sjsg 	       I2C_FUNC_10BIT_ADDR;
57591bb76ff1Sjsg }
57601bb76ff1Sjsg 
57611bb76ff1Sjsg static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
57621bb76ff1Sjsg 	.functionality = drm_dp_mst_i2c_functionality,
57631bb76ff1Sjsg 	.master_xfer = drm_dp_mst_i2c_xfer,
57641bb76ff1Sjsg };
57651bb76ff1Sjsg 
57661bb76ff1Sjsg /**
57671bb76ff1Sjsg  * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
57681bb76ff1Sjsg  * @port: The port to add the I2C bus on
57691bb76ff1Sjsg  *
57701bb76ff1Sjsg  * Returns 0 on success or a negative error code on failure.
57711bb76ff1Sjsg  */
57721bb76ff1Sjsg static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port)
57731bb76ff1Sjsg {
57741bb76ff1Sjsg 	struct drm_dp_aux *aux = &port->aux;
57751bb76ff1Sjsg #ifdef __linux__
57761bb76ff1Sjsg 	struct device *parent_dev = port->mgr->dev->dev;
57771bb76ff1Sjsg #endif
57781bb76ff1Sjsg 
57791bb76ff1Sjsg 	aux->ddc.algo = &drm_dp_mst_i2c_algo;
57801bb76ff1Sjsg 	aux->ddc.algo_data = aux;
57811bb76ff1Sjsg 	aux->ddc.retries = 3;
57821bb76ff1Sjsg 
57831bb76ff1Sjsg #ifdef __linux__
57841bb76ff1Sjsg 	aux->ddc.class = I2C_CLASS_DDC;
57851bb76ff1Sjsg 	aux->ddc.owner = THIS_MODULE;
57861bb76ff1Sjsg 	/* FIXME: set the kdev of the port's connector as parent */
57871bb76ff1Sjsg 	aux->ddc.dev.parent = parent_dev;
57881bb76ff1Sjsg 	aux->ddc.dev.of_node = parent_dev->of_node;
57891bb76ff1Sjsg #endif
57901bb76ff1Sjsg 
5791f005ef32Sjsg 	strscpy(aux->ddc.name, aux->name ? aux->name : dev_name(parent_dev),
57921bb76ff1Sjsg 		sizeof(aux->ddc.name));
57931bb76ff1Sjsg 
57941bb76ff1Sjsg 	return i2c_add_adapter(&aux->ddc);
57951bb76ff1Sjsg }
57961bb76ff1Sjsg 
57971bb76ff1Sjsg /**
57981bb76ff1Sjsg  * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
57991bb76ff1Sjsg  * @port: The port to remove the I2C bus from
58001bb76ff1Sjsg  */
58011bb76ff1Sjsg static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port)
58021bb76ff1Sjsg {
58031bb76ff1Sjsg 	i2c_del_adapter(&port->aux.ddc);
58041bb76ff1Sjsg }
58051bb76ff1Sjsg 
58061bb76ff1Sjsg /**
58071bb76ff1Sjsg  * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
58081bb76ff1Sjsg  * @port: The port to check
58091bb76ff1Sjsg  *
58101bb76ff1Sjsg  * A single physical MST hub object can be represented in the topology
58111bb76ff1Sjsg  * by multiple branches, with virtual ports between those branches.
58121bb76ff1Sjsg  *
58131bb76ff1Sjsg  * As of DP 1.4, an MST hub with internal (virtual) ports must expose
58141bb76ff1Sjsg  * certain DPCD registers over those ports. See sections 2.6.1.1.1
58151bb76ff1Sjsg  * and 2.6.1.1.2 of the DisplayPort specification v1.4 for details.
58161bb76ff1Sjsg  *
58171bb76ff1Sjsg  * May acquire mgr->lock
58181bb76ff1Sjsg  *
58191bb76ff1Sjsg  * Returns:
58201bb76ff1Sjsg  * true if the port is a virtual DP peer device, false otherwise
58211bb76ff1Sjsg  */
58221bb76ff1Sjsg static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
58231bb76ff1Sjsg {
58241bb76ff1Sjsg 	struct drm_dp_mst_port *downstream_port;
58251bb76ff1Sjsg 
58261bb76ff1Sjsg 	if (!port || port->dpcd_rev < DP_DPCD_REV_14)
58271bb76ff1Sjsg 		return false;
58281bb76ff1Sjsg 
58291bb76ff1Sjsg 	/* Virtual DP Sink (Internal Display Panel) */
58301bb76ff1Sjsg 	if (port->port_num >= 8)
58311bb76ff1Sjsg 		return true;
58321bb76ff1Sjsg 
58331bb76ff1Sjsg 	/* DP-to-HDMI Protocol Converter */
58341bb76ff1Sjsg 	if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
58351bb76ff1Sjsg 	    !port->mcs &&
58361bb76ff1Sjsg 	    port->ldps)
58371bb76ff1Sjsg 		return true;
58381bb76ff1Sjsg 
58391bb76ff1Sjsg 	/* DP-to-DP */
58401bb76ff1Sjsg 	mutex_lock(&port->mgr->lock);
58411bb76ff1Sjsg 	if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
58421bb76ff1Sjsg 	    port->mstb &&
58431bb76ff1Sjsg 	    port->mstb->num_ports == 2) {
58441bb76ff1Sjsg 		list_for_each_entry(downstream_port, &port->mstb->ports, next) {
58451bb76ff1Sjsg 			if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
58461bb76ff1Sjsg 			    !downstream_port->input) {
58471bb76ff1Sjsg 				mutex_unlock(&port->mgr->lock);
58481bb76ff1Sjsg 				return true;
58491bb76ff1Sjsg 			}
58501bb76ff1Sjsg 		}
58511bb76ff1Sjsg 	}
58521bb76ff1Sjsg 	mutex_unlock(&port->mgr->lock);
58531bb76ff1Sjsg 
58541bb76ff1Sjsg 	return false;
58551bb76ff1Sjsg }
58561bb76ff1Sjsg 
58571bb76ff1Sjsg /**
58581bb76ff1Sjsg  * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
58591bb76ff1Sjsg  * @port: The port to check. A leaf of the MST tree with an attached display.
58601bb76ff1Sjsg  *
58611bb76ff1Sjsg  * Depending on the situation, DSC may be enabled via the endpoint aux,
58621bb76ff1Sjsg  * the immediately upstream aux, or the connector's physical aux.
58631bb76ff1Sjsg  *
58641bb76ff1Sjsg  * This is both the correct aux to read DSC_CAPABILITY and the
58651bb76ff1Sjsg  * correct aux to write DSC_ENABLED.
58661bb76ff1Sjsg  *
58671bb76ff1Sjsg  * This operation can be expensive (up to four aux reads), so
58681bb76ff1Sjsg  * the caller should cache the return.
58691bb76ff1Sjsg  * the caller should cache the returned aux.
58701bb76ff1Sjsg  * Returns:
58711bb76ff1Sjsg  * NULL if DSC cannot be enabled on this port, otherwise the aux device
58721bb76ff1Sjsg  */
58731bb76ff1Sjsg struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
58741bb76ff1Sjsg {
58751bb76ff1Sjsg 	struct drm_dp_mst_port *immediate_upstream_port;
58761bb76ff1Sjsg 	struct drm_dp_mst_port *fec_port;
58771bb76ff1Sjsg 	struct drm_dp_desc desc = {};
58781bb76ff1Sjsg 	u8 endpoint_fec;
58791bb76ff1Sjsg 	u8 endpoint_dsc;
58801bb76ff1Sjsg 
58811bb76ff1Sjsg 	if (!port)
58821bb76ff1Sjsg 		return NULL;
58831bb76ff1Sjsg 
58841bb76ff1Sjsg 	if (port->parent->port_parent)
58851bb76ff1Sjsg 		immediate_upstream_port = port->parent->port_parent;
58861bb76ff1Sjsg 	else
58871bb76ff1Sjsg 		immediate_upstream_port = NULL;
58881bb76ff1Sjsg 
58891bb76ff1Sjsg 	fec_port = immediate_upstream_port;
58901bb76ff1Sjsg 	while (fec_port) {
58911bb76ff1Sjsg 		/*
58921bb76ff1Sjsg 		 * Each physical link (i.e. not a virtual port) between the
58931bb76ff1Sjsg 		 * output and the primary device must support FEC
58941bb76ff1Sjsg 		 */
58951bb76ff1Sjsg 		if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
58961bb76ff1Sjsg 		    !fec_port->fec_capable)
58971bb76ff1Sjsg 			return NULL;
58981bb76ff1Sjsg 
58991bb76ff1Sjsg 		fec_port = fec_port->parent->port_parent;
59001bb76ff1Sjsg 	}
59011bb76ff1Sjsg 
59021bb76ff1Sjsg 	/* DP-to-DP peer device */
59031bb76ff1Sjsg 	if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
59041bb76ff1Sjsg 		u8 upstream_dsc;
59051bb76ff1Sjsg 
59061bb76ff1Sjsg 		if (drm_dp_dpcd_read(&port->aux,
59071bb76ff1Sjsg 				     DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
59081bb76ff1Sjsg 			return NULL;
59091bb76ff1Sjsg 		if (drm_dp_dpcd_read(&port->aux,
59101bb76ff1Sjsg 				     DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
59111bb76ff1Sjsg 			return NULL;
59121bb76ff1Sjsg 		if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
59131bb76ff1Sjsg 				     DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
59141bb76ff1Sjsg 			return NULL;
59151bb76ff1Sjsg 
59161bb76ff1Sjsg 		/* Endpoint decompression with DP-to-DP peer device */
59171bb76ff1Sjsg 		if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
59181bb76ff1Sjsg 		    (endpoint_fec & DP_FEC_CAPABLE) &&
59191bb76ff1Sjsg 		    (upstream_dsc & DP_DSC_PASSTHROUGH_IS_SUPPORTED)) {
59201bb76ff1Sjsg 			port->passthrough_aux = &immediate_upstream_port->aux;
59211bb76ff1Sjsg 			return &port->aux;
59221bb76ff1Sjsg 		}
59231bb76ff1Sjsg 
59241bb76ff1Sjsg 		/* Virtual DPCD decompression with DP-to-DP peer device */
59251bb76ff1Sjsg 		return &immediate_upstream_port->aux;
59261bb76ff1Sjsg 	}
59271bb76ff1Sjsg 
59281bb76ff1Sjsg 	/* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
59291bb76ff1Sjsg 	if (drm_dp_mst_is_virtual_dpcd(port))
59301bb76ff1Sjsg 		return &port->aux;
59311bb76ff1Sjsg 
59321bb76ff1Sjsg 	/*
59331bb76ff1Sjsg 	 * Synaptics quirk
59341bb76ff1Sjsg 	 * Applies to ports for which:
59351bb76ff1Sjsg 	 * - Physical aux has Synaptics OUI
59361bb76ff1Sjsg 	 * - DPv1.4 or higher
59371bb76ff1Sjsg 	 * - Port is on primary branch device
59381bb76ff1Sjsg 	 * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
59391bb76ff1Sjsg 	 */
59401bb76ff1Sjsg 	if (drm_dp_read_desc(port->mgr->aux, &desc, true))
59411bb76ff1Sjsg 		return NULL;
59421bb76ff1Sjsg 
59431bb76ff1Sjsg 	if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
59441bb76ff1Sjsg 	    port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
59451bb76ff1Sjsg 	    port->parent == port->mgr->mst_primary) {
59461bb76ff1Sjsg 		u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
59471bb76ff1Sjsg 
59481bb76ff1Sjsg 		if (drm_dp_read_dpcd_caps(port->mgr->aux, dpcd_ext) < 0)
59491bb76ff1Sjsg 			return NULL;
59501bb76ff1Sjsg 
59511bb76ff1Sjsg 		if ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) &&
59521bb76ff1Sjsg 		    ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK)
59531bb76ff1Sjsg 		     != DP_DWN_STRM_PORT_TYPE_ANALOG))
59541bb76ff1Sjsg 			return port->mgr->aux;
59551bb76ff1Sjsg 	}
59561bb76ff1Sjsg 
59571bb76ff1Sjsg 	/*
59581bb76ff1Sjsg 	 * Finally, check whether the endpoint itself can decompress:
59591bb76ff1Sjsg 	 * the check below requires the MST endpoint to advertise DSC
59601bb76ff1Sjsg 	 * decompression, and since DSC on an MST link also requires
59611bb76ff1Sjsg 	 * FEC, the endpoint must be FEC capable as well.
59621bb76ff1Sjsg 	 */
59631bb76ff1Sjsg 	if (drm_dp_dpcd_read(&port->aux,
59641bb76ff1Sjsg 	   DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
59651bb76ff1Sjsg 		return NULL;
59661bb76ff1Sjsg 	if (drm_dp_dpcd_read(&port->aux,
59671bb76ff1Sjsg 	   DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
59681bb76ff1Sjsg 		return NULL;
59691bb76ff1Sjsg 	if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
59701bb76ff1Sjsg 	   (endpoint_fec & DP_FEC_CAPABLE))
59711bb76ff1Sjsg 		return &port->aux;
59721bb76ff1Sjsg 
59731bb76ff1Sjsg 	return NULL;
59741bb76ff1Sjsg }
59751bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
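
/*
 * Usage sketch (an illustration, not code from this file): a driver
 * would typically resolve the DSC aux once, cache it, and use it both
 * for capability reads and for the DSC_ENABLE write. The helper name
 * example_set_dsc() and its error handling are assumptions made for
 * this example.
 */
static inline int example_set_dsc(struct drm_dp_mst_port *port, bool enable)
{
	/* Expensive lookup (up to four aux reads) - callers should cache it */
	struct drm_dp_aux *aux = drm_dp_mst_dsc_aux_for_port(port);
	ssize_t ret;

	if (!aux)
		return -ENODEV;

	/* The DSC_ENABLE write must go to the aux chosen above */
	ret = drm_dp_dpcd_writeb(aux, DP_DSC_ENABLE,
				 enable ? DP_DECOMPRESSION_EN : 0);

	return ret < 0 ? ret : 0;
}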
5976