xref: /dragonfly/sys/dev/drm/drm_dp_mst_topology.c (revision 029e6489)
1 /*
2  * Copyright © 2014 Red Hat
3  *
4  * Permission to use, copy, modify, distribute, and sell this software and its
5  * documentation for any purpose is hereby granted without fee, provided that
6  * the above copyright notice appear in all copies and that both that copyright
7  * notice and this permission notice appear in supporting documentation, and
8  * that the name of the copyright holders not be used in advertising or
9  * publicity pertaining to distribution of the software without specific,
10  * written prior permission.  The copyright holders make no representations
11  * about the suitability of this software for any purpose.  It is provided "as
12  * is" without express or implied warranty.
13  *
14  * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15  * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16  * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17  * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18  * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
20  * OF THIS SOFTWARE.
21  */
22 
23 #include <linux/kernel.h>
24 #include <linux/delay.h>
25 #include <linux/errno.h>
26 #include <linux/sched.h>
27 #include <linux/seq_file.h>
28 #include <linux/i2c.h>
29 #include <drm/drm_dp_mst_helper.h>
30 #include <drm/drmP.h>
31 
32 #include <drm/drm_fixed.h>
33 
34 /**
35  * DOC: dp mst helper
36  *
37  * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
38  * protocol. The helpers contain a topology manager and bandwidth manager.
39  * The helpers encapsulate the sending and received of sideband msgs.
40  */
41 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
42 				  char *buf);
43 static int test_calc_pbn_mode(void);
44 
45 static void drm_dp_put_port(struct drm_dp_mst_port *port);
46 
47 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
48 				     int id,
49 				     struct drm_dp_payload *payload);
50 
51 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
52 				  struct drm_dp_mst_port *port,
53 				  int offset, int size, u8 *bytes);
54 
55 static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
56 				     struct drm_dp_mst_branch *mstb);
57 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
58 					   struct drm_dp_mst_branch *mstb,
59 					   struct drm_dp_mst_port *port);
60 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
61 				 u8 *guid);
62 
63 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
64 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
65 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
66 /* sideband msg handling */
67 static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
68 {
69 	u8 bitmask = 0x80;
70 	u8 bitshift = 7;
71 	u8 array_index = 0;
72 	int number_of_bits = num_nibbles * 4;
73 	u8 remainder = 0;
74 
75 	while (number_of_bits != 0) {
76 		number_of_bits--;
77 		remainder <<= 1;
78 		remainder |= (data[array_index] & bitmask) >> bitshift;
79 		bitmask >>= 1;
80 		bitshift--;
81 		if (bitmask == 0) {
82 			bitmask = 0x80;
83 			bitshift = 7;
84 			array_index++;
85 		}
86 		if ((remainder & 0x10) == 0x10)
87 			remainder ^= 0x13;
88 	}
89 
90 	number_of_bits = 4;
91 	while (number_of_bits != 0) {
92 		number_of_bits--;
93 		remainder <<= 1;
94 		if ((remainder & 0x10) != 0)
95 			remainder ^= 0x13;
96 	}
97 
98 	return remainder;
99 }
100 
101 static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
102 {
103 	u8 bitmask = 0x80;
104 	u8 bitshift = 7;
105 	u8 array_index = 0;
106 	int number_of_bits = number_of_bytes * 8;
107 	u16 remainder = 0;
108 
109 	while (number_of_bits != 0) {
110 		number_of_bits--;
111 		remainder <<= 1;
112 		remainder |= (data[array_index] & bitmask) >> bitshift;
113 		bitmask >>= 1;
114 		bitshift--;
115 		if (bitmask == 0) {
116 			bitmask = 0x80;
117 			bitshift = 7;
118 			array_index++;
119 		}
120 		if ((remainder & 0x100) == 0x100)
121 			remainder ^= 0xd5;
122 	}
123 
124 	number_of_bits = 8;
125 	while (number_of_bits != 0) {
126 		number_of_bits--;
127 		remainder <<= 1;
128 		if ((remainder & 0x100) != 0)
129 			remainder ^= 0xd5;
130 	}
131 
132 	return remainder & 0xff;
133 }
134 static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
135 {
136 	u8 size = 3;
137 	size += (hdr->lct / 2);
138 	return size;
139 }
140 
141 static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
142 					   u8 *buf, int *len)
143 {
144 	int idx = 0;
145 	int i;
146 	u8 crc4;
147 	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
148 	for (i = 0; i < (hdr->lct / 2); i++)
149 		buf[idx++] = hdr->rad[i];
150 	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
151 		(hdr->msg_len & 0x3f);
152 	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);
153 
154 	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
155 	buf[idx - 1] |= (crc4 & 0xf);
156 
157 	*len = idx;
158 }
159 
160 static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
161 					   u8 *buf, int buflen, u8 *hdrlen)
162 {
163 	u8 crc4;
164 	u8 len;
165 	int i;
166 	u8 idx;
167 	if (buf[0] == 0)
168 		return false;
169 	len = 3;
170 	len += ((buf[0] & 0xf0) >> 4) / 2;
171 	if (len > buflen)
172 		return false;
173 	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);
174 
175 	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
176 		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
177 		return false;
178 	}
179 
180 	hdr->lct = (buf[0] & 0xf0) >> 4;
181 	hdr->lcr = (buf[0] & 0xf);
182 	idx = 1;
183 	for (i = 0; i < (hdr->lct / 2); i++)
184 		hdr->rad[i] = buf[idx++];
185 	hdr->broadcast = (buf[idx] >> 7) & 0x1;
186 	hdr->path_msg = (buf[idx] >> 6) & 0x1;
187 	hdr->msg_len = buf[idx] & 0x3f;
188 	idx++;
189 	hdr->somt = (buf[idx] >> 7) & 0x1;
190 	hdr->eomt = (buf[idx] >> 6) & 0x1;
191 	hdr->seqno = (buf[idx] >> 4) & 0x1;
192 	idx++;
193 	*hdrlen = idx;
194 	return true;
195 }
196 
197 static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
198 				       struct drm_dp_sideband_msg_tx *raw)
199 {
200 	int idx = 0;
201 	int i;
202 	u8 *buf = raw->msg;
203 	buf[idx++] = req->req_type & 0x7f;
204 
205 	switch (req->req_type) {
206 	case DP_ENUM_PATH_RESOURCES:
207 		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
208 		idx++;
209 		break;
210 	case DP_ALLOCATE_PAYLOAD:
211 		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
212 			(req->u.allocate_payload.number_sdp_streams & 0xf);
213 		idx++;
214 		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
215 		idx++;
216 		buf[idx] = (req->u.allocate_payload.pbn >> 8);
217 		idx++;
218 		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
219 		idx++;
220 		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
221 			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
222 				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
223 			idx++;
224 		}
225 		if (req->u.allocate_payload.number_sdp_streams & 1) {
226 			i = req->u.allocate_payload.number_sdp_streams - 1;
227 			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
228 			idx++;
229 		}
230 		break;
231 	case DP_QUERY_PAYLOAD:
232 		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
233 		idx++;
234 		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
235 		idx++;
236 		break;
237 	case DP_REMOTE_DPCD_READ:
238 		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
239 		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
240 		idx++;
241 		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
242 		idx++;
243 		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
244 		idx++;
245 		buf[idx] = (req->u.dpcd_read.num_bytes);
246 		idx++;
247 		break;
248 
249 	case DP_REMOTE_DPCD_WRITE:
250 		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
251 		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
252 		idx++;
253 		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
254 		idx++;
255 		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
256 		idx++;
257 		buf[idx] = (req->u.dpcd_write.num_bytes);
258 		idx++;
259 		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
260 		idx += req->u.dpcd_write.num_bytes;
261 		break;
262 	case DP_REMOTE_I2C_READ:
263 		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
264 		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
265 		idx++;
266 		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
267 			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
268 			idx++;
269 			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
270 			idx++;
271 			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
272 			idx += req->u.i2c_read.transactions[i].num_bytes;
273 
274 			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
275 			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
276 			idx++;
277 		}
278 		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
279 		idx++;
280 		buf[idx] = (req->u.i2c_read.num_bytes_read);
281 		idx++;
282 		break;
283 
284 	case DP_REMOTE_I2C_WRITE:
285 		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
286 		idx++;
287 		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
288 		idx++;
289 		buf[idx] = (req->u.i2c_write.num_bytes);
290 		idx++;
291 		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
292 		idx += req->u.i2c_write.num_bytes;
293 		break;
294 	}
295 	raw->cur_len = idx;
296 }
297 
298 static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
299 {
300 	u8 crc4;
301 	crc4 = drm_dp_msg_data_crc4(msg, len);
302 	msg[len] = crc4;
303 }
304 
305 static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
306 					 struct drm_dp_sideband_msg_tx *raw)
307 {
308 	int idx = 0;
309 	u8 *buf = raw->msg;
310 
311 	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
312 
313 	raw->cur_len = idx;
314 }
315 
316 /* this adds a chunk of msg to the builder to get the final msg */
317 static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
318 				      u8 *replybuf, u8 replybuflen, bool hdr)
319 {
320 	int ret;
321 	u8 crc4;
322 
323 	if (hdr) {
324 		u8 hdrlen;
325 		struct drm_dp_sideband_msg_hdr recv_hdr;
326 		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
327 		if (ret == false) {
328 			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
329 			return false;
330 		}
331 
332 		/*
333 		 * ignore out-of-order messages or messages that are part of a
334 		 * failed transaction
335 		 */
336 		if (!recv_hdr.somt && !msg->have_somt)
337 			return false;
338 
339 		/* get length contained in this portion */
340 		msg->curchunk_len = recv_hdr.msg_len;
341 		msg->curchunk_hdrlen = hdrlen;
342 
343 		/* we have already gotten an somt - don't bother parsing */
344 		if (recv_hdr.somt && msg->have_somt)
345 			return false;
346 
347 		if (recv_hdr.somt) {
348 			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
349 			msg->have_somt = true;
350 		}
351 		if (recv_hdr.eomt)
352 			msg->have_eomt = true;
353 
354 		/* copy the bytes for the remainder of this header chunk */
355 		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
356 		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
357 	} else {
358 		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
359 		msg->curchunk_idx += replybuflen;
360 	}
361 
362 	if (msg->curchunk_idx >= msg->curchunk_len) {
363 		/* do CRC */
364 		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
365 		/* copy chunk into bigger msg */
366 		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
367 		msg->curlen += msg->curchunk_len - 1;
368 	}
369 	return true;
370 }
371 
372 static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
373 					       struct drm_dp_sideband_msg_reply_body *repmsg)
374 {
375 	int idx = 1;
376 	int i;
377 	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
378 	idx += 16;
379 	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
380 	idx++;
381 	if (idx > raw->curlen)
382 		goto fail_len;
383 	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
384 		if (raw->msg[idx] & 0x80)
385 			repmsg->u.link_addr.ports[i].input_port = 1;
386 
387 		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
388 		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
389 
390 		idx++;
391 		if (idx > raw->curlen)
392 			goto fail_len;
393 		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
394 		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
395 		if (repmsg->u.link_addr.ports[i].input_port == 0)
396 			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
397 		idx++;
398 		if (idx > raw->curlen)
399 			goto fail_len;
400 		if (repmsg->u.link_addr.ports[i].input_port == 0) {
401 			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
402 			idx++;
403 			if (idx > raw->curlen)
404 				goto fail_len;
405 			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
406 			idx += 16;
407 			if (idx > raw->curlen)
408 				goto fail_len;
409 			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
410 			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
411 			idx++;
412 
413 		}
414 		if (idx > raw->curlen)
415 			goto fail_len;
416 	}
417 
418 	return true;
419 fail_len:
420 	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
421 	return false;
422 }
423 
424 static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
425 						   struct drm_dp_sideband_msg_reply_body *repmsg)
426 {
427 	int idx = 1;
428 	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
429 	idx++;
430 	if (idx > raw->curlen)
431 		goto fail_len;
432 	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
433 	if (idx > raw->curlen)
434 		goto fail_len;
435 
436 	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
437 	return true;
438 fail_len:
439 	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
440 	return false;
441 }
442 
443 static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
444 						      struct drm_dp_sideband_msg_reply_body *repmsg)
445 {
446 	int idx = 1;
447 	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
448 	idx++;
449 	if (idx > raw->curlen)
450 		goto fail_len;
451 	return true;
452 fail_len:
453 	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
454 	return false;
455 }
456 
457 static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
458 						      struct drm_dp_sideband_msg_reply_body *repmsg)
459 {
460 	int idx = 1;
461 
462 	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
463 	idx++;
464 	if (idx > raw->curlen)
465 		goto fail_len;
466 	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
467 	idx++;
468 	/* TODO check */
469 	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
470 	return true;
471 fail_len:
472 	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
473 	return false;
474 }
475 
476 static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
477 							  struct drm_dp_sideband_msg_reply_body *repmsg)
478 {
479 	int idx = 1;
480 	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
481 	idx++;
482 	if (idx > raw->curlen)
483 		goto fail_len;
484 	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
485 	idx += 2;
486 	if (idx > raw->curlen)
487 		goto fail_len;
488 	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
489 	idx += 2;
490 	if (idx > raw->curlen)
491 		goto fail_len;
492 	return true;
493 fail_len:
494 	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
495 	return false;
496 }
497 
498 static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
499 							  struct drm_dp_sideband_msg_reply_body *repmsg)
500 {
501 	int idx = 1;
502 	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
503 	idx++;
504 	if (idx > raw->curlen)
505 		goto fail_len;
506 	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
507 	idx++;
508 	if (idx > raw->curlen)
509 		goto fail_len;
510 	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
511 	idx += 2;
512 	if (idx > raw->curlen)
513 		goto fail_len;
514 	return true;
515 fail_len:
516 	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
517 	return false;
518 }
519 
520 static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
521 						    struct drm_dp_sideband_msg_reply_body *repmsg)
522 {
523 	int idx = 1;
524 	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
525 	idx++;
526 	if (idx > raw->curlen)
527 		goto fail_len;
528 	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
529 	idx += 2;
530 	if (idx > raw->curlen)
531 		goto fail_len;
532 	return true;
533 fail_len:
534 	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
535 	return false;
536 }
537 
538 static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
539 					struct drm_dp_sideband_msg_reply_body *msg)
540 {
541 	memset(msg, 0, sizeof(*msg));
542 	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
543 	msg->req_type = (raw->msg[0] & 0x7f);
544 
545 	if (msg->reply_type) {
546 		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
547 		msg->u.nak.reason = raw->msg[17];
548 		msg->u.nak.nak_data = raw->msg[18];
549 		return false;
550 	}
551 
552 	switch (msg->req_type) {
553 	case DP_LINK_ADDRESS:
554 		return drm_dp_sideband_parse_link_address(raw, msg);
555 	case DP_QUERY_PAYLOAD:
556 		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
557 	case DP_REMOTE_DPCD_READ:
558 		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
559 	case DP_REMOTE_DPCD_WRITE:
560 		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
561 	case DP_REMOTE_I2C_READ:
562 		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
563 	case DP_ENUM_PATH_RESOURCES:
564 		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
565 	case DP_ALLOCATE_PAYLOAD:
566 		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
567 	default:
568 		DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
569 		return false;
570 	}
571 }
572 
573 static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
574 							   struct drm_dp_sideband_msg_req_body *msg)
575 {
576 	int idx = 1;
577 
578 	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
579 	idx++;
580 	if (idx > raw->curlen)
581 		goto fail_len;
582 
583 	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
584 	idx += 16;
585 	if (idx > raw->curlen)
586 		goto fail_len;
587 
588 	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
589 	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
590 	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
591 	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
592 	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
593 	idx++;
594 	return true;
595 fail_len:
596 	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
597 	return false;
598 }
599 
600 static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
601 							   struct drm_dp_sideband_msg_req_body *msg)
602 {
603 	int idx = 1;
604 
605 	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
606 	idx++;
607 	if (idx > raw->curlen)
608 		goto fail_len;
609 
610 	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
611 	idx += 16;
612 	if (idx > raw->curlen)
613 		goto fail_len;
614 
615 	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
616 	idx++;
617 	return true;
618 fail_len:
619 	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
620 	return false;
621 }
622 
623 static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
624 				      struct drm_dp_sideband_msg_req_body *msg)
625 {
626 	memset(msg, 0, sizeof(*msg));
627 	msg->req_type = (raw->msg[0] & 0x7f);
628 
629 	switch (msg->req_type) {
630 	case DP_CONNECTION_STATUS_NOTIFY:
631 		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
632 	case DP_RESOURCE_STATUS_NOTIFY:
633 		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
634 	default:
635 		DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
636 		return false;
637 	}
638 }
639 
640 static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
641 {
642 	struct drm_dp_sideband_msg_req_body req;
643 
644 	req.req_type = DP_REMOTE_DPCD_WRITE;
645 	req.u.dpcd_write.port_number = port_num;
646 	req.u.dpcd_write.dpcd_address = offset;
647 	req.u.dpcd_write.num_bytes = num_bytes;
648 	req.u.dpcd_write.bytes = bytes;
649 	drm_dp_encode_sideband_req(&req, msg);
650 
651 	return 0;
652 }
653 
654 static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
655 {
656 	struct drm_dp_sideband_msg_req_body req;
657 
658 	req.req_type = DP_LINK_ADDRESS;
659 	drm_dp_encode_sideband_req(&req, msg);
660 	return 0;
661 }
662 
663 static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
664 {
665 	struct drm_dp_sideband_msg_req_body req;
666 
667 	req.req_type = DP_ENUM_PATH_RESOURCES;
668 	req.u.port_num.port_number = port_num;
669 	drm_dp_encode_sideband_req(&req, msg);
670 	msg->path_msg = true;
671 	return 0;
672 }
673 
674 static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
675 				  u8 vcpi, uint16_t pbn,
676 				  u8 number_sdp_streams,
677 				  u8 *sdp_stream_sink)
678 {
679 	struct drm_dp_sideband_msg_req_body req;
680 	memset(&req, 0, sizeof(req));
681 	req.req_type = DP_ALLOCATE_PAYLOAD;
682 	req.u.allocate_payload.port_number = port_num;
683 	req.u.allocate_payload.vcpi = vcpi;
684 	req.u.allocate_payload.pbn = pbn;
685 	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
686 	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
687 		   number_sdp_streams);
688 	drm_dp_encode_sideband_req(&req, msg);
689 	msg->path_msg = true;
690 	return 0;
691 }
692 
693 static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
694 					struct drm_dp_vcpi *vcpi)
695 {
696 	int ret, vcpi_ret;
697 
698 	mutex_lock(&mgr->payload_lock);
699 	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
700 	if (ret > mgr->max_payloads) {
701 		ret = -EINVAL;
702 		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
703 		goto out_unlock;
704 	}
705 
706 	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
707 	if (vcpi_ret > mgr->max_payloads) {
708 		ret = -EINVAL;
709 		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
710 		goto out_unlock;
711 	}
712 
713 	set_bit(ret, &mgr->payload_mask);
714 	set_bit(vcpi_ret, &mgr->vcpi_mask);
715 	vcpi->vcpi = vcpi_ret + 1;
716 	mgr->proposed_vcpis[ret - 1] = vcpi;
717 out_unlock:
718 	mutex_unlock(&mgr->payload_lock);
719 	return ret;
720 }
721 
722 static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
723 				      int vcpi)
724 {
725 	int i;
726 	if (vcpi == 0)
727 		return;
728 
729 	mutex_lock(&mgr->payload_lock);
730 	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
731 	clear_bit(vcpi - 1, &mgr->vcpi_mask);
732 
733 	for (i = 0; i < mgr->max_payloads; i++) {
734 		if (mgr->proposed_vcpis[i])
735 			if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
736 				mgr->proposed_vcpis[i] = NULL;
737 				clear_bit(i + 1, &mgr->payload_mask);
738 			}
739 	}
740 	mutex_unlock(&mgr->payload_lock);
741 }
742 
743 static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
744 			      struct drm_dp_sideband_msg_tx *txmsg)
745 {
746 	bool ret;
747 
748 	/*
749 	 * All updates to txmsg->state are protected by mgr->qlock, and the two
750 	 * cases we check here are terminal states. For those the barriers
751 	 * provided by the wake_up/wait_event pair are enough.
752 	 */
753 	ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
754 	       txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
755 	return ret;
756 }
757 
758 static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
759 				    struct drm_dp_sideband_msg_tx *txmsg)
760 {
761 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
762 	int ret;
763 
764 	ret = wait_event_timeout(mgr->tx_waitq,
765 				 check_txmsg_state(mgr, txmsg),
766 				 (4 * HZ));
767 	mutex_lock(&mstb->mgr->qlock);
768 	if (ret > 0) {
769 		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
770 			ret = -EIO;
771 			goto out;
772 		}
773 	} else {
774 		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);
775 
776 		/* dump some state */
777 		ret = -EIO;
778 
779 		/* remove from q */
780 		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
781 		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
782 			list_del(&txmsg->next);
783 		}
784 
785 		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
786 		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
787 			mstb->tx_slots[txmsg->seqno] = NULL;
788 		}
789 	}
790 out:
791 	mutex_unlock(&mgr->qlock);
792 
793 	return ret;
794 }
795 
796 static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
797 {
798 	struct drm_dp_mst_branch *mstb;
799 
800 	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
801 	if (!mstb)
802 		return NULL;
803 
804 	mstb->lct = lct;
805 	if (lct > 1)
806 		memcpy(mstb->rad, rad, lct / 2);
807 	INIT_LIST_HEAD(&mstb->ports);
808 	kref_init(&mstb->kref);
809 	return mstb;
810 }
811 
812 static void drm_dp_free_mst_port(struct kref *kref);
813 
814 static void drm_dp_free_mst_branch_device(struct kref *kref)
815 {
816 	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
817 	if (mstb->port_parent) {
818 		if (list_empty(&mstb->port_parent->next))
819 			kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
820 	}
821 	kfree(mstb);
822 }
823 
824 static void drm_dp_destroy_mst_branch_device(struct kref *kref)
825 {
826 	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
827 	struct drm_dp_mst_port *port, *tmp;
828 	bool wake_tx = false;
829 
830 	/*
831 	 * init kref again to be used by ports to remove mst branch when it is
832 	 * not needed anymore
833 	 */
834 	kref_init(kref);
835 
836 	if (mstb->port_parent && list_empty(&mstb->port_parent->next))
837 		kref_get(&mstb->port_parent->kref);
838 
839 	/*
840 	 * destroy all ports - don't need lock
841 	 * as there are no more references to the mst branch
842 	 * device at this point.
843 	 */
844 	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
845 		list_del(&port->next);
846 		drm_dp_put_port(port);
847 	}
848 
849 	/* drop any tx slots msg */
850 	mutex_lock(&mstb->mgr->qlock);
851 	if (mstb->tx_slots[0]) {
852 		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
853 		mstb->tx_slots[0] = NULL;
854 		wake_tx = true;
855 	}
856 	if (mstb->tx_slots[1]) {
857 		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
858 		mstb->tx_slots[1] = NULL;
859 		wake_tx = true;
860 	}
861 	mutex_unlock(&mstb->mgr->qlock);
862 
863 	if (wake_tx)
864 		wake_up(&mstb->mgr->tx_waitq);
865 
866 	kref_put(kref, drm_dp_free_mst_branch_device);
867 }
868 
869 static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
870 {
871 	kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
872 }
873 
874 
875 static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
876 {
877 	struct drm_dp_mst_branch *mstb;
878 
879 	switch (old_pdt) {
880 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
881 	case DP_PEER_DEVICE_SST_SINK:
882 		/* remove i2c over sideband */
883 		drm_dp_mst_unregister_i2c_bus(&port->aux);
884 		break;
885 	case DP_PEER_DEVICE_MST_BRANCHING:
886 		mstb = port->mstb;
887 		port->mstb = NULL;
888 		drm_dp_put_mst_branch_device(mstb);
889 		break;
890 	}
891 }
892 
893 static void drm_dp_destroy_port(struct kref *kref)
894 {
895 	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
896 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
897 
898 	if (!port->input) {
899 		port->vcpi.num_slots = 0;
900 
901 		kfree(port->cached_edid);
902 
903 		/*
904 		 * The only time we don't have a connector
905 		 * on an output port is if the connector init
906 		 * fails.
907 		 */
908 		if (port->connector) {
909 			/* we can't destroy the connector here, as
910 			 * we might be holding the mode_config.mutex
911 			 * from an EDID retrieval */
912 
913 			mutex_lock(&mgr->destroy_connector_lock);
914 			kref_get(&port->parent->kref);
915 			list_add(&port->next, &mgr->destroy_connector_list);
916 			mutex_unlock(&mgr->destroy_connector_lock);
917 			schedule_work(&mgr->destroy_connector_work);
918 			return;
919 		}
920 		/* no need to clean up vcpi
921 		 * as if we have no connector we never setup a vcpi */
922 		drm_dp_port_teardown_pdt(port, port->pdt);
923 		port->pdt = DP_PEER_DEVICE_NONE;
924 	}
925 	kfree(port);
926 }
927 
928 static void drm_dp_put_port(struct drm_dp_mst_port *port)
929 {
930 	kref_put(&port->kref, drm_dp_destroy_port);
931 }
932 
933 static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
934 {
935 	struct drm_dp_mst_port *port;
936 	struct drm_dp_mst_branch *rmstb;
937 	if (to_find == mstb) {
938 		kref_get(&mstb->kref);
939 		return mstb;
940 	}
941 	list_for_each_entry(port, &mstb->ports, next) {
942 		if (port->mstb) {
943 			rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
944 			if (rmstb)
945 				return rmstb;
946 		}
947 	}
948 	return NULL;
949 }
950 
951 static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
952 {
953 	struct drm_dp_mst_branch *rmstb = NULL;
954 	mutex_lock(&mgr->lock);
955 	if (mgr->mst_primary)
956 		rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
957 	mutex_unlock(&mgr->lock);
958 	return rmstb;
959 }
960 
961 static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
962 {
963 	struct drm_dp_mst_port *port, *mport;
964 
965 	list_for_each_entry(port, &mstb->ports, next) {
966 		if (port == to_find) {
967 			kref_get(&port->kref);
968 			return port;
969 		}
970 		if (port->mstb) {
971 			mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
972 			if (mport)
973 				return mport;
974 		}
975 	}
976 	return NULL;
977 }
978 
979 static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
980 {
981 	struct drm_dp_mst_port *rport = NULL;
982 	mutex_lock(&mgr->lock);
983 	if (mgr->mst_primary)
984 		rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
985 	mutex_unlock(&mgr->lock);
986 	return rport;
987 }
988 
989 static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
990 {
991 	struct drm_dp_mst_port *port;
992 
993 	list_for_each_entry(port, &mstb->ports, next) {
994 		if (port->port_num == port_num) {
995 			kref_get(&port->kref);
996 			return port;
997 		}
998 	}
999 
1000 	return NULL;
1001 }
1002 
1003 /*
1004  * calculate a new RAD for this MST branch device
1005  * if parent has an LCT of 2 then it has 1 nibble of RAD,
1006  * if parent has an LCT of 3 then it has 2 nibbles of RAD,
1007  */
1008 static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
1009 				 u8 *rad)
1010 {
1011 	int parent_lct = port->parent->lct;
1012 	int shift = 4;
1013 	int idx = (parent_lct - 1) / 2;
1014 	if (parent_lct > 1) {
1015 		memcpy(rad, port->parent->rad, idx + 1);
1016 		shift = (parent_lct % 2) ? 4 : 0;
1017 	} else
1018 		rad[0] = 0;
1019 
1020 	rad[idx] |= port->port_num << shift;
1021 	return parent_lct + 1;
1022 }
1023 
1024 /*
1025  * return sends link address for new mstb
1026  */
1027 static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
1028 {
1029 	int ret;
1030 	u8 rad[6], lct;
1031 	bool send_link = false;
1032 	switch (port->pdt) {
1033 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
1034 	case DP_PEER_DEVICE_SST_SINK:
1035 		/* add i2c over sideband */
1036 		ret = drm_dp_mst_register_i2c_bus(&port->aux);
1037 		break;
1038 	case DP_PEER_DEVICE_MST_BRANCHING:
1039 		lct = drm_dp_calculate_rad(port, rad);
1040 
1041 		port->mstb = drm_dp_add_mst_branch_device(lct, rad);
1042 		port->mstb->mgr = port->mgr;
1043 		port->mstb->port_parent = port;
1044 
1045 		send_link = true;
1046 		break;
1047 	}
1048 	return send_link;
1049 }
1050 
1051 static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
1052 {
1053 	int ret;
1054 
1055 	memcpy(mstb->guid, guid, 16);
1056 
1057 	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
1058 		if (mstb->port_parent) {
1059 			ret = drm_dp_send_dpcd_write(
1060 					mstb->mgr,
1061 					mstb->port_parent,
1062 					DP_GUID,
1063 					16,
1064 					mstb->guid);
1065 		} else {
1066 
1067 			ret = drm_dp_dpcd_write(
1068 					mstb->mgr->aux,
1069 					DP_GUID,
1070 					mstb->guid,
1071 					16);
1072 		}
1073 	}
1074 }
1075 
1076 static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
1077 				int pnum,
1078 				char *proppath,
1079 				size_t proppath_size)
1080 {
1081 	int i;
1082 	char temp[8];
1083 	ksnprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
1084 	for (i = 0; i < (mstb->lct - 1); i++) {
1085 		int shift = (i % 2) ? 0 : 4;
1086 		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
1087 		ksnprintf(temp, sizeof(temp), "-%d", port_num);
1088 		strlcat(proppath, temp, proppath_size);
1089 	}
1090 	ksnprintf(temp, sizeof(temp), "-%d", pnum);
1091 	strlcat(proppath, temp, proppath_size);
1092 }
1093 
1094 static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
1095 			    struct drm_device *dev,
1096 			    struct drm_dp_link_addr_reply_port *port_msg)
1097 {
1098 	struct drm_dp_mst_port *port;
1099 	bool ret;
1100 	bool created = false;
1101 	int old_pdt = 0;
1102 	int old_ddps = 0;
1103 	port = drm_dp_get_port(mstb, port_msg->port_number);
1104 	if (!port) {
1105 		port = kzalloc(sizeof(*port), GFP_KERNEL);
1106 		if (!port)
1107 			return;
1108 		kref_init(&port->kref);
1109 		port->parent = mstb;
1110 		port->port_num = port_msg->port_number;
1111 		port->mgr = mstb->mgr;
1112 		port->aux.name = "DPMST";
1113 		port->aux.dev = dev->dev;
1114 		created = true;
1115 	} else {
1116 		old_pdt = port->pdt;
1117 		old_ddps = port->ddps;
1118 	}
1119 
1120 	port->pdt = port_msg->peer_device_type;
1121 	port->input = port_msg->input_port;
1122 	port->mcs = port_msg->mcs;
1123 	port->ddps = port_msg->ddps;
1124 	port->ldps = port_msg->legacy_device_plug_status;
1125 	port->dpcd_rev = port_msg->dpcd_revision;
1126 	port->num_sdp_streams = port_msg->num_sdp_streams;
1127 	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
1128 
1129 	/* manage mstb port lists with mgr lock - take a reference
1130 	   for this list */
1131 	if (created) {
1132 		mutex_lock(&mstb->mgr->lock);
1133 		kref_get(&port->kref);
1134 		list_add(&port->next, &mstb->ports);
1135 		mutex_unlock(&mstb->mgr->lock);
1136 	}
1137 
1138 	if (old_ddps != port->ddps) {
1139 		if (port->ddps) {
1140 			if (!port->input)
1141 				drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
1142 		} else {
1143 			port->available_pbn = 0;
1144 			}
1145 	}
1146 
1147 	if (old_pdt != port->pdt && !port->input) {
1148 		drm_dp_port_teardown_pdt(port, old_pdt);
1149 
1150 		ret = drm_dp_port_setup_pdt(port);
1151 		if (ret == true)
1152 			drm_dp_send_link_address(mstb->mgr, port->mstb);
1153 	}
1154 
1155 	if (created && !port->input) {
1156 		char proppath[255];
1157 
1158 		build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
1159 		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
1160 		if (!port->connector) {
1161 			/* remove it from the port list */
1162 			mutex_lock(&mstb->mgr->lock);
1163 			list_del(&port->next);
1164 			mutex_unlock(&mstb->mgr->lock);
1165 			/* drop port list reference */
1166 			drm_dp_put_port(port);
1167 			goto out;
1168 		}
1169 		if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
1170 		     port->pdt == DP_PEER_DEVICE_SST_SINK) &&
1171 		    port->port_num >= DP_MST_LOGICAL_PORT_0) {
1172 			port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
1173 			drm_mode_connector_set_tile_property(port->connector);
1174 		}
1175 		(*mstb->mgr->cbs->register_connector)(port->connector);
1176 	}
1177 
1178 out:
1179 	/* put reference to this port */
1180 	drm_dp_put_port(port);
1181 }
1182 
1183 static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
1184 			       struct drm_dp_connection_status_notify *conn_stat)
1185 {
1186 	struct drm_dp_mst_port *port;
1187 	int old_pdt;
1188 	int old_ddps;
1189 	bool dowork = false;
1190 	port = drm_dp_get_port(mstb, conn_stat->port_number);
1191 	if (!port)
1192 		return;
1193 
1194 	old_ddps = port->ddps;
1195 	old_pdt = port->pdt;
1196 	port->pdt = conn_stat->peer_device_type;
1197 	port->mcs = conn_stat->message_capability_status;
1198 	port->ldps = conn_stat->legacy_device_plug_status;
1199 	port->ddps = conn_stat->displayport_device_plug_status;
1200 
1201 	if (old_ddps != port->ddps) {
1202 		if (port->ddps) {
1203 			dowork = true;
1204 		} else {
1205 			port->available_pbn = 0;
1206 		}
1207 	}
1208 	if (old_pdt != port->pdt && !port->input) {
1209 		drm_dp_port_teardown_pdt(port, old_pdt);
1210 
1211 		if (drm_dp_port_setup_pdt(port))
1212 			dowork = true;
1213 	}
1214 
1215 	drm_dp_put_port(port);
1216 	if (dowork)
1217 		queue_work(system_long_wq, &mstb->mgr->work);
1218 
1219 }
1220 
1221 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
1222 							       u8 lct, u8 *rad)
1223 {
1224 	struct drm_dp_mst_branch *mstb;
1225 	struct drm_dp_mst_port *port;
1226 	int i;
1227 	/* find the port by iterating down */
1228 
1229 	mutex_lock(&mgr->lock);
1230 	mstb = mgr->mst_primary;
1231 
1232 	for (i = 0; i < lct - 1; i++) {
1233 		int shift = (i % 2) ? 0 : 4;
1234 		int port_num = (rad[i / 2] >> shift) & 0xf;
1235 
1236 		list_for_each_entry(port, &mstb->ports, next) {
1237 			if (port->port_num == port_num) {
1238 				mstb = port->mstb;
1239 				if (!mstb) {
1240 					DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
1241 					goto out;
1242 				}
1243 
1244 				break;
1245 			}
1246 		}
1247 	}
1248 	kref_get(&mstb->kref);
1249 out:
1250 	mutex_unlock(&mgr->lock);
1251 	return mstb;
1252 }
1253 
1254 static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
1255 	struct drm_dp_mst_branch *mstb,
1256 	uint8_t *guid)
1257 {
1258 	struct drm_dp_mst_branch *found_mstb;
1259 	struct drm_dp_mst_port *port;
1260 
1261 	if (memcmp(mstb->guid, guid, 16) == 0)
1262 		return mstb;
1263 
1264 
1265 	list_for_each_entry(port, &mstb->ports, next) {
1266 		if (!port->mstb)
1267 			continue;
1268 
1269 		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
1270 
1271 		if (found_mstb)
1272 			return found_mstb;
1273 	}
1274 
1275 	return NULL;
1276 }
1277 
1278 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
1279 	struct drm_dp_mst_topology_mgr *mgr,
1280 	uint8_t *guid)
1281 {
1282 	struct drm_dp_mst_branch *mstb;
1283 
1284 	/* find the port by iterating down */
1285 	mutex_lock(&mgr->lock);
1286 
1287 	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
1288 
1289 	if (mstb)
1290 		kref_get(&mstb->kref);
1291 
1292 	mutex_unlock(&mgr->lock);
1293 	return mstb;
1294 }
1295 
1296 static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
1297 					       struct drm_dp_mst_branch *mstb)
1298 {
1299 	struct drm_dp_mst_port *port;
1300 	struct drm_dp_mst_branch *mstb_child;
1301 	if (!mstb->link_address_sent)
1302 		drm_dp_send_link_address(mgr, mstb);
1303 
1304 	list_for_each_entry(port, &mstb->ports, next) {
1305 		if (port->input)
1306 			continue;
1307 
1308 		if (!port->ddps)
1309 			continue;
1310 
1311 		if (!port->available_pbn)
1312 			drm_dp_send_enum_path_resources(mgr, mstb, port);
1313 
1314 		if (port->mstb) {
1315 			mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
1316 			if (mstb_child) {
1317 				drm_dp_check_and_send_link_address(mgr, mstb_child);
1318 				drm_dp_put_mst_branch_device(mstb_child);
1319 			}
1320 		}
1321 	}
1322 }
1323 
1324 static void drm_dp_mst_link_probe_work(struct work_struct *work)
1325 {
1326 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
1327 	struct drm_dp_mst_branch *mstb;
1328 
1329 	mutex_lock(&mgr->lock);
1330 	mstb = mgr->mst_primary;
1331 	if (mstb) {
1332 		kref_get(&mstb->kref);
1333 	}
1334 	mutex_unlock(&mgr->lock);
1335 	if (mstb) {
1336 		drm_dp_check_and_send_link_address(mgr, mstb);
1337 		drm_dp_put_mst_branch_device(mstb);
1338 	}
1339 }
1340 
1341 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
1342 				 u8 *guid)
1343 {
1344 	static u8 zero_guid[16];
1345 
1346 	if (!memcmp(guid, zero_guid, 16)) {
1347 		u64 salt = get_jiffies_64();
1348 		memcpy(&guid[0], &salt, sizeof(u64));
1349 		memcpy(&guid[8], &salt, sizeof(u64));
1350 		return false;
1351 	}
1352 	return true;
1353 }
1354 
1355 #if 0
1356 static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
1357 {
1358 	struct drm_dp_sideband_msg_req_body req;
1359 
1360 	req.req_type = DP_REMOTE_DPCD_READ;
1361 	req.u.dpcd_read.port_number = port_num;
1362 	req.u.dpcd_read.dpcd_address = offset;
1363 	req.u.dpcd_read.num_bytes = num_bytes;
1364 	drm_dp_encode_sideband_req(&req, msg);
1365 
1366 	return 0;
1367 }
1368 #endif
1369 
1370 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
1371 				    bool up, u8 *msg, int len)
1372 {
1373 	int ret;
1374 	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
1375 	int tosend, total, offset;
1376 	int retries = 0;
1377 
1378 retry:
1379 	total = len;
1380 	offset = 0;
1381 	do {
1382 		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
1383 
1384 		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
1385 					&msg[offset],
1386 					tosend);
1387 		if (ret != tosend) {
1388 			if (ret == -EIO && retries < 5) {
1389 				retries++;
1390 				goto retry;
1391 			}
1392 			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
1393 
1394 			return -EIO;
1395 		}
1396 		offset += tosend;
1397 		total -= tosend;
1398 	} while (total > 0);
1399 	return 0;
1400 }
1401 
1402 static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
1403 				  struct drm_dp_sideband_msg_tx *txmsg)
1404 {
1405 	struct drm_dp_mst_branch *mstb = txmsg->dst;
1406 	u8 req_type;
1407 
1408 	/* both msg slots are full */
1409 	if (txmsg->seqno == -1) {
1410 		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
1411 			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
1412 			return -EAGAIN;
1413 		}
1414 		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
1415 			txmsg->seqno = mstb->last_seqno;
1416 			mstb->last_seqno ^= 1;
1417 		} else if (mstb->tx_slots[0] == NULL)
1418 			txmsg->seqno = 0;
1419 		else
1420 			txmsg->seqno = 1;
1421 		mstb->tx_slots[txmsg->seqno] = txmsg;
1422 	}
1423 
1424 	req_type = txmsg->msg[0] & 0x7f;
1425 	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
1426 		req_type == DP_RESOURCE_STATUS_NOTIFY)
1427 		hdr->broadcast = 1;
1428 	else
1429 		hdr->broadcast = 0;
1430 	hdr->path_msg = txmsg->path_msg;
1431 	hdr->lct = mstb->lct;
1432 	hdr->lcr = mstb->lct - 1;
1433 	if (mstb->lct > 1)
1434 		memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
1435 	hdr->seqno = txmsg->seqno;
1436 	return 0;
1437 }
1438 /*
1439  * process a single block of the next message in the sideband queue
1440  */
1441 static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
1442 				   struct drm_dp_sideband_msg_tx *txmsg,
1443 				   bool up)
1444 {
1445 	u8 chunk[48];
1446 	struct drm_dp_sideband_msg_hdr hdr;
1447 	int len, space, idx, tosend;
1448 	int ret;
1449 
1450 	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
1451 
1452 	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
1453 		txmsg->seqno = -1;
1454 		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
1455 	}
1456 
1457 	/* make hdr from dst mst - for replies use seqno
1458 	   otherwise assign one */
1459 	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
1460 	if (ret < 0)
1461 		return ret;
1462 
1463 	/* amount left to send in this message */
1464 	len = txmsg->cur_len - txmsg->cur_offset;
1465 
1466 	/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
1467 	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
1468 
1469 	tosend = min(len, space);
1470 	if (len == txmsg->cur_len)
1471 		hdr.somt = 1;
1472 	if (space >= len)
1473 		hdr.eomt = 1;
1474 
1475 
1476 	hdr.msg_len = tosend + 1;
1477 	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
1478 	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
1479 	/* add crc at end */
1480 	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
1481 	idx += tosend + 1;
1482 
1483 	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
1484 	if (ret) {
1485 		DRM_DEBUG_KMS("sideband msg failed to send\n");
1486 		return ret;
1487 	}
1488 
1489 	txmsg->cur_offset += tosend;
1490 	if (txmsg->cur_offset == txmsg->cur_len) {
1491 		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
1492 		return 1;
1493 	}
1494 	return 0;
1495 }
1496 
1497 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
1498 {
1499 	struct drm_dp_sideband_msg_tx *txmsg;
1500 	int ret;
1501 
1502 	WARN_ON(!mutex_is_locked(&mgr->qlock));
1503 
1504 	/* construct a chunk from the first msg in the tx_msg queue */
1505 	if (list_empty(&mgr->tx_msg_downq))
1506 		return;
1507 
1508 	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
1509 	ret = process_single_tx_qlock(mgr, txmsg, false);
1510 	if (ret == 1) {
1511 		/* txmsg is sent it should be in the slots now */
1512 		list_del(&txmsg->next);
1513 	} else if (ret) {
1514 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
1515 		list_del(&txmsg->next);
1516 		if (txmsg->seqno != -1)
1517 			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
1518 		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
1519 		wake_up(&mgr->tx_waitq);
1520 	}
1521 }
1522 
1523 /* called holding qlock */
1524 static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
1525 				       struct drm_dp_sideband_msg_tx *txmsg)
1526 {
1527 	int ret;
1528 
1529 	/* construct a chunk from the first msg in the tx_msg queue */
1530 	ret = process_single_tx_qlock(mgr, txmsg, true);
1531 
1532 	if (ret != 1)
1533 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
1534 
1535 	txmsg->dst->tx_slots[txmsg->seqno] = NULL;
1536 }
1537 
1538 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
1539 				 struct drm_dp_sideband_msg_tx *txmsg)
1540 {
1541 	mutex_lock(&mgr->qlock);
1542 	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
1543 	if (list_is_singular(&mgr->tx_msg_downq))
1544 		process_single_down_tx_qlock(mgr);
1545 	mutex_unlock(&mgr->qlock);
1546 }
1547 
1548 static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
1549 				     struct drm_dp_mst_branch *mstb)
1550 {
1551 	int len;
1552 	struct drm_dp_sideband_msg_tx *txmsg;
1553 	int ret;
1554 
1555 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1556 	if (!txmsg)
1557 		return;
1558 
1559 	txmsg->dst = mstb;
1560 	len = build_link_address(txmsg);
1561 
1562 	mstb->link_address_sent = true;
1563 	drm_dp_queue_down_tx(mgr, txmsg);
1564 
1565 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1566 	if (ret > 0) {
1567 		int i;
1568 
1569 		if (txmsg->reply.reply_type == 1)
1570 			DRM_DEBUG_KMS("link address nak received\n");
1571 		else {
1572 			DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
1573 			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
1574 				DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
1575 				       txmsg->reply.u.link_addr.ports[i].input_port,
1576 				       txmsg->reply.u.link_addr.ports[i].peer_device_type,
1577 				       txmsg->reply.u.link_addr.ports[i].port_number,
1578 				       txmsg->reply.u.link_addr.ports[i].dpcd_revision,
1579 				       txmsg->reply.u.link_addr.ports[i].mcs,
1580 				       txmsg->reply.u.link_addr.ports[i].ddps,
1581 				       txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
1582 				       txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
1583 				       txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
1584 			}
1585 
1586 			drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
1587 
1588 			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
1589 				drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
1590 			}
1591 			(*mgr->cbs->hotplug)(mgr);
1592 		}
1593 	} else {
1594 		mstb->link_address_sent = false;
1595 		DRM_DEBUG_KMS("link address failed %d\n", ret);
1596 	}
1597 
1598 	kfree(txmsg);
1599 }
1600 
1601 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
1602 					   struct drm_dp_mst_branch *mstb,
1603 					   struct drm_dp_mst_port *port)
1604 {
1605 	int len;
1606 	struct drm_dp_sideband_msg_tx *txmsg;
1607 	int ret;
1608 
1609 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1610 	if (!txmsg)
1611 		return -ENOMEM;
1612 
1613 	txmsg->dst = mstb;
1614 	len = build_enum_path_resources(txmsg, port->port_num);
1615 
1616 	drm_dp_queue_down_tx(mgr, txmsg);
1617 
1618 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1619 	if (ret > 0) {
1620 		if (txmsg->reply.reply_type == 1)
1621 			DRM_DEBUG_KMS("enum path resources nak received\n");
1622 		else {
1623 			if (port->port_num != txmsg->reply.u.path_resources.port_number)
1624 				DRM_ERROR("got incorrect port in response\n");
1625 			DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
1626 			       txmsg->reply.u.path_resources.avail_payload_bw_number);
1627 			port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
1628 		}
1629 	}
1630 
1631 	kfree(txmsg);
1632 	return 0;
1633 }
1634 
1635 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
1636 {
1637 	if (!mstb->port_parent)
1638 		return NULL;
1639 
1640 	if (mstb->port_parent->mstb != mstb)
1641 		return mstb->port_parent;
1642 
1643 	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
1644 }
1645 
1646 static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
1647 									 struct drm_dp_mst_branch *mstb,
1648 									 int *port_num)
1649 {
1650 	struct drm_dp_mst_branch *rmstb = NULL;
1651 	struct drm_dp_mst_port *found_port;
1652 	mutex_lock(&mgr->lock);
1653 	if (mgr->mst_primary) {
1654 		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
1655 
1656 		if (found_port) {
1657 			rmstb = found_port->parent;
1658 			kref_get(&rmstb->kref);
1659 			*port_num = found_port->port_num;
1660 		}
1661 	}
1662 	mutex_unlock(&mgr->lock);
1663 	return rmstb;
1664 }
1665 
1666 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
1667 				   struct drm_dp_mst_port *port,
1668 				   int id,
1669 				   int pbn)
1670 {
1671 	struct drm_dp_sideband_msg_tx *txmsg;
1672 	struct drm_dp_mst_branch *mstb;
1673 	int len, ret, port_num;
1674 	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
1675 	int i;
1676 
1677 	port = drm_dp_get_validated_port_ref(mgr, port);
1678 	if (!port)
1679 		return -EINVAL;
1680 
1681 	port_num = port->port_num;
1682 	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
1683 	if (!mstb) {
1684 		mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
1685 
1686 		if (!mstb) {
1687 			drm_dp_put_port(port);
1688 			return -EINVAL;
1689 		}
1690 	}
1691 
1692 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1693 	if (!txmsg) {
1694 		ret = -ENOMEM;
1695 		goto fail_put;
1696 	}
1697 
1698 	for (i = 0; i < port->num_sdp_streams; i++)
1699 		sinks[i] = i;
1700 
1701 	txmsg->dst = mstb;
1702 	len = build_allocate_payload(txmsg, port_num,
1703 				     id,
1704 				     pbn, port->num_sdp_streams, sinks);
1705 
1706 	drm_dp_queue_down_tx(mgr, txmsg);
1707 
1708 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1709 	if (ret > 0) {
1710 		if (txmsg->reply.reply_type == 1) {
1711 			ret = -EINVAL;
1712 		} else
1713 			ret = 0;
1714 	}
1715 	kfree(txmsg);
1716 fail_put:
1717 	drm_dp_put_mst_branch_device(mstb);
1718 	drm_dp_put_port(port);
1719 	return ret;
1720 }
1721 
1722 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
1723 				       int id,
1724 				       struct drm_dp_payload *payload)
1725 {
1726 	int ret;
1727 
1728 	ret = drm_dp_dpcd_write_payload(mgr, id, payload);
1729 	if (ret < 0) {
1730 		payload->payload_state = 0;
1731 		return ret;
1732 	}
1733 	payload->payload_state = DP_PAYLOAD_LOCAL;
1734 	return 0;
1735 }
1736 
1737 static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
1738 				       struct drm_dp_mst_port *port,
1739 				       int id,
1740 				       struct drm_dp_payload *payload)
1741 {
1742 	int ret;
1743 	ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
1744 	if (ret < 0)
1745 		return ret;
1746 	payload->payload_state = DP_PAYLOAD_REMOTE;
1747 	return ret;
1748 }
1749 
1750 static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
1751 					struct drm_dp_mst_port *port,
1752 					int id,
1753 					struct drm_dp_payload *payload)
1754 {
1755 	DRM_DEBUG_KMS("\n");
1756 	/* its okay for these to fail */
1757 	if (port) {
1758 		drm_dp_payload_send_msg(mgr, port, id, 0);
1759 	}
1760 
1761 	drm_dp_dpcd_write_payload(mgr, id, payload);
1762 	payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
1763 	return 0;
1764 }
1765 
1766 static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
1767 					int id,
1768 					struct drm_dp_payload *payload)
1769 {
1770 	payload->payload_state = 0;
1771 	return 0;
1772 }
1773 
1774 /**
1775  * drm_dp_update_payload_part1() - Execute payload update part 1
1776  * @mgr: manager to use.
1777  *
1778  * This iterates over all proposed virtual channels, and tries to
1779  * allocate space in the link for them. For 0->slots transitions,
1780  * this step just writes the VCPI to the MST device. For slots->0
1781  * transitions, this writes the updated VCPIs and removes the
1782  * remote VC payloads.
1783  *
1784  * after calling this the driver should generate ACT and payload
1785  * packets.
1786  */
1787 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
1788 {
1789 	int i, j;
1790 	int cur_slots = 1;
1791 	struct drm_dp_payload req_payload;
1792 	struct drm_dp_mst_port *port;
1793 
1794 	mutex_lock(&mgr->payload_lock);
1795 	for (i = 0; i < mgr->max_payloads; i++) {
1796 		/* solve the current payloads - compare to the hw ones
1797 		   - update the hw view */
1798 		req_payload.start_slot = cur_slots;
1799 		if (mgr->proposed_vcpis[i]) {
1800 			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
1801 			port = drm_dp_get_validated_port_ref(mgr, port);
1802 			if (!port) {
1803 				mutex_unlock(&mgr->payload_lock);
1804 				return -EINVAL;
1805 			}
1806 			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
1807 			req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
1808 		} else {
1809 			port = NULL;
1810 			req_payload.num_slots = 0;
1811 		}
1812 
1813 		if (mgr->payloads[i].start_slot != req_payload.start_slot) {
1814 			mgr->payloads[i].start_slot = req_payload.start_slot;
1815 		}
1816 		/* work out what is required to happen with this payload */
1817 		if (mgr->payloads[i].num_slots != req_payload.num_slots) {
1818 
1819 			/* need to push an update for this payload */
1820 			if (req_payload.num_slots) {
1821 				drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
1822 				mgr->payloads[i].num_slots = req_payload.num_slots;
1823 				mgr->payloads[i].vcpi = req_payload.vcpi;
1824 			} else if (mgr->payloads[i].num_slots) {
1825 				mgr->payloads[i].num_slots = 0;
1826 				drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]);
1827 				req_payload.payload_state = mgr->payloads[i].payload_state;
1828 				mgr->payloads[i].start_slot = 0;
1829 			}
1830 			mgr->payloads[i].payload_state = req_payload.payload_state;
1831 		}
1832 		cur_slots += req_payload.num_slots;
1833 
1834 		if (port)
1835 			drm_dp_put_port(port);
1836 	}
1837 
1838 	for (i = 0; i < mgr->max_payloads; i++) {
1839 		if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
1840 			DRM_DEBUG_KMS("removing payload %d\n", i);
1841 			for (j = i; j < mgr->max_payloads - 1; j++) {
1842 				memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload));
1843 				mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
1844 				if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) {
1845 					set_bit(j + 1, &mgr->payload_mask);
1846 				} else {
1847 					clear_bit(j + 1, &mgr->payload_mask);
1848 				}
1849 			}
1850 			memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload));
1851 			mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
1852 			clear_bit(mgr->max_payloads, &mgr->payload_mask);
1853 
1854 		}
1855 	}
1856 	mutex_unlock(&mgr->payload_lock);
1857 
1858 	return 0;
1859 }
1860 EXPORT_SYMBOL(drm_dp_update_payload_part1);
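
/*
 * Hedged usage sketch: the enable-side commit sequence a driver might
 * follow around this helper. "bpp", "pbn", "slots" and "port" are
 * illustrative driver-side values, not part of this API, and error
 * handling is omitted:
 *
 *	pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
 *	slots = drm_dp_find_vcpi_slots(mgr, pbn);
 *	drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots);
 *	drm_dp_update_payload_part1(mgr);	// step 1: write VCPIs
 *	... trigger the ACT sequence in hardware ...
 *	drm_dp_check_act_status(mgr);		// wait for ACT handled
 *	drm_dp_update_payload_part2(mgr);	// step 2: remote payloads
 */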
1861 
1862 /**
1863  * drm_dp_update_payload_part2() - Execute payload update part 2
1864  * @mgr: manager to use.
1865  *
1866  * This iterates over all proposed virtual channels, and tries to
1867  * allocate space in the link for them. For 0->slots transitions,
1868  * this step writes the remote VC payload commands. For slots->0
1869  * this just resets some internal state.
1870  */
1871 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
1872 {
1873 	struct drm_dp_mst_port *port;
1874 	int i;
1875 	int ret = 0;
1876 	mutex_lock(&mgr->payload_lock);
1877 	for (i = 0; i < mgr->max_payloads; i++) {
1878 
1879 		if (!mgr->proposed_vcpis[i])
1880 			continue;
1881 
1882 		port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
1883 
1884 		DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
1885 		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
1886 			ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
1887 		} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
1888 			ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
1889 		}
1890 		if (ret) {
1891 			mutex_unlock(&mgr->payload_lock);
1892 			return ret;
1893 		}
1894 	}
1895 	mutex_unlock(&mgr->payload_lock);
1896 	return 0;
1897 }
1898 EXPORT_SYMBOL(drm_dp_update_payload_part2);
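
/*
 * Hedged sketch of the matching disable sequence (error handling
 * omitted; "port" is the driver's MST port):
 *
 *	drm_dp_mst_reset_vcpi_slots(mgr, port);
 *	drm_dp_update_payload_part1(mgr);	// 0 slots: deletes remote VC
 *	... trigger ACT and wait via drm_dp_check_act_status() ...
 *	drm_dp_update_payload_part2(mgr);	// resets payload state
 *	drm_dp_mst_deallocate_vcpi(mgr, port);
 */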
1899 
1900 #if 0 /* unused as of yet */
1901 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
1902 				 struct drm_dp_mst_port *port,
1903 				 int offset, int size)
1904 {
1905 	int len;
1906 	struct drm_dp_sideband_msg_tx *txmsg;
1907 
1908 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1909 	if (!txmsg)
1910 		return -ENOMEM;
1911 
1912 	len = build_dpcd_read(txmsg, port->port_num, 0, 8);
1913 	txmsg->dst = port->parent;
1914 
1915 	drm_dp_queue_down_tx(mgr, txmsg);
1916 
1917 	return 0;
1918 }
1919 #endif
1920 
1921 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
1922 				  struct drm_dp_mst_port *port,
1923 				  int offset, int size, u8 *bytes)
1924 {
1925 	int len;
1926 	int ret;
1927 	struct drm_dp_sideband_msg_tx *txmsg;
1928 	struct drm_dp_mst_branch *mstb;
1929 
1930 	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
1931 	if (!mstb)
1932 		return -EINVAL;
1933 
1934 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1935 	if (!txmsg) {
1936 		ret = -ENOMEM;
1937 		goto fail_put;
1938 	}
1939 
1940 	len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
1941 	txmsg->dst = mstb;
1942 
1943 	drm_dp_queue_down_tx(mgr, txmsg);
1944 
1945 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1946 	if (ret > 0) {
		if (txmsg->reply.reply_type == 1) /* NAK */
			ret = -EINVAL;
		else
			ret = 0;
1951 	}
1952 	kfree(txmsg);
1953 fail_put:
1954 	drm_dp_put_mst_branch_device(mstb);
1955 	return ret;
1956 }
1957 
1958 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
1959 {
1960 	struct drm_dp_sideband_msg_reply_body reply;
1961 
1962 	reply.reply_type = 0;
1963 	reply.req_type = req_type;
1964 	drm_dp_encode_sideband_reply(&reply, msg);
1965 	return 0;
1966 }
1967 
1968 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
1969 				    struct drm_dp_mst_branch *mstb,
1970 				    int req_type, int seqno, bool broadcast)
1971 {
1972 	struct drm_dp_sideband_msg_tx *txmsg;
1973 
1974 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1975 	if (!txmsg)
1976 		return -ENOMEM;
1977 
1978 	txmsg->dst = mstb;
1979 	txmsg->seqno = seqno;
1980 	drm_dp_encode_up_ack_reply(txmsg, req_type);
1981 
1982 	mutex_lock(&mgr->qlock);
1983 
1984 	process_single_up_tx_qlock(mgr, txmsg);
1985 
1986 	mutex_unlock(&mgr->qlock);
1987 
1988 	kfree(txmsg);
1989 	return 0;
1990 }
1991 
1992 static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
1993 				     int dp_link_count,
1994 				     int *out)
1995 {
1996 	switch (dp_link_bw) {
1997 	default:
1998 		DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
1999 			      dp_link_bw, dp_link_count);
2000 		return false;
2001 
2002 	case DP_LINK_BW_1_62:
2003 		*out = 3 * dp_link_count;
2004 		break;
2005 	case DP_LINK_BW_2_7:
2006 		*out = 5 * dp_link_count;
2007 		break;
2008 	case DP_LINK_BW_5_4:
2009 		*out = 10 * dp_link_count;
2010 		break;
2011 	}
2012 	return true;
2013 }
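
/*
 * Worked derivation for the constants above (our own numbers, not quoted
 * from the spec): after 8b/10b coding a 1.62 Gbps lane carries 162 MB/s,
 * which is 162 * 64/54 = 192 PBN; spread over the 64 MTP time slots that
 * is 192 / 64 = 3 PBN per slot per lane. The 2.7 and 5.4 Gbps rates
 * scale the same way to 5 and 10.
 */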
2014 
2015 /**
2016  * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
2017  * @mgr: manager to set state for
2018  * @mst_state: true to enable MST on this connector - false to disable.
2019  *
2020  * This is called by the driver when it detects an MST capable device plugged
2021  * into a DP MST capable port, or when a DP MST capable device is unplugged.
2022  */
2023 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
2024 {
2025 	int ret = 0;
2026 	struct drm_dp_mst_branch *mstb = NULL;
2027 
2028 	mutex_lock(&mgr->lock);
2029 	if (mst_state == mgr->mst_state)
2030 		goto out_unlock;
2031 
2032 	mgr->mst_state = mst_state;
2033 	/* set the device into MST mode */
2034 	if (mst_state) {
2035 		WARN_ON(mgr->mst_primary);
2036 
2037 		/* get dpcd info */
2038 		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
2039 		if (ret != DP_RECEIVER_CAP_SIZE) {
2040 			DRM_DEBUG_KMS("failed to read DPCD\n");
2041 			goto out_unlock;
2042 		}
2043 
2044 		if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
2045 					      mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
2046 					      &mgr->pbn_div)) {
2047 			ret = -EINVAL;
2048 			goto out_unlock;
2049 		}
2050 
2051 		/* add initial branch device at LCT 1 */
2052 		mstb = drm_dp_add_mst_branch_device(1, NULL);
2053 		if (mstb == NULL) {
2054 			ret = -ENOMEM;
2055 			goto out_unlock;
2056 		}
2057 		mstb->mgr = mgr;
2058 
2059 		/* give this the main reference */
2060 		mgr->mst_primary = mstb;
2061 		kref_get(&mgr->mst_primary->kref);
2062 
2063 		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2064 							 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
2065 		if (ret < 0) {
2066 			goto out_unlock;
2067 		}
2068 
2069 		{
2070 			struct drm_dp_payload reset_pay;
2071 			reset_pay.start_slot = 0;
2072 			reset_pay.num_slots = 0x3f;
2073 			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
2074 		}
2075 
2076 		queue_work(system_long_wq, &mgr->work);
2077 
2078 		ret = 0;
2079 	} else {
2080 		/* disable MST on the device */
2081 		mstb = mgr->mst_primary;
2082 		mgr->mst_primary = NULL;
2083 		/* this can fail if the device is gone */
2084 		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
2085 		ret = 0;
2086 		memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
2087 		mgr->payload_mask = 0;
2088 		set_bit(0, &mgr->payload_mask);
2089 		mgr->vcpi_mask = 0;
2090 	}
2091 
2092 out_unlock:
2093 	mutex_unlock(&mgr->lock);
2094 	if (mstb)
2095 		drm_dp_put_mst_branch_device(mstb);
	return ret;
}
2099 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
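
/*
 * Hedged usage sketch: a driver's detect path might flip MST mode after
 * its own sink probe (the probe itself is driver-specific and only
 * sketched here):
 *
 *	is_mst = ...;	// e.g. check DP_MSTM_CAP in the sink's DPCD
 *	ret = drm_dp_mst_topology_mgr_set_mst(mgr, is_mst);
 *	if (ret < 0)
 *		... fall back to SST handling ...
 */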
2100 
2101 /**
2102  * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
2103  * @mgr: manager to suspend
2104  *
2105  * This function tells the MST device that we can't handle UP messages
2106  * anymore. This should stop it from sending any since we are suspended.
2107  */
2108 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
2109 {
2110 	mutex_lock(&mgr->lock);
2111 	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2112 			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
2113 	mutex_unlock(&mgr->lock);
2114 	flush_work(&mgr->work);
2115 	flush_work(&mgr->destroy_connector_work);
2116 }
2117 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
2118 
2119 /**
2120  * drm_dp_mst_topology_mgr_resume() - resume the MST manager
2121  * @mgr: manager to resume
2122  *
 * This will fetch the DPCD and see if the device is still there;
 * if it is, it will rewrite the MSTM control bits and return.
 *
 * If the device has gone away, this returns -1 and the driver should
 * do a full MST reprobe, in case we were undocked.
2128  */
2129 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
2130 {
2131 	int ret = 0;
2132 
2133 	mutex_lock(&mgr->lock);
2134 
2135 	if (mgr->mst_primary) {
2136 		int sret;
2137 		u8 guid[16];
2138 
2139 		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
2140 		if (sret != DP_RECEIVER_CAP_SIZE) {
2141 			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
2142 			ret = -1;
2143 			goto out_unlock;
2144 		}
2145 
2146 		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2147 					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
2148 		if (ret < 0) {
2149 			DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
2150 			ret = -1;
2151 			goto out_unlock;
2152 		}
2153 
2154 		/* Some hubs forget their guids after they resume */
2155 		sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
2156 		if (sret != 16) {
2157 			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
2158 			ret = -1;
2159 			goto out_unlock;
2160 		}
2161 		drm_dp_check_mstb_guid(mgr->mst_primary, guid);
2162 
		ret = 0;
	} else {
		ret = -1;
	}
2166 
2167 out_unlock:
2168 	mutex_unlock(&mgr->lock);
2169 	return ret;
2170 }
2171 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
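
/*
 * Hedged suspend/resume usage sketch:
 *
 *	// on system suspend
 *	drm_dp_mst_topology_mgr_suspend(mgr);
 *
 *	// on system resume
 *	if (drm_dp_mst_topology_mgr_resume(mgr) < 0) {
 *		// device vanished (e.g. undocked): drop MST state, reprobe
 *		drm_dp_mst_topology_mgr_set_mst(mgr, false);
 *	}
 */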
2172 
2173 static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
2174 {
2175 	int len;
2176 	u8 replyblock[32];
2177 	int replylen, origlen, curreply;
2178 	int ret;
2179 	struct drm_dp_sideband_msg_rx *msg;
2180 	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
2181 	msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
2182 
2183 	len = min(mgr->max_dpcd_transaction_bytes, 16);
2184 	ret = drm_dp_dpcd_read(mgr->aux, basereg,
2185 			       replyblock, len);
2186 	if (ret != len) {
2187 		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
2188 		return false;
2189 	}
2190 	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
2191 	if (!ret) {
2192 		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
2193 		return false;
2194 	}
2195 	replylen = msg->curchunk_len + msg->curchunk_hdrlen;
2196 
2197 	origlen = replylen;
2198 	replylen -= len;
2199 	curreply = len;
2200 	while (replylen > 0) {
2201 		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
2202 		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
2203 				    replyblock, len);
2204 		if (ret != len) {
2205 			DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
2206 				      len, ret);
2207 			return false;
2208 		}
2209 
2210 		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
2211 		if (!ret) {
2212 			DRM_DEBUG_KMS("failed to build sideband msg\n");
2213 			return false;
2214 		}
2215 
2216 		curreply += len;
2217 		replylen -= len;
2218 	}
2219 	return true;
2220 }
2221 
2222 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
2223 {
2224 	int ret = 0;
2225 
2226 	if (!drm_dp_get_one_sb_msg(mgr, false)) {
2227 		memset(&mgr->down_rep_recv, 0,
2228 		       sizeof(struct drm_dp_sideband_msg_rx));
2229 		return 0;
2230 	}
2231 
2232 	if (mgr->down_rep_recv.have_eomt) {
2233 		struct drm_dp_sideband_msg_tx *txmsg;
2234 		struct drm_dp_mst_branch *mstb;
2235 		int slot = -1;
2236 		mstb = drm_dp_get_mst_branch_device(mgr,
2237 						    mgr->down_rep_recv.initial_hdr.lct,
2238 						    mgr->down_rep_recv.initial_hdr.rad);
2239 
2240 		if (!mstb) {
2241 			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
2242 			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2243 			return 0;
2244 		}
2245 
2246 		/* find the message */
2247 		slot = mgr->down_rep_recv.initial_hdr.seqno;
2248 		mutex_lock(&mgr->qlock);
2249 		txmsg = mstb->tx_slots[slot];
2250 		/* remove from slots */
2251 		mutex_unlock(&mgr->qlock);
2252 
2253 		if (!txmsg) {
2254 			DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
2255 			       mstb,
2256 			       mgr->down_rep_recv.initial_hdr.seqno,
2257 			       mgr->down_rep_recv.initial_hdr.lct,
2258 				      mgr->down_rep_recv.initial_hdr.rad[0],
2259 				      mgr->down_rep_recv.msg[0]);
2260 			drm_dp_put_mst_branch_device(mstb);
2261 			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2262 			return 0;
2263 		}
2264 
2265 		drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
2266 		if (txmsg->reply.reply_type == 1) {
2267 			DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);
2268 		}
2269 
		memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));

		mutex_lock(&mgr->qlock);
		txmsg->state = DRM_DP_SIDEBAND_TX_RX;
		mstb->tx_slots[slot] = NULL;
		mutex_unlock(&mgr->qlock);

		/* drop the mstb reference only after its tx_slots entry is cleared */
		drm_dp_put_mst_branch_device(mstb);
2277 
2278 		wake_up(&mgr->tx_waitq);
2279 	}
2280 	return ret;
2281 }
2282 
2283 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
2284 {
2285 	int ret = 0;
2286 
2287 	if (!drm_dp_get_one_sb_msg(mgr, true)) {
2288 		memset(&mgr->up_req_recv, 0,
2289 		       sizeof(struct drm_dp_sideband_msg_rx));
2290 		return 0;
2291 	}
2292 
2293 	if (mgr->up_req_recv.have_eomt) {
2294 		struct drm_dp_sideband_msg_req_body msg;
2295 		struct drm_dp_mst_branch *mstb = NULL;
2296 		bool seqno;
2297 
2298 		if (!mgr->up_req_recv.initial_hdr.broadcast) {
2299 			mstb = drm_dp_get_mst_branch_device(mgr,
2300 							    mgr->up_req_recv.initial_hdr.lct,
2301 							    mgr->up_req_recv.initial_hdr.rad);
2302 			if (!mstb) {
2303 				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2304 				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2305 				return 0;
2306 			}
2307 		}
2308 
2309 		seqno = mgr->up_req_recv.initial_hdr.seqno;
2310 		drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
2311 
2312 		if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
2313 			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
2314 
2315 			if (!mstb)
2316 				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
2317 
2318 			if (!mstb) {
2319 				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2320 				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2321 				return 0;
2322 			}
2323 
2324 			drm_dp_update_port(mstb, &msg.u.conn_stat);
2325 
2326 			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
2327 			(*mgr->cbs->hotplug)(mgr);
2328 
2329 		} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
2330 			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
2331 			if (!mstb)
2332 				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
2333 
2334 			if (!mstb) {
2335 				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2336 				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2337 				return 0;
2338 			}
2339 
2340 			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
2341 		}
2342 
2343 		if (mstb)
2344 			drm_dp_put_mst_branch_device(mstb);
2345 
2346 		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2347 	}
2348 	return ret;
2349 }
2350 
2351 /**
2352  * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
2353  * @mgr: manager to notify irq for.
2354  * @esi: 4 bytes from SINK_COUNT_ESI
2355  * @handled: whether the hpd interrupt was consumed or not
2356  *
2357  * This should be called from the driver when it detects a short IRQ,
2358  * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
2359  * topology manager will process the sideband messages received as a result
2360  * of this.
2361  */
2362 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
2363 {
2364 	int ret = 0;
2365 	int sc;
2366 	*handled = false;
2367 	sc = esi[0] & 0x3f;
2368 
2369 	if (sc != mgr->sink_count) {
2370 		mgr->sink_count = sc;
2371 		*handled = true;
2372 	}
2373 
2374 	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
2375 		ret = drm_dp_mst_handle_down_rep(mgr);
2376 		*handled = true;
2377 	}
2378 
2379 	if (esi[1] & DP_UP_REQ_MSG_RDY) {
2380 		ret |= drm_dp_mst_handle_up_req(mgr);
2381 		*handled = true;
2382 	}
2383 
2384 	drm_dp_mst_kick_tx(mgr);
2385 	return ret;
2386 }
2387 EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
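
/*
 * Hedged sketch of a short-pulse handler built on this (the 14-byte ESI
 * read and 3-byte ack write mirror what existing drivers do, but the
 * retry policy is driver-specific):
 *
 *	u8 esi[14];
 *	bool handled;
 *
 *	drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, 14);
 *	drm_dp_mst_hpd_irq(mgr, esi, &handled);
 *	if (handled)
 *		drm_dp_dpcd_write(mgr->aux, DP_SINK_COUNT_ESI + 1,
 *				  &esi[1], 3);	// ack serviced IRQ bits
 */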
2388 
2389 /**
2390  * drm_dp_mst_detect_port() - get connection status for an MST port
2391  * @connector: DRM connector for this port
2392  * @mgr: manager for this port
2393  * @port: unverified pointer to a port
2394  *
 * This returns the current connection state for a port. It validates that
 * the port pointer still exists, so the caller doesn't need to hold a
 * reference.
2397  */
2398 enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
2399 						 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2400 {
2401 	enum drm_connector_status status = connector_status_disconnected;
2402 
	/* we need to search for the port in the mgr in case it's gone */
2404 	port = drm_dp_get_validated_port_ref(mgr, port);
2405 	if (!port)
2406 		return connector_status_disconnected;
2407 
2408 	if (!port->ddps)
2409 		goto out;
2410 
2411 	switch (port->pdt) {
2412 	case DP_PEER_DEVICE_NONE:
2413 	case DP_PEER_DEVICE_MST_BRANCHING:
2414 		break;
2415 
2416 	case DP_PEER_DEVICE_SST_SINK:
2417 		status = connector_status_connected;
2418 		/* for logical ports - cache the EDID */
2419 		if (port->port_num >= 8 && !port->cached_edid) {
2420 			port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
2421 		}
2422 		break;
2423 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
2424 		if (port->ldps)
2425 			status = connector_status_connected;
2426 		break;
2427 	}
2428 out:
2429 	drm_dp_put_port(port);
2430 	return status;
2431 }
2432 EXPORT_SYMBOL(drm_dp_mst_detect_port);
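
/*
 * Hedged sketch of a connector ->detect hook using this helper (the
 * containing struct and accessor are hypothetical):
 *
 *	static enum drm_connector_status
 *	my_mst_detect(struct drm_connector *connector, bool force)
 *	{
 *		struct my_mst_connector *c = to_my_mst_connector(connector);
 *
 *		return drm_dp_mst_detect_port(connector, c->mgr, c->port);
 *	}
 */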
2433 
2434 /**
2435  * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
2436  * @mgr: manager for this port
2437  * @port: unverified pointer to a port.
2438  *
2439  * This returns whether the port supports audio or not.
2440  */
2441 bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
2442 					struct drm_dp_mst_port *port)
2443 {
2444 	bool ret = false;
2445 
2446 	port = drm_dp_get_validated_port_ref(mgr, port);
2447 	if (!port)
2448 		return ret;
2449 	ret = port->has_audio;
2450 	drm_dp_put_port(port);
2451 	return ret;
2452 }
2453 EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
2454 
2455 /**
2456  * drm_dp_mst_get_edid() - get EDID for an MST port
2457  * @connector: toplevel connector to get EDID for
2458  * @mgr: manager for this port
2459  * @port: unverified pointer to a port.
2460  *
 * This returns an EDID for the port connected to a connector.
 * It validates that the pointer still exists, so the caller doesn't need
 * to hold a reference.
2464  */
2465 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2466 {
2467 	struct edid *edid = NULL;
2468 
	/* we need to search for the port in the mgr in case it's gone */
2470 	port = drm_dp_get_validated_port_ref(mgr, port);
2471 	if (!port)
2472 		return NULL;
2473 
	if (port->cached_edid) {
		edid = drm_edid_duplicate(port->cached_edid);
	} else {
		edid = drm_get_edid(connector, &port->aux.ddc);
		drm_mode_connector_set_tile_property(connector);
	}
2480 	port->has_audio = drm_detect_monitor_audio(edid);
2481 	drm_dp_put_port(port);
2482 	return edid;
2483 }
2484 EXPORT_SYMBOL(drm_dp_mst_get_edid);
2485 
2486 /**
2487  * drm_dp_find_vcpi_slots() - find slots for this PBN value
2488  * @mgr: manager to use
2489  * @pbn: payload bandwidth to convert into slots.
2490  */
2491 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
2492 			   int pbn)
2493 {
2494 	int num_slots;
2495 
2496 	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
2497 
2498 	/* max. time slots - one slot for MTP header */
2499 	if (num_slots > 63)
2500 		return -ENOSPC;
2501 	return num_slots;
2502 }
2503 EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
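
/*
 * Worked example (reusing the first self-test vector from
 * test_calc_pbn_mode() below): a 154 MHz, 30 bpp stream costs 689 PBN;
 * with pbn_div = 40 (HBR2 x4) that is DIV_ROUND_UP(689, 40) = 18 of the
 * 63 usable time slots.
 */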
2504 
2505 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
2506 			    struct drm_dp_vcpi *vcpi, int pbn, int slots)
2507 {
2508 	int ret;
2509 
2510 	/* max. time slots - one slot for MTP header */
2511 	if (slots > 63)
2512 		return -ENOSPC;
2513 
2514 	vcpi->pbn = pbn;
2515 	vcpi->aligned_pbn = slots * mgr->pbn_div;
2516 	vcpi->num_slots = slots;
2517 
2518 	ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
2519 	if (ret < 0)
2520 		return ret;
2521 	return 0;
2522 }
2523 
2524 /**
2525  * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
2526  * @mgr: manager for this port
2527  * @port: port to allocate a virtual channel for.
2528  * @pbn: payload bandwidth number to request
2529  * @slots: returned number of slots for this PBN.
2530  */
2531 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
2532 			      struct drm_dp_mst_port *port, int pbn, int slots)
2533 {
2534 	int ret;
2535 
2536 	port = drm_dp_get_validated_port_ref(mgr, port);
2537 	if (!port)
2538 		return false;
2539 
	if (slots < 0) {
		drm_dp_put_port(port);
		return false;
	}
2542 
2543 	if (port->vcpi.vcpi > 0) {
2544 		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
2545 		if (pbn == port->vcpi.pbn) {
2546 			drm_dp_put_port(port);
2547 			return true;
2548 		}
2549 	}
2550 
2551 	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
2552 	if (ret) {
2553 		DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
2554 				DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
2555 		goto out;
2556 	}
2557 	DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
2558 			pbn, port->vcpi.num_slots);
2559 
2560 	drm_dp_put_port(port);
2561 	return true;
out:
	drm_dp_put_port(port);
	return false;
2564 }
2565 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
2566 
2567 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2568 {
2569 	int slots = 0;
2570 	port = drm_dp_get_validated_port_ref(mgr, port);
2571 	if (!port)
2572 		return slots;
2573 
2574 	slots = port->vcpi.num_slots;
2575 	drm_dp_put_port(port);
2576 	return slots;
2577 }
2578 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
2579 
2580 /**
2581  * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
2582  * @mgr: manager for this port
2583  * @port: unverified pointer to a port.
2584  *
 * This just resets the number of slots for the port's VCPI for later programming.
2586  */
2587 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2588 {
2589 	port = drm_dp_get_validated_port_ref(mgr, port);
2590 	if (!port)
2591 		return;
2592 	port->vcpi.num_slots = 0;
2593 	drm_dp_put_port(port);
2594 }
2595 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
2596 
2597 /**
2598  * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
2599  * @mgr: manager for this port
2600  * @port: unverified port to deallocate vcpi for
2601  */
2602 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2603 {
2604 	port = drm_dp_get_validated_port_ref(mgr, port);
2605 	if (!port)
2606 		return;
2607 
2608 	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
2609 	port->vcpi.num_slots = 0;
2610 	port->vcpi.pbn = 0;
2611 	port->vcpi.aligned_pbn = 0;
2612 	port->vcpi.vcpi = 0;
2613 	drm_dp_put_port(port);
2614 }
2615 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
2616 
2617 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
2618 				     int id, struct drm_dp_payload *payload)
2619 {
2620 	u8 payload_alloc[3], status;
2621 	int ret;
2622 	int retries = 0;
2623 
2624 	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
2625 			   DP_PAYLOAD_TABLE_UPDATED);
2626 
2627 	payload_alloc[0] = id;
2628 	payload_alloc[1] = payload->start_slot;
2629 	payload_alloc[2] = payload->num_slots;
2630 
2631 	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
2632 	if (ret != 3) {
2633 		DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
2634 		goto fail;
2635 	}
2636 
2637 retry:
2638 	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
2639 	if (ret < 0) {
2640 		DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
2641 		goto fail;
2642 	}
2643 
2644 	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
2645 		retries++;
2646 		if (retries < 20) {
2647 			usleep_range(10000, 20000);
2648 			goto retry;
2649 		}
2650 		DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
2651 		ret = -EINVAL;
2652 		goto fail;
2653 	}
2654 	ret = 0;
2655 fail:
2656 	return ret;
2657 }
2658 
2659 
2660 /**
2661  * drm_dp_check_act_status() - Check ACT handled status.
2662  * @mgr: manager to use
2663  *
2664  * Check the payload status bits in the DPCD for ACT handled completion.
2665  */
2666 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
2667 {
2668 	u8 status;
2669 	int ret;
2670 	int count = 0;
2671 
2672 	do {
2673 		ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
2674 
2675 		if (ret < 0) {
2676 			DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
2677 			goto fail;
2678 		}
2679 
2680 		if (status & DP_PAYLOAD_ACT_HANDLED)
2681 			break;
2682 		count++;
2683 		udelay(100);
2684 
2685 	} while (count < 30);
2686 
2687 	if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
2688 		DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
2689 		ret = -EINVAL;
2690 		goto fail;
2691 	}
2692 	return 0;
2693 fail:
2694 	return ret;
2695 }
2696 EXPORT_SYMBOL(drm_dp_check_act_status);
2697 
2698 /**
2699  * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
2700  * @clock: dot clock for the mode
2701  * @bpp: bpp for the mode.
2702  *
2703  * This uses the formula in the spec to calculate the PBN value for a mode.
2704  */
2705 int drm_dp_calc_pbn_mode(int clock, int bpp)
2706 {
2707 	u64 kbps;
2708 	s64 peak_kbps;
2709 	u32 numerator;
2710 	u32 denominator;
2711 
2712 	kbps = clock * bpp;
2713 
2714 	/*
	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
	 * The unit of 54/64 Mbytes/sec is an arbitrary unit chosen as a
	 * common multiplier to render an integer PBN for all link rate/lane
	 * count combinations.
	 * Calculate:
	 * peak_kbps *= (1006/1000)
	 * peak_kbps *= (64/54)
	 * peak_kbps *= 8    convert to bytes
2723 	 */
2724 
2725 	numerator = 64 * 1006;
2726 	denominator = 54 * 8 * 1000 * 1000;
2727 
2728 	kbps *= numerator;
2729 	peak_kbps = drm_fixp_from_fraction(kbps, denominator);
2730 
2731 	return drm_fixp2int_ceil(peak_kbps);
2732 }
2733 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
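
/*
 * Worked example matching the first self-test vector below:
 * for a 154000 kHz clock at 30 bpp, kbps = 154000 * 30 = 4620000 and
 * PBN = ceil(4620000 * 64 * 1006 / (54 * 8 * 1000 * 1000))
 *     = ceil(297454080000 / 432000000) = ceil(688.55...) = 689.
 */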
2734 
2735 static int test_calc_pbn_mode(void)
2736 {
2737 	int ret;
2738 	ret = drm_dp_calc_pbn_mode(154000, 30);
2739 	if (ret != 689) {
2740 		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
2741 				154000, 30, 689, ret);
2742 		return -EINVAL;
2743 	}
2744 	ret = drm_dp_calc_pbn_mode(234000, 30);
2745 	if (ret != 1047) {
2746 		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
2747 				234000, 30, 1047, ret);
2748 		return -EINVAL;
2749 	}
2750 	ret = drm_dp_calc_pbn_mode(297000, 24);
2751 	if (ret != 1063) {
2752 		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
2753 				297000, 24, 1063, ret);
2754 		return -EINVAL;
2755 	}
2756 	return 0;
2757 }
2758 
/* we want to kick the TX after we've ACKed the up/down IRQs. */
2760 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
2761 {
2762 	queue_work(system_long_wq, &mgr->tx_work);
2763 }
2764 
2765 static void drm_dp_mst_dump_mstb(struct seq_file *m,
2766 				 struct drm_dp_mst_branch *mstb)
2767 {
2768 	struct drm_dp_mst_port *port;
2769 	int tabs = mstb->lct;
2770 	char prefix[10];
2771 	int i;
2772 
2773 	for (i = 0; i < tabs; i++)
2774 		prefix[i] = '\t';
2775 	prefix[i] = '\0';
2776 
2777 	seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
2778 	list_for_each_entry(port, &mstb->ports, next) {
2779 		seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->input, port->pdt, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector);
2780 		if (port->mstb)
2781 			drm_dp_mst_dump_mstb(m, port->mstb);
2782 	}
2783 }
2784 
2785 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
2786 				  char *buf)
2787 {
2788 	int ret;
2789 	int i;
2790 	for (i = 0; i < 4; i++) {
2791 		ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
2792 		if (ret != 16)
2793 			break;
2794 	}
2795 	if (i == 4)
2796 		return true;
2797 	return false;
2798 }
2799 
2800 static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
2801 			       struct drm_dp_mst_port *port, char *name,
2802 			       int namelen)
2803 {
2804 	struct edid *mst_edid;
2805 
2806 	mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
2807 	drm_edid_get_monitor_name(mst_edid, name, namelen);
2808 }
2809 
2810 /**
2811  * drm_dp_mst_dump_topology(): dump topology to seq file.
2812  * @m: seq_file to dump output to
2813  * @mgr: manager to dump current topology for.
2814  *
 * Helper to dump the MST topology to a seq_file for debugfs.
2816  */
2817 void drm_dp_mst_dump_topology(struct seq_file *m,
2818 			      struct drm_dp_mst_topology_mgr *mgr)
2819 {
2820 	int i;
2821 	struct drm_dp_mst_port *port;
2822 
2823 	mutex_lock(&mgr->lock);
2824 	if (mgr->mst_primary)
2825 		drm_dp_mst_dump_mstb(m, mgr->mst_primary);
2826 
2827 	/* dump VCPIs */
2828 	mutex_unlock(&mgr->lock);
2829 
2830 	mutex_lock(&mgr->payload_lock);
2831 	seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
2832 		mgr->max_payloads);
2833 
2834 	for (i = 0; i < mgr->max_payloads; i++) {
2835 		if (mgr->proposed_vcpis[i]) {
2836 			char name[14];
2837 
2838 			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
2839 			fetch_monitor_name(mgr, port, name, sizeof(name));
2840 			seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
2841 				   port->port_num, port->vcpi.vcpi,
2842 				   port->vcpi.num_slots,
2843 				   (*name != 0) ? name :  "Unknown");
		} else {
			seq_printf(m, "vcpi %d: unused\n", i);
		}
2846 	}
2847 	for (i = 0; i < mgr->max_payloads; i++) {
2848 		seq_printf(m, "payload %d: %d, %d, %d\n",
2849 			   i,
2850 			   mgr->payloads[i].payload_state,
2851 			   mgr->payloads[i].start_slot,
			   mgr->payloads[i].num_slots);
	}
2856 	mutex_unlock(&mgr->payload_lock);
2857 
2858 	mutex_lock(&mgr->lock);
2859 	if (mgr->mst_primary) {
2860 		u8 buf[64];
2861 		bool bret;
2862 		int ret;
2863 		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
2864 		seq_printf(m, "dpcd: ");
2865 		for (i = 0; i < DP_RECEIVER_CAP_SIZE; i++)
2866 			seq_printf(m, "%02x ", buf[i]);
2867 		seq_printf(m, "\n");
2868 		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
2869 		seq_printf(m, "faux/mst: ");
2870 		for (i = 0; i < 2; i++)
2871 			seq_printf(m, "%02x ", buf[i]);
2872 		seq_printf(m, "\n");
2873 		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
2874 		seq_printf(m, "mst ctrl: ");
2875 		for (i = 0; i < 1; i++)
2876 			seq_printf(m, "%02x ", buf[i]);
2877 		seq_printf(m, "\n");
2878 
2879 		/* dump the standard OUI branch header */
2880 		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
2881 		seq_printf(m, "branch oui: ");
2882 		for (i = 0; i < 0x3; i++)
2883 			seq_printf(m, "%02x", buf[i]);
2884 		seq_printf(m, " devid: ");
2885 		for (i = 0x3; i < 0x8 && buf[i]; i++)
2886 			seq_printf(m, "%c", buf[i]);
2887 
2888 		seq_printf(m, " revision: hw: %x.%x sw: %x.%x", buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
2889 		seq_printf(m, "\n");
2890 		bret = dump_dp_payload_table(mgr, buf);
		if (bret) {
2892 			seq_printf(m, "payload table: ");
2893 			for (i = 0; i < 63; i++)
2894 				seq_printf(m, "%02x ", buf[i]);
2895 			seq_printf(m, "\n");
2896 		}
2897 
2898 	}
2899 
	mutex_unlock(&mgr->lock);
}
2903 EXPORT_SYMBOL(drm_dp_mst_dump_topology);
2904 
2905 static void drm_dp_tx_work(struct work_struct *work)
2906 {
2907 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
2908 
2909 	mutex_lock(&mgr->qlock);
2910 	if (!list_empty(&mgr->tx_msg_downq))
2911 		process_single_down_tx_qlock(mgr);
2912 	mutex_unlock(&mgr->qlock);
2913 }
2914 
2915 static void drm_dp_free_mst_port(struct kref *kref)
2916 {
2917 	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
2918 	kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
2919 	kfree(port);
2920 }
2921 
2922 static void drm_dp_destroy_connector_work(struct work_struct *work)
2923 {
2924 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
2925 	struct drm_dp_mst_port *port;
2926 	bool send_hotplug = false;
2927 	/*
2928 	 * Not a regular list traverse as we have to drop the destroy
2929 	 * connector lock before destroying the connector, to avoid AB->BA
2930 	 * ordering between this lock and the config mutex.
2931 	 */
2932 	for (;;) {
2933 		mutex_lock(&mgr->destroy_connector_lock);
2934 		port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
2935 		if (!port) {
2936 			mutex_unlock(&mgr->destroy_connector_lock);
2937 			break;
2938 		}
2939 		list_del(&port->next);
2940 		mutex_unlock(&mgr->destroy_connector_lock);
2941 
2942 		kref_init(&port->kref);
2943 		INIT_LIST_HEAD(&port->next);
2944 
2945 		mgr->cbs->destroy_connector(mgr, port->connector);
2946 
2947 		drm_dp_port_teardown_pdt(port, port->pdt);
2948 		port->pdt = DP_PEER_DEVICE_NONE;
2949 
2950 		if (!port->input && port->vcpi.vcpi > 0) {
2951 			drm_dp_mst_reset_vcpi_slots(mgr, port);
2952 			drm_dp_update_payload_part1(mgr);
2953 			drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
2954 		}
2955 
2956 		kref_put(&port->kref, drm_dp_free_mst_port);
2957 		send_hotplug = true;
2958 	}
2959 	if (send_hotplug)
2960 		(*mgr->cbs->hotplug)(mgr);
2961 }
2962 
2963 /**
2964  * drm_dp_mst_topology_mgr_init - initialise a topology manager
2965  * @mgr: manager struct to initialise
2966  * @dev: device providing this structure - for i2c addition.
2967  * @aux: DP helper aux channel to talk to this device
2968  * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
2969  * @max_payloads: maximum number of payloads this GPU can source
2970  * @conn_base_id: the connector object ID the MST device is connected to.
2971  *
2972  * Return 0 for success, or negative error code on failure
2973  */
2974 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
2975 				 struct drm_device *dev, struct drm_dp_aux *aux,
2976 				 int max_dpcd_transaction_bytes,
2977 				 int max_payloads, int conn_base_id)
2978 {
2979 	lockinit(&mgr->lock, "drmml", 0, LK_CANRECURSE);
2980 	lockinit(&mgr->qlock, "drmmql", 0, LK_CANRECURSE);
2981 	lockinit(&mgr->payload_lock, "drmmpl", 0, LK_CANRECURSE);
2982 	lockinit(&mgr->destroy_connector_lock, "drmmdcl", 0, LK_CANRECURSE);
2983 	INIT_LIST_HEAD(&mgr->tx_msg_downq);
2984 	INIT_LIST_HEAD(&mgr->destroy_connector_list);
2985 	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
2986 	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
2987 	INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
2988 	init_waitqueue_head(&mgr->tx_waitq);
2989 	mgr->dev = dev;
2990 	mgr->aux = aux;
2991 	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
2992 	mgr->max_payloads = max_payloads;
2993 	mgr->conn_base_id = conn_base_id;
2994 	if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
2995 	    max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
2996 		return -EINVAL;
2997 	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
2998 	if (!mgr->payloads)
2999 		return -ENOMEM;
3000 	mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
3001 	if (!mgr->proposed_vcpis)
3002 		return -ENOMEM;
3003 	set_bit(0, &mgr->payload_mask);
3004 	if (test_calc_pbn_mode() < 0)
3005 		DRM_ERROR("MST PBN self-test failed\n");
3006 
3007 	return 0;
3008 }
3009 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
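
/*
 * Hedged init sketch (driver-private names are illustrative; i915-like
 * drivers pass a 16-byte DPCD transaction limit and a small payload
 * count):
 *
 *	priv->mst_mgr.cbs = &my_mst_cbs;	// ->hotplug etc.
 *	ret = drm_dp_mst_topology_mgr_init(&priv->mst_mgr, dev, &priv->aux,
 *					   16, 3, connector->base.id);
 *	if (ret)
 *		return ret;
 */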
3010 
3011 /**
3012  * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
3013  * @mgr: manager to destroy
3014  */
3015 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
3016 {
3017 	flush_work(&mgr->work);
3018 	flush_work(&mgr->destroy_connector_work);
3019 	mutex_lock(&mgr->payload_lock);
3020 	kfree(mgr->payloads);
3021 	mgr->payloads = NULL;
3022 	kfree(mgr->proposed_vcpis);
3023 	mgr->proposed_vcpis = NULL;
3024 	mutex_unlock(&mgr->payload_lock);
3025 	mgr->dev = NULL;
3026 	mgr->aux = NULL;
3027 }
3028 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
3029 
3030 /* I2C device */
3031 static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
3032 			       int num)
3033 {
3034 	struct drm_dp_aux *aux = adapter->algo_data;
3035 	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
3036 	struct drm_dp_mst_branch *mstb;
3037 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
3038 	unsigned int i;
3039 	bool reading = false;
3040 	struct drm_dp_sideband_msg_req_body msg;
3041 	struct drm_dp_sideband_msg_tx *txmsg = NULL;
3042 	int ret;
3043 
3044 	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
3045 	if (!mstb)
3046 		return -EREMOTEIO;
3047 
3048 	/* construct i2c msg */
3049 	/* see if last msg is a read */
3050 	if (msgs[num - 1].flags & I2C_M_RD)
3051 		reading = true;
3052 
3053 	if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) {
3054 		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
3055 		ret = -EIO;
3056 		goto out;
3057 	}
3058 
3059 	memset(&msg, 0, sizeof(msg));
3060 	msg.req_type = DP_REMOTE_I2C_READ;
3061 	msg.u.i2c_read.num_transactions = num - 1;
3062 	msg.u.i2c_read.port_number = port->port_num;
3063 	for (i = 0; i < num - 1; i++) {
3064 		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
3065 		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
3066 		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
3067 	}
3068 	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
3069 	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
3070 
3071 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3072 	if (!txmsg) {
3073 		ret = -ENOMEM;
3074 		goto out;
3075 	}
3076 
3077 	txmsg->dst = mstb;
3078 	drm_dp_encode_sideband_req(&msg, txmsg);
3079 
3080 	drm_dp_queue_down_tx(mgr, txmsg);
3081 
3082 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3083 	if (ret > 0) {
3084 
3085 		if (txmsg->reply.reply_type == 1) { /* got a NAK back */
3086 			ret = -EREMOTEIO;
3087 			goto out;
3088 		}
3089 		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
3090 			ret = -EIO;
3091 			goto out;
3092 		}
3093 		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
3094 		ret = num;
3095 	}
3096 out:
3097 	kfree(txmsg);
3098 	drm_dp_put_mst_branch_device(mstb);
3099 	return ret;
3100 }
3101 
3102 static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
3103 {
3104 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
3105 	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
3106 	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
3107 	       I2C_FUNC_10BIT_ADDR;
3108 }
3109 
3110 static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
3111 	.functionality = drm_dp_mst_i2c_functionality,
3112 	.master_xfer = drm_dp_mst_i2c_xfer,
3113 };
3114 
3115 /**
3116  * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
3117  * @aux: DisplayPort AUX channel
3118  *
3119  * Returns 0 on success or a negative error code on failure.
3120  */
3121 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
3122 {
3123 	aux->ddc.algo = &drm_dp_mst_i2c_algo;
3124 	aux->ddc.algo_data = aux;
3125 	aux->ddc.retries = 3;
3126 
3127 #if 0
3128 	aux->ddc.class = I2C_CLASS_DDC;
3129 	aux->ddc.owner = THIS_MODULE;
3130 #endif
3131 	aux->ddc.dev.parent = aux->dev;
3132 #if 0
3133 	aux->ddc.dev.of_node = aux->dev->of_node;
3134 #endif
3135 
3136 	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
3137 		sizeof(aux->ddc.name));
3138 
3139 	return i2c_add_adapter(&aux->ddc);
3140 }
3141 
3142 /**
3143  * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
3144  * @aux: DisplayPort AUX channel
3145  */
3146 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
3147 {
3148 	i2c_del_adapter(&aux->ddc);
3149 }
3150