1 /*	$NetBSD: drm_dp_mst_topology.c,v 1.12 2022/08/22 18:30:50 riastradh Exp $	*/
2 
3 /*
4  * Copyright © 2014 Red Hat
5  *
6  * Permission to use, copy, modify, distribute, and sell this software and its
7  * documentation for any purpose is hereby granted without fee, provided that
8  * the above copyright notice appear in all copies and that both that copyright
9  * notice and this permission notice appear in supporting documentation, and
10  * that the name of the copyright holders not be used in advertising or
11  * publicity pertaining to distribution of the software without specific,
12  * written prior permission.  The copyright holders make no representations
13  * about the suitability of this software for any purpose.  It is provided "as
14  * is" without express or implied warranty.
15  *
16  * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
17  * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
18  * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
19  * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
20  * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
21  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
22  * OF THIS SOFTWARE.
23  */
24 
25 #include <sys/cdefs.h>
26 __KERNEL_RCSID(0, "$NetBSD: drm_dp_mst_topology.c,v 1.12 2022/08/22 18:30:50 riastradh Exp $");
27 
28 #include <linux/delay.h>
29 #include <linux/errno.h>
30 #include <linux/i2c.h>
31 #include <linux/init.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/seq_file.h>
35 
36 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
37 #include <linux/stacktrace.h>
38 #include <linux/sort.h>
39 #include <linux/timekeeping.h>
40 #include <linux/math64.h>
41 #endif
42 
43 #include <drm/drm_atomic.h>
44 #include <drm/drm_atomic_helper.h>
45 #include <drm/drm_dp_mst_helper.h>
46 #include <drm/drm_drv.h>
47 #include <drm/drm_print.h>
48 #include <drm/drm_probe_helper.h>
49 
50 #include "drm_crtc_helper_internal.h"
51 #include "drm_dp_mst_topology_internal.h"
52 
53 #include <linux/nbsd-namespace.h>
54 
55 /**
56  * DOC: dp mst helper
57  *
58  * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
59  * protocol. The helpers contain a topology manager and bandwidth manager.
60  * The helpers encapsulate the sending and receiving of sideband msgs.
61  */
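/**
 * struct drm_dp_pending_up_req - sideband up request queued for deferred handling
 * @hdr: the original up request message header
 * @msg: the parsed up request message body
 * @next: list entry used to queue the request on the topology manager
 */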
62 struct drm_dp_pending_up_req {
63 	struct drm_dp_sideband_msg_hdr hdr;
64 	struct drm_dp_sideband_msg_req_body msg;
65 	struct list_head next;
66 };
67 
68 #if IS_ENABLED(CONFIG_DEBUG_FS)
69 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
70 				  char *buf);
71 #endif
72 
73 static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
74 
75 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
76 				     int id,
77 				     struct drm_dp_payload *payload);
78 
79 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
80 				 struct drm_dp_mst_port *port,
81 				 int offset, int size, u8 *bytes);
82 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
83 				  struct drm_dp_mst_port *port,
84 				  int offset, int size, u8 *bytes);
85 
86 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
87 				    struct drm_dp_mst_branch *mstb);
88 
89 static void
90 drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
91 				   struct drm_dp_mst_branch *mstb);
92 
93 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
94 					   struct drm_dp_mst_branch *mstb,
95 					   struct drm_dp_mst_port *port);
96 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
97 				 u8 *guid);
98 
99 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
100 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
101 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
102 
103 #define DBG_PREFIX "[dp_mst]"
104 
105 #define DP_STR(x) [DP_ ## x] = #x
106 
107 static const char *drm_dp_mst_req_type_str(u8 req_type)
108 {
109 	static const char * const req_type_str[] = {
110 		DP_STR(GET_MSG_TRANSACTION_VERSION),
111 		DP_STR(LINK_ADDRESS),
112 		DP_STR(CONNECTION_STATUS_NOTIFY),
113 		DP_STR(ENUM_PATH_RESOURCES),
114 		DP_STR(ALLOCATE_PAYLOAD),
115 		DP_STR(QUERY_PAYLOAD),
116 		DP_STR(RESOURCE_STATUS_NOTIFY),
117 		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
118 		DP_STR(REMOTE_DPCD_READ),
119 		DP_STR(REMOTE_DPCD_WRITE),
120 		DP_STR(REMOTE_I2C_READ),
121 		DP_STR(REMOTE_I2C_WRITE),
122 		DP_STR(POWER_UP_PHY),
123 		DP_STR(POWER_DOWN_PHY),
124 		DP_STR(SINK_EVENT_NOTIFY),
125 		DP_STR(QUERY_STREAM_ENC_STATUS),
126 	};
127 
128 	if (req_type >= ARRAY_SIZE(req_type_str) ||
129 	    !req_type_str[req_type])
130 		return "unknown";
131 
132 	return req_type_str[req_type];
133 }
134 
135 #undef DP_STR
136 #define DP_STR(x) [DP_NAK_ ## x] = #x
137 
138 static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
139 {
140 	static const char * const nak_reason_str[] = {
141 		DP_STR(WRITE_FAILURE),
142 		DP_STR(INVALID_READ),
143 		DP_STR(CRC_FAILURE),
144 		DP_STR(BAD_PARAM),
145 		DP_STR(DEFER),
146 		DP_STR(LINK_FAILURE),
147 		DP_STR(NO_RESOURCES),
148 		DP_STR(DPCD_FAIL),
149 		DP_STR(I2C_NAK),
150 		DP_STR(ALLOCATE_FAIL),
151 	};
152 
153 	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
154 	    !nak_reason_str[nak_reason])
155 		return "unknown";
156 
157 	return nak_reason_str[nak_reason];
158 }
159 
160 #undef DP_STR
161 #define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x
162 
163 static const char *drm_dp_mst_sideband_tx_state_str(int state)
164 {
165 	static const char * const sideband_reason_str[] = {
166 		DP_STR(QUEUED),
167 		DP_STR(START_SEND),
168 		DP_STR(SENT),
169 		DP_STR(RX),
170 		DP_STR(TIMEOUT),
171 	};
172 
173 	if (state >= ARRAY_SIZE(sideband_reason_str) ||
174 	    !sideband_reason_str[state])
175 		return "unknown";
176 
177 	return sideband_reason_str[state];
178 }
179 
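/*
 * Unpack the RAD (Relative ADdress): one 4-bit port number per hop, packed
 * two hops per byte, unpacked here into one byte each so the path can be
 * printed as a hex string.
 */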
180 static int
181 drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
182 {
183 	int i;
184 	u8 unpacked_rad[16];
185 
186 	for (i = 0; i < lct; i++) {
187 		if (i % 2)
188 			unpacked_rad[i] = rad[i / 2] >> 4;
189 		else
190 			unpacked_rad[i] = rad[i / 2] & BIT_MASK(4);
191 	}
192 
193 	/* TODO: Eventually add something to printk so we can format the rad
194 	 * like this: 1.2.3
195 	 */
196 	return snprintf(out, len, "%*phC", lct, unpacked_rad);
197 }
198 
199 /* sideband msg handling */
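/*
 * Bitwise CRC-4 over the sideband header nibbles; 0x13 is the header CRC
 * polynomial x^4 + x + 1, and the 4-bit remainder is returned.
 */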
200 static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
201 {
202 	u8 bitmask = 0x80;
203 	u8 bitshift = 7;
204 	u8 array_index = 0;
205 	int number_of_bits = num_nibbles * 4;
206 	u8 remainder = 0;
207 
208 	while (number_of_bits != 0) {
209 		number_of_bits--;
210 		remainder <<= 1;
211 		remainder |= (data[array_index] & bitmask) >> bitshift;
212 		bitmask >>= 1;
213 		bitshift--;
214 		if (bitmask == 0) {
215 			bitmask = 0x80;
216 			bitshift = 7;
217 			array_index++;
218 		}
219 		if ((remainder & 0x10) == 0x10)
220 			remainder ^= 0x13;
221 	}
222 
223 	number_of_bits = 4;
224 	while (number_of_bits != 0) {
225 		number_of_bits--;
226 		remainder <<= 1;
227 		if ((remainder & 0x10) != 0)
228 			remainder ^= 0x13;
229 	}
230 
231 	return remainder;
232 }
233 
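/*
 * Bitwise CRC-8 over the sideband message body bytes, polynomial 0xd5
 * (x^8 + x^7 + x^6 + x^4 + x^2 + 1); the 8-bit remainder is returned.
 */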
234 static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
235 {
236 	u8 bitmask = 0x80;
237 	u8 bitshift = 7;
238 	u8 array_index = 0;
239 	int number_of_bits = number_of_bytes * 8;
240 	u16 remainder = 0;
241 
242 	while (number_of_bits != 0) {
243 		number_of_bits--;
244 		remainder <<= 1;
245 		remainder |= (data[array_index] & bitmask) >> bitshift;
246 		bitmask >>= 1;
247 		bitshift--;
248 		if (bitmask == 0) {
249 			bitmask = 0x80;
250 			bitshift = 7;
251 			array_index++;
252 		}
253 		if ((remainder & 0x100) == 0x100)
254 			remainder ^= 0xd5;
255 	}
256 
257 	number_of_bits = 8;
258 	while (number_of_bits != 0) {
259 		number_of_bits--;
260 		remainder <<= 1;
261 		if ((remainder & 0x100) != 0)
262 			remainder ^= 0xd5;
263 	}
264 
265 	return remainder & 0xff;
266 }
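/* Header size: 3 fixed bytes plus lct / 2 bytes of relative address. */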
267 static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
268 {
269 	u8 size = 3;
270 	size += (hdr->lct / 2);
271 	return size;
272 }
273 
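/*
 * Pack a sideband MSG header: byte 0 holds the link count total (high
 * nibble) and link count remaining (low nibble), followed by lct / 2 RAD
 * bytes, a byte with the broadcast/path_msg flags and 6-bit body length,
 * and a final byte with the SOMT/EOMT/seqno bits and 4-bit header CRC.
 */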
274 static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
275 					   u8 *buf, int *len)
276 {
277 	int idx = 0;
278 	int i;
279 	u8 crc4;
280 	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
281 	for (i = 0; i < (hdr->lct / 2); i++)
282 		buf[idx++] = hdr->rad[i];
283 	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
284 		(hdr->msg_len & 0x3f);
285 	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);
286 
287 	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
288 	buf[idx - 1] |= (crc4 & 0xf);
289 
290 	*len = idx;
291 }
292 
293 static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
294 					   u8 *buf, int buflen, u8 *hdrlen)
295 {
296 	u8 crc4;
297 	u8 len;
298 	int i;
299 	u8 idx;
300 	if (buf[0] == 0)
301 		return false;
302 	len = 3;
303 	len += ((buf[0] & 0xf0) >> 4) / 2;
304 	if (len > buflen)
305 		return false;
306 	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);
307 
308 	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
309 		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
310 		return false;
311 	}
312 
313 	hdr->lct = (buf[0] & 0xf0) >> 4;
314 	hdr->lcr = (buf[0] & 0xf);
315 	idx = 1;
316 	for (i = 0; i < (hdr->lct / 2); i++)
317 		hdr->rad[i] = buf[idx++];
318 	hdr->broadcast = (buf[idx] >> 7) & 0x1;
319 	hdr->path_msg = (buf[idx] >> 6) & 0x1;
320 	hdr->msg_len = buf[idx] & 0x3f;
321 	idx++;
322 	hdr->somt = (buf[idx] >> 7) & 0x1;
323 	hdr->eomt = (buf[idx] >> 6) & 0x1;
324 	hdr->seqno = (buf[idx] >> 4) & 0x1;
325 	idx++;
326 	*hdrlen = idx;
327 	return true;
328 }
329 
330 static void
331 drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
332 			   struct drm_dp_sideband_msg_tx *raw)
333 {
334 	int idx = 0;
335 	int i;
336 	u8 *buf = raw->msg;
337 	buf[idx++] = req->req_type & 0x7f;
338 
339 	switch (req->req_type) {
340 	case DP_ENUM_PATH_RESOURCES:
341 	case DP_POWER_DOWN_PHY:
342 	case DP_POWER_UP_PHY:
343 		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
344 		idx++;
345 		break;
346 	case DP_ALLOCATE_PAYLOAD:
347 		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
348 			(req->u.allocate_payload.number_sdp_streams & 0xf);
349 		idx++;
350 		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
351 		idx++;
352 		buf[idx] = (req->u.allocate_payload.pbn >> 8);
353 		idx++;
354 		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
355 		idx++;
356 		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
357 			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
358 				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
359 			idx++;
360 		}
361 		if (req->u.allocate_payload.number_sdp_streams & 1) {
362 			i = req->u.allocate_payload.number_sdp_streams - 1;
363 			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
364 			idx++;
365 		}
366 		break;
367 	case DP_QUERY_PAYLOAD:
368 		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
369 		idx++;
370 		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
371 		idx++;
372 		break;
373 	case DP_REMOTE_DPCD_READ:
374 		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
375 		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
376 		idx++;
377 		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
378 		idx++;
379 		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
380 		idx++;
381 		buf[idx] = (req->u.dpcd_read.num_bytes);
382 		idx++;
383 		break;
384 
385 	case DP_REMOTE_DPCD_WRITE:
386 		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
387 		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
388 		idx++;
389 		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
390 		idx++;
391 		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
392 		idx++;
393 		buf[idx] = (req->u.dpcd_write.num_bytes);
394 		idx++;
395 		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
396 		idx += req->u.dpcd_write.num_bytes;
397 		break;
398 	case DP_REMOTE_I2C_READ:
399 		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
400 		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
401 		idx++;
402 		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
403 			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
404 			idx++;
405 			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
406 			idx++;
407 			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
408 			idx += req->u.i2c_read.transactions[i].num_bytes;
409 
410 			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
411 			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
412 			idx++;
413 		}
414 		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
415 		idx++;
416 		buf[idx] = (req->u.i2c_read.num_bytes_read);
417 		idx++;
418 		break;
419 
420 	case DP_REMOTE_I2C_WRITE:
421 		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
422 		idx++;
423 		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
424 		idx++;
425 		buf[idx] = (req->u.i2c_write.num_bytes);
426 		idx++;
427 		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
428 		idx += req->u.i2c_write.num_bytes;
429 		break;
430 	}
431 	raw->cur_len = idx;
432 }
433 EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);
434 
435 /* Decode a sideband request we've encoded, mainly used for debugging */
436 static int
437 drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
438 			   struct drm_dp_sideband_msg_req_body *req)
439 {
440 	const u8 *buf = raw->msg;
441 	int i, idx = 0;
442 
443 	req->req_type = buf[idx++] & 0x7f;
444 	switch (req->req_type) {
445 	case DP_ENUM_PATH_RESOURCES:
446 	case DP_POWER_DOWN_PHY:
447 	case DP_POWER_UP_PHY:
448 		req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
449 		break;
450 	case DP_ALLOCATE_PAYLOAD:
451 		{
452 			struct drm_dp_allocate_payload *a =
453 				&req->u.allocate_payload;
454 
455 			a->number_sdp_streams = buf[idx] & 0xf;
456 			a->port_number = (buf[idx] >> 4) & 0xf;
457 
458 			WARN_ON(buf[++idx] & 0x80);
459 			a->vcpi = buf[idx] & 0x7f;
460 
461 			a->pbn = buf[++idx] << 8;
462 			a->pbn |= buf[++idx];
463 
464 			idx++;
465 			for (i = 0; i < a->number_sdp_streams; i++) {
466 				a->sdp_stream_sink[i] =
467 					(buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
468 			}
469 		}
470 		break;
471 	case DP_QUERY_PAYLOAD:
472 		req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
473 		WARN_ON(buf[++idx] & 0x80);
474 		req->u.query_payload.vcpi = buf[idx] & 0x7f;
475 		break;
476 	case DP_REMOTE_DPCD_READ:
477 		{
478 			struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;
479 
480 			r->port_number = (buf[idx] >> 4) & 0xf;
481 
482 			r->dpcd_address = (buf[idx] << 16) & 0xf0000;
483 			r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
484 			r->dpcd_address |= buf[++idx] & 0xff;
485 
486 			r->num_bytes = buf[++idx];
487 		}
488 		break;
489 	case DP_REMOTE_DPCD_WRITE:
490 		{
491 			struct drm_dp_remote_dpcd_write *w =
492 				&req->u.dpcd_write;
493 
494 			w->port_number = (buf[idx] >> 4) & 0xf;
495 
496 			w->dpcd_address = (buf[idx] << 16) & 0xf0000;
497 			w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
498 			w->dpcd_address |= buf[++idx] & 0xff;
499 
500 			w->num_bytes = buf[++idx];
501 
502 			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
503 					   GFP_KERNEL);
504 			if (!w->bytes)
505 				return -ENOMEM;
506 		}
507 		break;
508 	case DP_REMOTE_I2C_READ:
509 		{
510 			struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
511 			struct drm_dp_remote_i2c_read_tx *tx;
512 			bool failed = false;
513 
514 			r->num_transactions = buf[idx] & 0x3;
515 			r->port_number = (buf[idx] >> 4) & 0xf;
516 			for (i = 0; i < r->num_transactions; i++) {
517 				tx = &r->transactions[i];
518 
519 				tx->i2c_dev_id = buf[++idx] & 0x7f;
520 				tx->num_bytes = buf[++idx];
521 				tx->bytes = kmemdup(&buf[++idx],
522 						    tx->num_bytes,
523 						    GFP_KERNEL);
524 				if (!tx->bytes) {
525 					failed = true;
526 					break;
527 				}
528 				idx += tx->num_bytes;
529 				tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
530 				tx->i2c_transaction_delay = buf[idx] & 0xf;
531 			}
532 
533 			if (failed) {
534 				for (i = 0; i < r->num_transactions; i++) {
535 					tx = &r->transactions[i];
536 					kfree(tx->bytes);
537 				}
538 				return -ENOMEM;
539 			}
540 
541 			r->read_i2c_device_id = buf[++idx] & 0x7f;
542 			r->num_bytes_read = buf[++idx];
543 		}
544 		break;
545 	case DP_REMOTE_I2C_WRITE:
546 		{
547 			struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;
548 
549 			w->port_number = (buf[idx] >> 4) & 0xf;
550 			w->write_i2c_device_id = buf[++idx] & 0x7f;
551 			w->num_bytes = buf[++idx];
552 			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
553 					   GFP_KERNEL);
554 			if (!w->bytes)
555 				return -ENOMEM;
556 		}
557 		break;
558 	}
559 
560 	return 0;
561 }
562 EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);
563 
564 static void
565 drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
566 				  int indent, struct drm_printer *printer)
567 {
568 	int i;
569 
570 #define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
571 	if (req->req_type == DP_LINK_ADDRESS) {
572 		/* No contents to print */
573 		P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
574 		return;
575 	}
576 
577 	P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
578 	indent++;
579 
580 	switch (req->req_type) {
581 	case DP_ENUM_PATH_RESOURCES:
582 	case DP_POWER_DOWN_PHY:
583 	case DP_POWER_UP_PHY:
584 		P("port=%d\n", req->u.port_num.port_number);
585 		break;
586 	case DP_ALLOCATE_PAYLOAD:
587 		P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
588 		  req->u.allocate_payload.port_number,
589 		  req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
590 		  req->u.allocate_payload.number_sdp_streams,
591 		  req->u.allocate_payload.number_sdp_streams,
592 		  req->u.allocate_payload.sdp_stream_sink);
593 		break;
594 	case DP_QUERY_PAYLOAD:
595 		P("port=%d vcpi=%d\n",
596 		  req->u.query_payload.port_number,
597 		  req->u.query_payload.vcpi);
598 		break;
599 	case DP_REMOTE_DPCD_READ:
600 		P("port=%d dpcd_addr=%05x len=%d\n",
601 		  req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
602 		  req->u.dpcd_read.num_bytes);
603 		break;
604 	case DP_REMOTE_DPCD_WRITE:
605 		P("port=%d addr=%05x len=%d: %*ph\n",
606 		  req->u.dpcd_write.port_number,
607 		  req->u.dpcd_write.dpcd_address,
608 		  req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
609 		  req->u.dpcd_write.bytes);
610 		break;
611 	case DP_REMOTE_I2C_READ:
612 		P("port=%d num_tx=%d id=%d size=%d:\n",
613 		  req->u.i2c_read.port_number,
614 		  req->u.i2c_read.num_transactions,
615 		  req->u.i2c_read.read_i2c_device_id,
616 		  req->u.i2c_read.num_bytes_read);
617 
618 		indent++;
619 		for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
620 			const struct drm_dp_remote_i2c_read_tx *rtx =
621 				&req->u.i2c_read.transactions[i];
622 
623 			P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
624 			  i, rtx->i2c_dev_id, rtx->num_bytes,
625 			  rtx->no_stop_bit, rtx->i2c_transaction_delay,
626 			  rtx->num_bytes, rtx->bytes);
627 		}
628 		break;
629 	case DP_REMOTE_I2C_WRITE:
630 		P("port=%d id=%d size=%d: %*ph\n",
631 		  req->u.i2c_write.port_number,
632 		  req->u.i2c_write.write_i2c_device_id,
633 		  req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
634 		  req->u.i2c_write.bytes);
635 		break;
636 	default:
637 		P("???\n");
638 		break;
639 	}
640 #undef P
641 }
642 EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);
643 
644 static inline void
645 drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
646 				const struct drm_dp_sideband_msg_tx *txmsg)
647 {
648 	struct drm_dp_sideband_msg_req_body req;
649 	char buf[64];
650 	int ret;
651 	int i;
652 
653 	drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
654 			      sizeof(buf));
655 	drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
656 		   txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
657 		   drm_dp_mst_sideband_tx_state_str(txmsg->state),
658 		   txmsg->path_msg, buf);
659 
660 	ret = drm_dp_decode_sideband_req(txmsg, &req);
661 	if (ret) {
662 		drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
663 		return;
664 	}
665 	drm_dp_dump_sideband_msg_req_body(&req, 1, p);
666 
667 	switch (req.req_type) {
668 	case DP_REMOTE_DPCD_WRITE:
669 		kfree(req.u.dpcd_write.bytes);
670 		break;
671 	case DP_REMOTE_I2C_READ:
672 		for (i = 0; i < req.u.i2c_read.num_transactions; i++)
673 			kfree(req.u.i2c_read.transactions[i].bytes);
674 		break;
675 	case DP_REMOTE_I2C_WRITE:
676 		kfree(req.u.i2c_write.bytes);
677 		break;
678 	}
679 }
680 
681 static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
682 {
683 	u8 crc4;
684 	crc4 = drm_dp_msg_data_crc4(msg, len);
685 	msg[len] = crc4;
686 }
687 
688 static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
689 					 struct drm_dp_sideband_msg_tx *raw)
690 {
691 	int idx = 0;
692 	u8 *buf = raw->msg;
693 
694 	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
695 
696 	raw->cur_len = idx;
697 }
698 
699 /* this adds a chunk of msg to the builder to get the final msg */
700 static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
701 				      u8 *replybuf, u8 replybuflen, bool hdr)
702 {
703 	int ret;
704 	u8 crc4 __unused;	/* XXX Mistake?  */
705 
706 	if (hdr) {
707 		u8 hdrlen;
708 		struct drm_dp_sideband_msg_hdr recv_hdr;
709 		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
710 		if (ret == false) {
711 			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
712 			return false;
713 		}
714 
715 		/*
716 		 * ignore out-of-order messages or messages that are part of a
717 		 * failed transaction
718 		 */
719 		if (!recv_hdr.somt && !msg->have_somt)
720 			return false;
721 
722 		/* get length contained in this portion */
723 		msg->curchunk_len = recv_hdr.msg_len;
724 		msg->curchunk_hdrlen = hdrlen;
725 
726 		/* we have already gotten an somt - don't bother parsing */
727 		if (recv_hdr.somt && msg->have_somt)
728 			return false;
729 
730 		if (recv_hdr.somt) {
731 			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
732 			msg->have_somt = true;
733 		}
734 		if (recv_hdr.eomt)
735 			msg->have_eomt = true;
736 
737 		/* copy the bytes for the remainder of this header chunk */
738 		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
739 		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
740 	} else {
741 		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
742 		msg->curchunk_idx += replybuflen;
743 	}
744 
745 	if (msg->curchunk_idx >= msg->curchunk_len) {
746 		/* do CRC */
747 		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
748 		/* copy chunk into bigger msg */
749 		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
750 		msg->curlen += msg->curchunk_len - 1;
751 	}
752 	return true;
753 }
754 
755 static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
756 					       struct drm_dp_sideband_msg_reply_body *repmsg)
757 {
758 	int idx = 1;
759 	int i;
760 	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
761 	idx += 16;
762 	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
763 	idx++;
764 	if (idx > raw->curlen)
765 		goto fail_len;
766 	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
767 		if (raw->msg[idx] & 0x80)
768 			repmsg->u.link_addr.ports[i].input_port = 1;
769 
770 		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
771 		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
772 
773 		idx++;
774 		if (idx > raw->curlen)
775 			goto fail_len;
776 		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
777 		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
778 		if (repmsg->u.link_addr.ports[i].input_port == 0)
779 			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
780 		idx++;
781 		if (idx > raw->curlen)
782 			goto fail_len;
783 		if (repmsg->u.link_addr.ports[i].input_port == 0) {
784 			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
785 			idx++;
786 			if (idx > raw->curlen)
787 				goto fail_len;
788 			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
789 			idx += 16;
790 			if (idx > raw->curlen)
791 				goto fail_len;
792 			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
793 			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
794 			idx++;
795 
796 		}
797 		if (idx > raw->curlen)
798 			goto fail_len;
799 	}
800 
801 	return true;
802 fail_len:
803 	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
804 	return false;
805 }
806 
807 static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
808 						   struct drm_dp_sideband_msg_reply_body *repmsg)
809 {
810 	int idx = 1;
811 	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
812 	idx++;
813 	if (idx > raw->curlen)
814 		goto fail_len;
815 	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
816 	idx++;
817 	if (idx > raw->curlen)
818 		goto fail_len;
819 
820 	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
821 	return true;
822 fail_len:
823 	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
824 	return false;
825 }
826 
827 static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
828 						      struct drm_dp_sideband_msg_reply_body *repmsg)
829 {
830 	int idx = 1;
831 	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
832 	idx++;
833 	if (idx > raw->curlen)
834 		goto fail_len;
835 	return true;
836 fail_len:
837 	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
838 	return false;
839 }
840 
841 static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
842 						      struct drm_dp_sideband_msg_reply_body *repmsg)
843 {
844 	int idx = 1;
845 
846 	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
847 	idx++;
848 	if (idx > raw->curlen)
849 		goto fail_len;
850 	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
851 	idx++;
852 	/* TODO check */
853 	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
854 	return true;
855 fail_len:
856 	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
857 	return false;
858 }
859 
860 static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
861 							  struct drm_dp_sideband_msg_reply_body *repmsg)
862 {
863 	int idx = 1;
864 	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
865 	repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
866 	idx++;
867 	if (idx > raw->curlen)
868 		goto fail_len;
869 	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
870 	idx += 2;
871 	if (idx > raw->curlen)
872 		goto fail_len;
873 	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
874 	idx += 2;
875 	if (idx > raw->curlen)
876 		goto fail_len;
877 	return true;
878 fail_len:
879 	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
880 	return false;
881 }
882 
883 static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
884 							  struct drm_dp_sideband_msg_reply_body *repmsg)
885 {
886 	int idx = 1;
887 	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
888 	idx++;
889 	if (idx > raw->curlen)
890 		goto fail_len;
891 	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
892 	idx++;
893 	if (idx > raw->curlen)
894 		goto fail_len;
895 	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
896 	idx += 2;
897 	if (idx > raw->curlen)
898 		goto fail_len;
899 	return true;
900 fail_len:
901 	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
902 	return false;
903 }
904 
905 static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
906 						    struct drm_dp_sideband_msg_reply_body *repmsg)
907 {
908 	int idx = 1;
909 	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
910 	idx++;
911 	if (idx > raw->curlen)
912 		goto fail_len;
913 	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
914 	idx += 2;
915 	if (idx > raw->curlen)
916 		goto fail_len;
917 	return true;
918 fail_len:
919 	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
920 	return false;
921 }
922 
923 static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
924 						       struct drm_dp_sideband_msg_reply_body *repmsg)
925 {
926 	int idx = 1;
927 
928 	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
929 	idx++;
930 	if (idx > raw->curlen) {
931 		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
932 			      idx, raw->curlen);
933 		return false;
934 	}
935 	return true;
936 }
937 
938 static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
939 					struct drm_dp_sideband_msg_reply_body *msg)
940 {
941 	memset(msg, 0, sizeof(*msg));
942 	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
943 	msg->req_type = (raw->msg[0] & 0x7f);
944 
945 	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
946 		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
947 		msg->u.nak.reason = raw->msg[17];
948 		msg->u.nak.nak_data = raw->msg[18];
949 		return false;
950 	}
951 
952 	switch (msg->req_type) {
953 	case DP_LINK_ADDRESS:
954 		return drm_dp_sideband_parse_link_address(raw, msg);
955 	case DP_QUERY_PAYLOAD:
956 		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
957 	case DP_REMOTE_DPCD_READ:
958 		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
959 	case DP_REMOTE_DPCD_WRITE:
960 		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
961 	case DP_REMOTE_I2C_READ:
962 		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
963 	case DP_ENUM_PATH_RESOURCES:
964 		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
965 	case DP_ALLOCATE_PAYLOAD:
966 		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
967 	case DP_POWER_DOWN_PHY:
968 	case DP_POWER_UP_PHY:
969 		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
970 	case DP_CLEAR_PAYLOAD_ID_TABLE:
971 		return true; /* since there's nothing to parse */
972 	default:
973 		DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
974 			  drm_dp_mst_req_type_str(msg->req_type));
975 		return false;
976 	}
977 }
978 
979 static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
980 							   struct drm_dp_sideband_msg_req_body *msg)
981 {
982 	int idx = 1;
983 
984 	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
985 	idx++;
986 	if (idx > raw->curlen)
987 		goto fail_len;
988 
989 	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
990 	idx += 16;
991 	if (idx > raw->curlen)
992 		goto fail_len;
993 
994 	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
995 	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
996 	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
997 	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
998 	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
999 	idx++;
1000 	return true;
1001 fail_len:
1002 	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
1003 	return false;
1004 }
1005 
1006 static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
1007 							   struct drm_dp_sideband_msg_req_body *msg)
1008 {
1009 	int idx = 1;
1010 
1011 	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
1012 	idx++;
1013 	if (idx > raw->curlen)
1014 		goto fail_len;
1015 
1016 	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
1017 	idx += 16;
1018 	if (idx > raw->curlen)
1019 		goto fail_len;
1020 
1021 	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
1022 	idx++;
1023 	return true;
1024 fail_len:
1025 	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
1026 	return false;
1027 }
1028 
1029 static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
1030 				      struct drm_dp_sideband_msg_req_body *msg)
1031 {
1032 	memset(msg, 0, sizeof(*msg));
1033 	msg->req_type = (raw->msg[0] & 0x7f);
1034 
1035 	switch (msg->req_type) {
1036 	case DP_CONNECTION_STATUS_NOTIFY:
1037 		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
1038 	case DP_RESOURCE_STATUS_NOTIFY:
1039 		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
1040 	default:
1041 		DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
1042 			  drm_dp_mst_req_type_str(msg->req_type));
1043 		return false;
1044 	}
1045 }
1046 
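/*
 * The build_*() helpers below fill in a request body and encode it into the
 * raw tx msg buffer via drm_dp_encode_sideband_req().  Requests that operate
 * on a path (enum path resources, allocate payload, power up/down phy) also
 * mark the message as a path message.
 */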
1047 static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
1048 {
1049 	struct drm_dp_sideband_msg_req_body req;
1050 
1051 	req.req_type = DP_REMOTE_DPCD_WRITE;
1052 	req.u.dpcd_write.port_number = port_num;
1053 	req.u.dpcd_write.dpcd_address = offset;
1054 	req.u.dpcd_write.num_bytes = num_bytes;
1055 	req.u.dpcd_write.bytes = bytes;
1056 	drm_dp_encode_sideband_req(&req, msg);
1057 
1058 	return 0;
1059 }
1060 
1061 static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
1062 {
1063 	struct drm_dp_sideband_msg_req_body req;
1064 
1065 	req.req_type = DP_LINK_ADDRESS;
1066 	drm_dp_encode_sideband_req(&req, msg);
1067 	return 0;
1068 }
1069 
1070 static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
1071 {
1072 	struct drm_dp_sideband_msg_req_body req;
1073 
1074 	req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
1075 	drm_dp_encode_sideband_req(&req, msg);
1076 	return 0;
1077 }
1078 
1079 static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
1080 {
1081 	struct drm_dp_sideband_msg_req_body req;
1082 
1083 	req.req_type = DP_ENUM_PATH_RESOURCES;
1084 	req.u.port_num.port_number = port_num;
1085 	drm_dp_encode_sideband_req(&req, msg);
1086 	msg->path_msg = true;
1087 	return 0;
1088 }
1089 
1090 static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
1091 				  u8 vcpi, uint16_t pbn,
1092 				  u8 number_sdp_streams,
1093 				  u8 *sdp_stream_sink)
1094 {
1095 	struct drm_dp_sideband_msg_req_body req;
1096 	memset(&req, 0, sizeof(req));
1097 	req.req_type = DP_ALLOCATE_PAYLOAD;
1098 	req.u.allocate_payload.port_number = port_num;
1099 	req.u.allocate_payload.vcpi = vcpi;
1100 	req.u.allocate_payload.pbn = pbn;
1101 	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
1102 	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
1103 		   number_sdp_streams);
1104 	drm_dp_encode_sideband_req(&req, msg);
1105 	msg->path_msg = true;
1106 	return 0;
1107 }
1108 
1109 static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
1110 				  int port_num, bool power_up)
1111 {
1112 	struct drm_dp_sideband_msg_req_body req;
1113 
1114 	if (power_up)
1115 		req.req_type = DP_POWER_UP_PHY;
1116 	else
1117 		req.req_type = DP_POWER_DOWN_PHY;
1118 
1119 	req.u.port_num.port_number = port_num;
1120 	drm_dp_encode_sideband_req(&req, msg);
1121 	msg->path_msg = true;
1122 	return 0;
1123 }
1124 
1125 static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
1126 					struct drm_dp_vcpi *vcpi)
1127 {
1128 	int ret, vcpi_ret;
1129 
1130 	mutex_lock(&mgr->payload_lock);
1131 	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
1132 	if (ret > mgr->max_payloads) {
1133 		ret = -EINVAL;
1134 		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
1135 		goto out_unlock;
1136 	}
1137 
1138 	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
1139 	if (vcpi_ret > mgr->max_payloads) {
1140 		ret = -EINVAL;
1141 		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
1142 		goto out_unlock;
1143 	}
1144 
1145 	set_bit(ret, &mgr->payload_mask);
1146 	set_bit(vcpi_ret, &mgr->vcpi_mask);
1147 	vcpi->vcpi = vcpi_ret + 1;
1148 	mgr->proposed_vcpis[ret - 1] = vcpi;
1149 out_unlock:
1150 	mutex_unlock(&mgr->payload_lock);
1151 	return ret;
1152 }
1153 
1154 static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
1155 				      int vcpi)
1156 {
1157 	int i;
1158 	if (vcpi == 0)
1159 		return;
1160 
1161 	mutex_lock(&mgr->payload_lock);
1162 	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
1163 	clear_bit(vcpi - 1, &mgr->vcpi_mask);
1164 
1165 	for (i = 0; i < mgr->max_payloads; i++) {
1166 		if (mgr->proposed_vcpis[i] &&
1167 		    mgr->proposed_vcpis[i]->vcpi == vcpi) {
1168 			mgr->proposed_vcpis[i] = NULL;
1169 			clear_bit(i + 1, &mgr->payload_mask);
1170 		}
1171 	}
1172 	mutex_unlock(&mgr->payload_lock);
1173 }
1174 
1175 static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
1176 			      struct drm_dp_sideband_msg_tx *txmsg)
1177 {
1178 	unsigned int state;
1179 
1180 	/*
1181 	 * All updates to txmsg->state are protected by mgr->qlock, and the two
1182 	 * cases we check here are terminal states. For those the barriers
1183 	 * provided by the wake_up/wait_event pair are enough.
1184 	 */
1185 	state = READ_ONCE(txmsg->state);
1186 	return (state == DRM_DP_SIDEBAND_TX_RX ||
1187 		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
1188 }
1189 
1190 static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
1191 				    struct drm_dp_sideband_msg_tx *txmsg)
1192 {
1193 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
1194 	int ret;
1195 
1196 	mutex_lock(&mstb->mgr->qlock);
1197 	DRM_TIMED_WAIT_UNTIL(ret, &mgr->tx_waitq, &mstb->mgr->qlock, 4*HZ,
1198 	    check_txmsg_state(mgr, txmsg));
1199 	if (ret > 0) {
1200 		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
1201 			ret = -EIO;
1202 			goto out;
1203 		}
1204 	} else {
1205 		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);
1206 
1207 		/* dump some state */
1208 		ret = -EIO;
1209 
1210 		/* remove from q */
1211 		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
1212 		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
1213 			list_del(&txmsg->next);
1214 		}
1215 
1216 		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
1217 		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
1218 			mstb->tx_slots[txmsg->seqno] = NULL;
1219 		}
1220 		mgr->is_waiting_for_dwn_reply = false;
1221 
1222 	}
1223 out:
1224 	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
1225 		struct drm_printer p = drm_debug_printer(DBG_PREFIX);
1226 
1227 		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
1228 	}
1229 	mutex_unlock(&mgr->qlock);
1230 
1231 	drm_dp_mst_kick_tx(mgr);
1232 	return ret;
1233 }
1234 
1235 static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
1236 {
1237 	struct drm_dp_mst_branch *mstb;
1238 
1239 	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
1240 	if (!mstb)
1241 		return NULL;
1242 
1243 	mstb->lct = lct;
1244 	if (lct > 1)
1245 		memcpy(mstb->rad, rad, lct / 2);
1246 	INIT_LIST_HEAD(&mstb->ports);
1247 	kref_init(&mstb->topology_kref);
1248 	kref_init(&mstb->malloc_kref);
1249 	return mstb;
1250 }
1251 
1252 static void drm_dp_free_mst_branch_device(struct kref *kref)
1253 {
1254 	struct drm_dp_mst_branch *mstb =
1255 		container_of(kref, struct drm_dp_mst_branch, malloc_kref);
1256 
1257 	if (mstb->port_parent)
1258 		drm_dp_mst_put_port_malloc(mstb->port_parent);
1259 
1260 	kfree(mstb);
1261 }
1262 
1263 /**
1264  * DOC: Branch device and port refcounting
1265  *
1266  * Topology refcount overview
1267  * ~~~~~~~~~~~~~~~~~~~~~~~~~~
1268  *
1269  * The refcounting schemes for &struct drm_dp_mst_branch and &struct
1270  * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
1271  * two different kinds of refcounts: topology refcounts, and malloc refcounts.
1272  *
1273  * Topology refcounts are not exposed to drivers, and are handled internally
1274  * by the DP MST helpers. The helpers use them in order to prevent the
1275  * in-memory topology state from being changed in the middle of critical
1276  * operations like changing the internal state of payload allocations. This
1277  * means each branch and port will be considered to be connected to the rest
1278  * of the topology until its topology refcount reaches zero. Additionally,
1279  * for ports this means that their associated &struct drm_connector will stay
1280  * registered with userspace until the port's refcount reaches 0.
1281  *
1282  * Malloc refcount overview
1283  * ~~~~~~~~~~~~~~~~~~~~~~~~
1284  *
1285  * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
1286  * drm_dp_mst_branch allocated even after all of its topology references have
1287  * been dropped, so that the driver or MST helpers can safely access each
1288  * branch's last known state before it was disconnected from the topology.
1289  * When the malloc refcount of a port or branch reaches 0, the memory
1290  * allocation containing the &struct drm_dp_mst_branch or &struct
1291  * drm_dp_mst_port respectively will be freed.
1292  *
1293  * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
1294  * to drivers. As of writing this documentation, there are no drivers that
1295  * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
1296  * helpers. Exposing this API to drivers in a race-free manner would take more
1297  * tweaking of the refcounting scheme; however, patches are welcome provided
1298  * there is a legitimate driver usecase for this.
1299  *
1300  * Refcount relationships in a topology
1301  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1302  *
1303  * Let's take a look at why the relationship between topology and malloc
1304  * refcounts is designed the way it is.
1305  *
1306  * .. kernel-figure:: dp-mst/topology-figure-1.dot
1307  *
1308  *    An example of topology and malloc refs in a DP MST topology with two
1309  *    active payloads. Topology refcount increments are indicated by solid
1310  *    lines, and malloc refcount increments are indicated by dashed lines.
1311  *    Each starts from the branch which incremented the refcount, and ends at
1312  *    the branch to which the refcount belongs, i.e. the arrow points the
1313  *    same way as the C pointers used to reference a structure.
1314  *
1315  * As you can see in the above figure, every branch increments the topology
1316  * refcount of its children, and increments the malloc refcount of its
1317  * parent. Additionally, every payload increments the malloc refcount of its
1318  * assigned port by 1.
1319  *
1320  * So, what would happen if MSTB #3 from the above figure was unplugged from
1321  * the system, but the driver hadn't yet removed payload #2 from port #3? The
1322  * topology would start to look like the figure below.
1323  *
1324  * .. kernel-figure:: dp-mst/topology-figure-2.dot
1325  *
1326  *    Ports and branch devices which have been released from memory are
1327  *    colored grey, and references which have been removed are colored red.
1328  *
1329  * Whenever a port or branch device's topology refcount reaches zero, it will
1330  * decrement the topology refcounts of all its children, the malloc refcount
1331  * of its parent, and finally its own malloc refcount. For MSTB #4 and port
1332  * #4, this means they both have been disconnected from the topology and freed
1333  * from memory. But, because payload #2 is still holding a reference to port
1334  * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
1335  * is still accessible from memory. This also means port #3 has not yet
1336  * decremented the malloc refcount of MSTB #3, so its &struct
1337  * drm_dp_mst_branch will also stay allocated in memory until port #3's
1338  * malloc refcount reaches 0.
1339  *
1340  * This relationship is necessary because in order to release payload #2, we
1341  * need to be able to figure out the last relative of port #3 that's still
1342  * connected to the topology. In this case, we would travel up the topology as
1343  * shown below.
1344  *
1345  * .. kernel-figure:: dp-mst/topology-figure-3.dot
1346  *
1347  * And finally, remove payload #2 by communicating with port #2 through
1348  * sideband transactions.
1349  */
1350 
1351 /**
1352  * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
1353  * device
1354  * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
1355  *
1356  * Increments &drm_dp_mst_branch.malloc_kref. When
1357  * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
1358  * will be released and @mstb may no longer be used.
1359  *
1360  * See also: drm_dp_mst_put_mstb_malloc()
1361  */
1362 static void
1363 drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
1364 {
1365 	kref_get(&mstb->malloc_kref);
1366 	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
1367 }
1368 
1369 /**
1370  * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
1371  * device
1372  * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
1373  *
1374  * Decrements &drm_dp_mst_branch.malloc_kref. When
1375  * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
1376  * will be released and @mstb may no longer be used.
1377  *
1378  * See also: drm_dp_mst_get_mstb_malloc()
1379  */
1380 static void
1381 drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
1382 {
1383 	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
1384 	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
1385 }
1386 
1387 static void drm_dp_free_mst_port(struct kref *kref)
1388 {
1389 	struct drm_dp_mst_port *port =
1390 		container_of(kref, struct drm_dp_mst_port, malloc_kref);
1391 
1392 	drm_dp_mst_put_mstb_malloc(port->parent);
1393 	kfree(port);
1394 }
1395 
1396 /**
1397  * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
1398  * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
1399  *
1400  * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
1401  * reaches 0, the memory allocation for @port will be released and @port may
1402  * no longer be used.
1403  *
1404  * Because @port could potentially be freed at any time by the DP MST helpers
1405  * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
1406  * function, drivers that wish to make use of &struct drm_dp_mst_port should
1407  * ensure that they grab at least one main malloc reference to their MST ports
1408  * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
1409  * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
1410  *
1411  * See also: drm_dp_mst_put_port_malloc()
1412  */
1413 void
1414 drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
1415 {
1416 	kref_get(&port->malloc_kref);
1417 	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
1418 }
1419 EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
1420 
1421 /**
1422  * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
1423  * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
1424  *
1425  * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
1426  * reaches 0, the memory allocation for @port will be released and @port may
1427  * no longer be used.
1428  *
1429  * See also: drm_dp_mst_get_port_malloc()
1430  */
1431 void
1432 drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
1433 {
1434 	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
1435 	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
1436 }
1437 EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
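
/*
 * Minimal usage sketch (hypothetical driver code, not lifted from any real
 * driver): a driver that caches a port pointer should balance a malloc get
 * with a put once it stops dereferencing the port, e.g.
 *
 *	my_state->port = port;			/- my_state is made up -/
 *	drm_dp_mst_get_port_malloc(port);
 *	...
 *	drm_dp_mst_put_port_malloc(my_state->port);
 *	my_state->port = NULL;
 */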
1438 
1439 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
1440 
1441 #define STACK_DEPTH 8
1442 
1443 static noinline void
1444 __topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
1445 		    struct drm_dp_mst_topology_ref_history *history,
1446 		    enum drm_dp_mst_topology_ref_type type)
1447 {
1448 	struct drm_dp_mst_topology_ref_entry *entry = NULL;
1449 	depot_stack_handle_t backtrace;
1450 	ulong stack_entries[STACK_DEPTH];
1451 	uint n;
1452 	int i;
1453 
1454 	n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
1455 	backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
1456 	if (!backtrace)
1457 		return;
1458 
1459 	/* Try to find an existing entry for this backtrace */
1460 	for (i = 0; i < history->len; i++) {
1461 		if (history->entries[i].backtrace == backtrace) {
1462 			entry = &history->entries[i];
1463 			break;
1464 		}
1465 	}
1466 
1467 	/* Otherwise add one */
1468 	if (!entry) {
1469 		struct drm_dp_mst_topology_ref_entry *new;
1470 		int new_len = history->len + 1;
1471 
1472 		new = krealloc(history->entries, sizeof(*new) * new_len,
1473 			       GFP_KERNEL);
1474 		if (!new)
1475 			return;
1476 
1477 		entry = &new[history->len];
1478 		history->len = new_len;
1479 		history->entries = new;
1480 
1481 		entry->backtrace = backtrace;
1482 		entry->type = type;
1483 		entry->count = 0;
1484 	}
1485 	entry->count++;
1486 	entry->ts_nsec = ktime_get_ns();
1487 }
1488 
1489 static int
1490 topology_ref_history_cmp(const void *a, const void *b)
1491 {
1492 	const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;
1493 
1494 	if (entry_a->ts_nsec > entry_b->ts_nsec)
1495 		return 1;
1496 	else if (entry_a->ts_nsec < entry_b->ts_nsec)
1497 		return -1;
1498 	else
1499 		return 0;
1500 }
1501 
1502 static inline const char *
1503 topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
1504 {
1505 	if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
1506 		return "get";
1507 	else
1508 		return "put";
1509 }
1510 
1511 static void
1512 __dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
1513 			    void *ptr, const char *type_str)
1514 {
1515 	struct drm_printer p = drm_debug_printer(DBG_PREFIX);
1516 	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
1517 	int i;
1518 
1519 	if (!buf)
1520 		return;
1521 
1522 	if (!history->len)
1523 		goto out;
1524 
1525 	/* First, sort the list so that it goes from oldest to newest
1526 	 * reference entry
1527 	 */
1528 	sort(history->entries, history->len, sizeof(*history->entries),
1529 	     topology_ref_history_cmp, NULL);
1530 
1531 	drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
1532 		   type_str, ptr);
1533 
1534 	for (i = 0; i < history->len; i++) {
1535 		const struct drm_dp_mst_topology_ref_entry *entry =
1536 			&history->entries[i];
1537 		ulong *entries;
1538 		uint nr_entries;
1539 		u64 ts_nsec = entry->ts_nsec;
1540 		u32 rem_nsec = do_div(ts_nsec, 1000000000);
1541 
1542 		nr_entries = stack_depot_fetch(entry->backtrace, &entries);
1543 		stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4);
1544 
1545 		drm_printf(&p, "  %d %ss (last at %5llu.%06u):\n%s",
1546 			   entry->count,
1547 			   topology_ref_type_to_str(entry->type),
1548 			   ts_nsec, rem_nsec / 1000, buf);
1549 	}
1550 
1551 	/* Now free the history, since this is the only time we expose it */
1552 	kfree(history->entries);
1553 out:
1554 	kfree(buf);
1555 }
1556 
1557 static __always_inline void
1558 drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
1559 {
1560 	__dump_topology_ref_history(&mstb->topology_ref_history, mstb,
1561 				    "MSTB");
1562 }
1563 
1564 static __always_inline void
1565 drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
1566 {
1567 	__dump_topology_ref_history(&port->topology_ref_history, port,
1568 				    "Port");
1569 }
1570 
1571 static __always_inline void
1572 save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
1573 		       enum drm_dp_mst_topology_ref_type type)
1574 {
1575 	__topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
1576 }
1577 
1578 static __always_inline void
1579 save_port_topology_ref(struct drm_dp_mst_port *port,
1580 		       enum drm_dp_mst_topology_ref_type type)
1581 {
1582 	__topology_ref_save(port->mgr, &port->topology_ref_history, type);
1583 }
1584 
1585 static inline void
1586 topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
1587 {
1588 	mutex_lock(&mgr->topology_ref_history_lock);
1589 }
1590 
1591 static inline void
1592 topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
1593 {
1594 	mutex_unlock(&mgr->topology_ref_history_lock);
1595 }
1596 #else
1597 static inline void
1598 topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
1599 static inline void
1600 topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
1601 static inline void
1602 drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
1603 static inline void
1604 drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
1605 #define save_mstb_topology_ref(mstb, type)
1606 #define save_port_topology_ref(port, type)
1607 #endif
1608 
1609 static void drm_dp_destroy_mst_branch_device(struct kref *kref)
1610 {
1611 	struct drm_dp_mst_branch *mstb =
1612 		container_of(kref, struct drm_dp_mst_branch, topology_kref);
1613 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
1614 
1615 	drm_dp_mst_dump_mstb_topology_history(mstb);
1616 
1617 	INIT_LIST_HEAD(&mstb->destroy_next);
1618 
1619 	/*
1620 	 * This can get called under mgr->mutex, so we need to perform the
1621 	 * actual destruction of the mstb in another worker
1622 	 */
1623 	mutex_lock(&mgr->delayed_destroy_lock);
1624 	list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
1625 	mutex_unlock(&mgr->delayed_destroy_lock);
1626 	schedule_work(&mgr->delayed_destroy_work);
1627 }
1628 
1629 /**
1630  * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
1631  * branch device unless it's zero
1632  * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
1633  *
1634  * Attempts to grab a topology reference to @mstb, if it hasn't yet been
1635  * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
1636  * reached 0). Holding a topology reference implies that a malloc reference
1637  * will be held to @mstb as long as the user holds the topology reference.
1638  *
1639  * Care should be taken to ensure that the user has at least one malloc
1640  * reference to @mstb. If you already have a topology reference to @mstb, you
1641  * should use drm_dp_mst_topology_get_mstb() instead.
1642  *
1643  * See also:
1644  * drm_dp_mst_topology_get_mstb()
1645  * drm_dp_mst_topology_put_mstb()
1646  *
1647  * Returns:
1648  * * 1: A topology reference was grabbed successfully
1649  * * 0: @mstb is no longer in the topology, no reference was grabbed
1650  */
1651 static int __must_check
1652 drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
1653 {
1654 	int ret;
1655 
1656 	topology_ref_history_lock(mstb->mgr);
1657 	ret = kref_get_unless_zero(&mstb->topology_kref);
1658 	if (ret) {
1659 		DRM_DEBUG("mstb %p (%d)\n",
1660 			  mstb, kref_read(&mstb->topology_kref));
1661 		save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
1662 	}
1663 
1664 	topology_ref_history_unlock(mstb->mgr);
1665 
1666 	return ret;
1667 }
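
/*
 * Illustrative sketch (not part of this file): callers typically pair the
 * try-get above with drm_dp_mst_topology_put_mstb() and only touch the
 * branch while the reference is held.  The helper do_something_with() is
 * hypothetical.
 *
 *	if (drm_dp_mst_topology_try_get_mstb(mstb)) {
 *		do_something_with(mstb);
 *		drm_dp_mst_topology_put_mstb(mstb);
 *	} else {
 *		// mstb already left the topology; nothing to do
 *	}
 */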
1668 
1669 /**
1670  * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
1671  * branch device
1672  * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
1673  *
1674  * Increments &drm_dp_mst_branch.topology_kref without checking whether or
1675  * not it's already reached 0. This is only valid to use in scenarios where
1676  * you are already guaranteed to have at least one active topology reference
1677  * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
1678  *
1679  * See also:
1680  * drm_dp_mst_topology_try_get_mstb()
1681  * drm_dp_mst_topology_put_mstb()
1682  */
1683 static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
1684 {
1685 	topology_ref_history_lock(mstb->mgr);
1686 
1687 	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
1688 	WARN_ON(kref_read(&mstb->topology_kref) == 0);
1689 	kref_get(&mstb->topology_kref);
1690 	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
1691 
1692 	topology_ref_history_unlock(mstb->mgr);
1693 }
1694 
1695 /**
1696  * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
1697  * device
1698  * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
1699  *
1700  * Releases a topology reference from @mstb by decrementing
1701  * &drm_dp_mst_branch.topology_kref.
1702  *
1703  * See also:
1704  * drm_dp_mst_topology_try_get_mstb()
1705  * drm_dp_mst_topology_get_mstb()
1706  */
1707 static void
1708 drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
1709 {
1710 	topology_ref_history_lock(mstb->mgr);
1711 
1712 	DRM_DEBUG("mstb %p (%d)\n",
1713 		  mstb, kref_read(&mstb->topology_kref) - 1);
1714 	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);
1715 
1716 	topology_ref_history_unlock(mstb->mgr);
1717 	kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
1718 }
1719 
1720 static void drm_dp_destroy_port(struct kref *kref)
1721 {
1722 	struct drm_dp_mst_port *port =
1723 		container_of(kref, struct drm_dp_mst_port, topology_kref);
1724 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
1725 
1726 	drm_dp_mst_dump_port_topology_history(port);
1727 
1728 	/* There's nothing that needs locking to destroy an input port yet */
1729 	if (port->input) {
1730 		drm_dp_mst_put_port_malloc(port);
1731 		return;
1732 	}
1733 
1734 	kfree(port->cached_edid);
1735 
1736 	/*
1737 	 * we can't destroy the connector here, as we might be holding the
1738 	 * mode_config.mutex from an EDID retrieval
1739 	 */
1740 	mutex_lock(&mgr->delayed_destroy_lock);
1741 	list_add(&port->next, &mgr->destroy_port_list);
1742 	mutex_unlock(&mgr->delayed_destroy_lock);
1743 	schedule_work(&mgr->delayed_destroy_work);
1744 }
1745 
1746 /**
1747  * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
1748  * port unless it's zero
1749  * @port: &struct drm_dp_mst_port to increment the topology refcount of
1750  *
1751  * Attempts to grab a topology reference to @port, if it hasn't yet been
1752  * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
1753  * 0). Holding a topology reference implies that a malloc reference will be
1754  * held to @port as long as the user holds the topology reference.
1755  *
1756  * Care should be taken to ensure that the user has at least one malloc
1757  * reference to @port. If you already have a topology reference to @port, you
1758  * should use drm_dp_mst_topology_get_port() instead.
1759  *
1760  * See also:
1761  * drm_dp_mst_topology_get_port()
1762  * drm_dp_mst_topology_put_port()
1763  *
1764  * Returns:
1765  * * 1: A topology reference was grabbed successfully
1766  * * 0: @port is no longer in the topology, no reference was grabbed
1767  */
1768 static int __must_check
1769 drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
1770 {
1771 	int ret;
1772 
1773 	topology_ref_history_lock(port->mgr);
1774 	ret = kref_get_unless_zero(&port->topology_kref);
1775 	if (ret) {
1776 		DRM_DEBUG("port %p (%d)\n",
1777 			  port, kref_read(&port->topology_kref));
1778 		save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
1779 	}
1780 
1781 	topology_ref_history_unlock(port->mgr);
1782 	return ret;
1783 }
1784 
1785 /**
1786  * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
1787  * @port: The &struct drm_dp_mst_port to increment the topology refcount of
1788  *
1789  * Increments &drm_dp_mst_port.topology_kref without checking whether or
1790  * not it's already reached 0. This is only valid to use in scenarios where
1791  * you are already guaranteed to have at least one active topology reference
1792  * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
1793  *
1794  * See also:
1795  * drm_dp_mst_topology_try_get_port()
1796  * drm_dp_mst_topology_put_port()
1797  */
1798 static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
1799 {
1800 	topology_ref_history_lock(port->mgr);
1801 
1802 	WARN_ON(kref_read(&port->topology_kref) == 0);
1803 	kref_get(&port->topology_kref);
1804 	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
1805 	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
1806 
1807 	topology_ref_history_unlock(port->mgr);
1808 }
1809 
1810 /**
1811  * drm_dp_mst_topology_put_port() - release a topology reference to a port
1812  * @port: The &struct drm_dp_mst_port to release the topology reference from
1813  *
1814  * Releases a topology reference from @port by decrementing
1815  * &drm_dp_mst_port.topology_kref.
1816  *
1817  * See also:
1818  * drm_dp_mst_topology_try_get_port()
1819  * drm_dp_mst_topology_get_port()
1820  */
1821 static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
1822 {
1823 	topology_ref_history_lock(port->mgr);
1824 
1825 	DRM_DEBUG("port %p (%d)\n",
1826 		  port, kref_read(&port->topology_kref) - 1);
1827 	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);
1828 
1829 	topology_ref_history_unlock(port->mgr);
1830 	kref_put(&port->topology_kref, drm_dp_destroy_port);
1831 }
1832 
1833 static struct drm_dp_mst_branch *
1834 drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
1835 					      struct drm_dp_mst_branch *to_find)
1836 {
1837 	struct drm_dp_mst_port *port;
1838 	struct drm_dp_mst_branch *rmstb;
1839 
1840 	if (to_find == mstb)
1841 		return mstb;
1842 
1843 	list_for_each_entry(port, &mstb->ports, next) {
1844 		if (port->mstb) {
1845 			rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1846 			    port->mstb, to_find);
1847 			if (rmstb)
1848 				return rmstb;
1849 		}
1850 	}
1851 	return NULL;
1852 }
1853 
1854 static struct drm_dp_mst_branch *
1855 drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
1856 				       struct drm_dp_mst_branch *mstb)
1857 {
1858 	struct drm_dp_mst_branch *rmstb = NULL;
1859 
1860 	mutex_lock(&mgr->lock);
1861 	if (mgr->mst_primary) {
1862 		rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1863 		    mgr->mst_primary, mstb);
1864 
1865 		if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
1866 			rmstb = NULL;
1867 	}
1868 	mutex_unlock(&mgr->lock);
1869 	return rmstb;
1870 }
1871 
1872 static struct drm_dp_mst_port *
1873 drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
1874 					      struct drm_dp_mst_port *to_find)
1875 {
1876 	struct drm_dp_mst_port *port, *mport;
1877 
1878 	list_for_each_entry(port, &mstb->ports, next) {
1879 		if (port == to_find)
1880 			return port;
1881 
1882 		if (port->mstb) {
1883 			mport = drm_dp_mst_topology_get_port_validated_locked(
1884 			    port->mstb, to_find);
1885 			if (mport)
1886 				return mport;
1887 		}
1888 	}
1889 	return NULL;
1890 }
1891 
1892 static struct drm_dp_mst_port *
1893 drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
1894 				       struct drm_dp_mst_port *port)
1895 {
1896 	struct drm_dp_mst_port *rport = NULL;
1897 
1898 	mutex_lock(&mgr->lock);
1899 	if (mgr->mst_primary) {
1900 		rport = drm_dp_mst_topology_get_port_validated_locked(
1901 		    mgr->mst_primary, port);
1902 
1903 		if (rport && !drm_dp_mst_topology_try_get_port(rport))
1904 			rport = NULL;
1905 	}
1906 	mutex_unlock(&mgr->lock);
1907 	return rport;
1908 }
1909 
1910 static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
1911 {
1912 	struct drm_dp_mst_port *port;
1913 	int ret;
1914 
1915 	list_for_each_entry(port, &mstb->ports, next) {
1916 		if (port->port_num == port_num) {
1917 			ret = drm_dp_mst_topology_try_get_port(port);
1918 			return ret ? port : NULL;
1919 		}
1920 	}
1921 
1922 	return NULL;
1923 }
1924 
1925 /*
1926  * calculate a new RAD for this MST branch device
1927  * if parent has an LCT of 2 then it has 1 nibble of RAD,
1928  * if parent has an LCT of 3 then it has 2 nibbles of RAD,
1929  */
1930 static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
1931 				 u8 *rad)
1932 {
1933 	int parent_lct = port->parent->lct;
1934 	int shift = 4;
1935 	int idx = (parent_lct - 1) / 2;
1936 	if (parent_lct > 1) {
1937 		memcpy(rad, port->parent->rad, idx + 1);
1938 		shift = (parent_lct % 2) ? 4 : 0;
1939 	} else
1940 		rad[0] = 0;
1941 
1942 	rad[idx] |= port->port_num << shift;
1943 	return parent_lct + 1;
1944 }
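
/*
 * Worked example (informative only): for a port numbered 3 whose parent
 * branch has lct == 2 and rad[0] == 0x10, idx is 0 and shift is 0, so the
 * child's RAD becomes rad[0] == 0x13 and the returned lct is 3; the new
 * nibble lands in the low half of the same byte.
 */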
1945 
1946 static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs)
1947 {
1948 	switch (pdt) {
1949 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
1950 	case DP_PEER_DEVICE_SST_SINK:
1951 		return true;
1952 	case DP_PEER_DEVICE_MST_BRANCHING:
1953 		/* For sst branch device */
1954 		if (!mcs)
1955 			return true;
1956 
1957 		return false;
1958 	}
1959 	return true;
1960 }
1961 
1962 static int
1963 drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
1964 		    bool new_mcs)
1965 {
1966 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
1967 	struct drm_dp_mst_branch *mstb;
1968 	u8 rad[8], lct;
1969 	int ret = 0;
1970 
1971 	if (port->pdt == new_pdt && port->mcs == new_mcs)
1972 		return 0;
1973 
1974 	/* Teardown the old pdt, if there is one */
1975 	if (port->pdt != DP_PEER_DEVICE_NONE) {
1976 		if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
1977 			/*
1978 			 * If the new PDT would also have an i2c bus,
1979 			 * don't bother with reregistering it
1980 			 */
1981 			if (new_pdt != DP_PEER_DEVICE_NONE &&
1982 			    drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) {
1983 				port->pdt = new_pdt;
1984 				port->mcs = new_mcs;
1985 				return 0;
1986 			}
1987 
1988 			/* remove i2c over sideband */
1989 			drm_dp_mst_unregister_i2c_bus(&port->aux);
1990 		} else {
1991 			mutex_lock(&mgr->lock);
1992 			drm_dp_mst_topology_put_mstb(port->mstb);
1993 			port->mstb = NULL;
1994 			mutex_unlock(&mgr->lock);
1995 		}
1996 	}
1997 
1998 	port->pdt = new_pdt;
1999 	port->mcs = new_mcs;
2000 
2001 	if (port->pdt != DP_PEER_DEVICE_NONE) {
2002 		if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
2003 			/* add i2c over sideband */
2004 			ret = drm_dp_mst_register_i2c_bus(&port->aux);
2005 		} else {
2006 			lct = drm_dp_calculate_rad(port, rad);
2007 			mstb = drm_dp_add_mst_branch_device(lct, rad);
2008 			if (!mstb) {
2009 				ret = -ENOMEM;
2010 				DRM_ERROR("Failed to create MSTB for port %p",
2011 					  port);
2012 				goto out;
2013 			}
2014 
2015 			mutex_lock(&mgr->lock);
2016 			port->mstb = mstb;
2017 			mstb->mgr = port->mgr;
2018 			mstb->port_parent = port;
2019 
2020 			/*
2021 			 * Make sure this port's memory allocation stays
2022 			 * around until its child MSTB releases it
2023 			 */
2024 			drm_dp_mst_get_port_malloc(port);
2025 			mutex_unlock(&mgr->lock);
2026 
2027 			/* And make sure we send a link address for this */
2028 			ret = 1;
2029 		}
2030 	}
2031 
2032 out:
2033 	if (ret < 0)
2034 		port->pdt = DP_PEER_DEVICE_NONE;
2035 	return ret;
2036 }
2037 
2038 /**
2039  * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
2040  * @aux: Fake sideband AUX CH
2041  * @offset: address of the (first) register to read
2042  * @buffer: buffer to store the register values
2043  * @size: number of bytes in @buffer
2044  *
2045  * Performs the same functionality for remote devices via
2046  * sideband messaging as drm_dp_dpcd_read() does for local
2047  * devices via actual AUX CH.
2048  *
2049  * Return: Number of bytes read, or negative error code on failure.
2050  */
2051 ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
2052 			     unsigned int offset, void *buffer, size_t size)
2053 {
2054 	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
2055 						    aux);
2056 
2057 	return drm_dp_send_dpcd_read(port->mgr, port,
2058 				     offset, size, buffer);
2059 }
2060 
2061 /**
2062  * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
2063  * @aux: Fake sideband AUX CH
2064  * @offset: address of the (first) register to write
2065  * @buffer: buffer containing the values to write
2066  * @size: number of bytes in @buffer
2067  *
2068  * Performs the same functionality for remote devices via
2069  * sideband messaging as drm_dp_dpcd_write() does for local
2070  * devices via actual AUX CH.
2071  *
2072  * Return: 0 on success, negative error code on failure.
2073  */
2074 ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
2075 			      unsigned int offset, void *buffer, size_t size)
2076 {
2077 	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
2078 						    aux);
2079 
2080 	return drm_dp_send_dpcd_write(port->mgr, port,
2081 				      offset, size, buffer);
2082 }
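
/*
 * Illustrative sketch (hypothetical caller): since port->aux.is_remote is
 * set when the port is created, drivers normally just use the generic DPCD
 * accessors on the remote AUX channel and the two helpers above are invoked
 * for them; "rev" is a local buffer for the example, not a name from this
 * file.
 *
 *	u8 rev;
 *	ssize_t err = drm_dp_dpcd_read(&port->aux, DP_DPCD_REV, &rev, 1);
 *	if (err < 0)
 *		return err;
 */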
2083 
2084 static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
2085 {
2086 	int ret __unused;
2087 
2088 	memcpy(mstb->guid, guid, 16);
2089 
2090 	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
2091 		if (mstb->port_parent) {
2092 			ret = drm_dp_send_dpcd_write(
2093 					mstb->mgr,
2094 					mstb->port_parent,
2095 					DP_GUID,
2096 					16,
2097 					mstb->guid);
2098 		} else {
2099 
2100 			ret = drm_dp_dpcd_write(
2101 					mstb->mgr->aux,
2102 					DP_GUID,
2103 					mstb->guid,
2104 					16);
2105 		}
2106 	}
2107 }
2108 
2109 static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
2110 				int pnum,
2111 				char *proppath,
2112 				size_t proppath_size)
2113 {
2114 	int i;
2115 	char temp[8];
2116 	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
2117 	for (i = 0; i < (mstb->lct - 1); i++) {
2118 		int shift = (i % 2) ? 0 : 4;
2119 		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
2120 		snprintf(temp, sizeof(temp), "-%d", port_num);
2121 		strlcat(proppath, temp, proppath_size);
2122 	}
2123 	snprintf(temp, sizeof(temp), "-%d", pnum);
2124 	strlcat(proppath, temp, proppath_size);
2125 }
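
/*
 * Worked example (informative only): with conn_base_id 10, an mstb at
 * lct 2 reached through port 1 of the primary branch (rad[0] == 0x10),
 * and pnum 8, the resulting connector property path is "mst:10-1-8".
 */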
2126 
2127 /**
2128  * drm_dp_mst_connector_late_register() - Late MST connector registration
2129  * @connector: The MST connector
2130  * @port: The MST port for this connector
2131  *
2132  * Helper to register the remote aux device for this MST port. Drivers should
2133  * call this from their mst connector's late_register hook to enable MST aux
2134  * devices.
2135  *
2136  * Return: 0 on success, negative error code on failure.
2137  */
2138 int drm_dp_mst_connector_late_register(struct drm_connector *connector,
2139 				       struct drm_dp_mst_port *port)
2140 {
2141 	DRM_DEBUG_KMS("registering %s remote bus for %s\n",
2142 		      port->aux.name, device_xname(connector->dev->dev));
2143 
2144 	port->aux.dev = connector->kdev;
2145 	return drm_dp_aux_register_devnode(&port->aux);
2146 }
2147 EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
2148 
2149 /**
2150  * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
2151  * @connector: The MST connector
2152  * @port: The MST port for this connector
2153  *
2154  * Helper to unregister the remote aux device for this MST port, registered by
2155  * drm_dp_mst_connector_late_register(). Drivers should call this from their mst
2156  * connector's early_unregister hook.
2157  */
2158 void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
2159 					   struct drm_dp_mst_port *port)
2160 {
2161 	DRM_DEBUG_KMS("unregistering %s remote bus for %s\n",
2162 		      port->aux.name, device_xname(connector->dev->dev));
2163 	drm_dp_aux_unregister_devnode(&port->aux);
2164 }
2165 EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
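
/*
 * Illustrative sketch (hypothetical driver code): the two helpers above are
 * meant to be wired into a driver's MST connector functions, roughly like
 * this.  my_mst_connector and to_my_mst_connector() are assumptions for the
 * example, not names from this file.
 *
 *	static int my_mst_late_register(struct drm_connector *connector)
 *	{
 *		struct my_mst_connector *c = to_my_mst_connector(connector);
 *
 *		return drm_dp_mst_connector_late_register(connector, c->port);
 *	}
 *
 *	static void my_mst_early_unregister(struct drm_connector *connector)
 *	{
 *		struct my_mst_connector *c = to_my_mst_connector(connector);
 *
 *		drm_dp_mst_connector_early_unregister(connector, c->port);
 *	}
 */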
2166 
2167 static void
2168 drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
2169 			      struct drm_dp_mst_port *port)
2170 {
2171 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2172 	char proppath[255];
2173 	int ret;
2174 
2175 	build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
2176 	port->connector = mgr->cbs->add_connector(mgr, port, proppath);
2177 	if (!port->connector) {
2178 		ret = -ENOMEM;
2179 		goto error;
2180 	}
2181 
2182 	if (port->pdt != DP_PEER_DEVICE_NONE &&
2183 	    drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
2184 		port->cached_edid = drm_get_edid(port->connector,
2185 						 &port->aux.ddc);
2186 		drm_connector_set_tile_property(port->connector);
2187 	}
2188 
2189 	mgr->cbs->register_connector(port->connector);
2190 	return;
2191 
2192 error:
2193 	DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret);
2194 }
2195 
2196 /*
2197  * Drop a topology reference, and unlink the port from the in-memory topology
2198  * layout
2199  */
2200 static void
2201 drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
2202 				struct drm_dp_mst_port *port)
2203 {
2204 	mutex_lock(&mgr->lock);
2205 	port->parent->num_ports--;
2206 	list_del(&port->next);
2207 	mutex_unlock(&mgr->lock);
2208 	drm_dp_mst_topology_put_port(port);
2209 }
2210 
2211 static struct drm_dp_mst_port *
2212 drm_dp_mst_add_port(struct drm_device *dev,
2213 		    struct drm_dp_mst_topology_mgr *mgr,
2214 		    struct drm_dp_mst_branch *mstb, u8 port_number)
2215 {
2216 	struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
2217 
2218 	if (!port)
2219 		return NULL;
2220 
2221 	kref_init(&port->topology_kref);
2222 	kref_init(&port->malloc_kref);
2223 	port->parent = mstb;
2224 	port->port_num = port_number;
2225 	port->mgr = mgr;
2226 	port->aux.name = "DPMST";
2227 	port->aux.dev = dev->dev;
2228 	port->aux.is_remote = true;
2229 
2230 	/* initialize the MST downstream port's AUX crc work queue */
2231 	drm_dp_remote_aux_init(&port->aux);
2232 
2233 	/*
2234 	 * Make sure the memory allocation for our parent branch stays
2235 	 * around until our own memory allocation is released
2236 	 */
2237 	drm_dp_mst_get_mstb_malloc(mstb);
2238 
2239 	return port;
2240 }
2241 
2242 static int
2243 drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
2244 				    struct drm_device *dev,
2245 				    struct drm_dp_link_addr_reply_port *port_msg)
2246 {
2247 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2248 	struct drm_dp_mst_port *port;
2249 	int old_ddps = 0, ret;
2250 	u8 new_pdt = DP_PEER_DEVICE_NONE;
2251 	bool new_mcs = 0;
2252 	bool created = false, send_link_addr = false, changed = false;
2253 
2254 	port = drm_dp_get_port(mstb, port_msg->port_number);
2255 	if (!port) {
2256 		port = drm_dp_mst_add_port(dev, mgr, mstb,
2257 					   port_msg->port_number);
2258 		if (!port)
2259 			return -ENOMEM;
2260 		created = true;
2261 		changed = true;
2262 	} else if (!port->input && port_msg->input_port && port->connector) {
2263 		/* Since port->connector can't be changed here, we create a
2264 		 * new port if input_port changes from 0 to 1
2265 		 */
2266 		drm_dp_mst_topology_unlink_port(mgr, port);
2267 		drm_dp_mst_topology_put_port(port);
2268 		port = drm_dp_mst_add_port(dev, mgr, mstb,
2269 					   port_msg->port_number);
2270 		if (!port)
2271 			return -ENOMEM;
2272 		changed = true;
2273 		created = true;
2274 	} else if (port->input && !port_msg->input_port) {
2275 		changed = true;
2276 	} else if (port->connector) {
2277 		/* We're updating a port that's exposed to userspace, so do it
2278 		 * under lock
2279 		 */
2280 		drm_modeset_lock(&mgr->base.lock, NULL);
2281 
2282 		old_ddps = port->ddps;
2283 		changed = port->ddps != port_msg->ddps ||
2284 			(port->ddps &&
2285 			 (port->ldps != port_msg->legacy_device_plug_status ||
2286 			  port->dpcd_rev != port_msg->dpcd_revision ||
2287 			  port->mcs != port_msg->mcs ||
2288 			  port->pdt != port_msg->peer_device_type ||
2289 			  port->num_sdp_stream_sinks !=
2290 			  port_msg->num_sdp_stream_sinks));
2291 	}
2292 
2293 	port->input = port_msg->input_port;
2294 	if (!port->input)
2295 		new_pdt = port_msg->peer_device_type;
2296 	new_mcs = port_msg->mcs;
2297 	port->ddps = port_msg->ddps;
2298 	port->ldps = port_msg->legacy_device_plug_status;
2299 	port->dpcd_rev = port_msg->dpcd_revision;
2300 	port->num_sdp_streams = port_msg->num_sdp_streams;
2301 	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
2302 
2303 	/* manage mstb port lists with mgr lock - take a reference
2304 	   for this list */
2305 	if (created) {
2306 		mutex_lock(&mgr->lock);
2307 		drm_dp_mst_topology_get_port(port);
2308 		list_add(&port->next, &mstb->ports);
2309 		mstb->num_ports++;
2310 		mutex_unlock(&mgr->lock);
2311 	}
2312 
2313 	if (old_ddps != port->ddps) {
2314 		if (port->ddps) {
2315 			if (!port->input) {
2316 				drm_dp_send_enum_path_resources(mgr, mstb,
2317 								port);
2318 			}
2319 		} else {
2320 			port->available_pbn = 0;
2321 		}
2322 	}
2323 
2324 	ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2325 	if (ret == 1) {
2326 		send_link_addr = true;
2327 	} else if (ret < 0) {
2328 		DRM_ERROR("Failed to change PDT on port %p: %d\n",
2329 			  port, ret);
2330 		goto fail;
2331 	}
2332 
2333 	/*
2334 	 * If this port wasn't just created, then we're reprobing because
2335 	 * we're coming out of suspend. In this case, always resend the link
2336 	 * address if there's an MSTB on this port
2337 	 */
2338 	if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
2339 	    port->mcs)
2340 		send_link_addr = true;
2341 
2342 	if (port->connector)
2343 		drm_modeset_unlock(&mgr->base.lock);
2344 	else if (!port->input)
2345 		drm_dp_mst_port_add_connector(mstb, port);
2346 
2347 	if (send_link_addr && port->mstb) {
2348 		ret = drm_dp_send_link_address(mgr, port->mstb);
2349 		if (ret == 1) /* MSTB below us changed */
2350 			changed = true;
2351 		else if (ret < 0)
2352 			goto fail_put;
2353 	}
2354 
2355 	/* put reference to this port */
2356 	drm_dp_mst_topology_put_port(port);
2357 	return changed;
2358 
2359 fail:
2360 	drm_dp_mst_topology_unlink_port(mgr, port);
2361 	if (port->connector)
2362 		drm_modeset_unlock(&mgr->base.lock);
2363 fail_put:
2364 	drm_dp_mst_topology_put_port(port);
2365 	return ret;
2366 }
2367 
2368 static void
2369 drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
2370 			    struct drm_dp_connection_status_notify *conn_stat)
2371 {
2372 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
2373 	struct drm_dp_mst_port *port;
2374 	int old_ddps, old_input, ret, i;
2375 	u8 new_pdt;
2376 	bool new_mcs;
2377 	bool dowork = false, create_connector = false;
2378 
2379 	port = drm_dp_get_port(mstb, conn_stat->port_number);
2380 	if (!port)
2381 		return;
2382 
2383 	if (port->connector) {
2384 		if (!port->input && conn_stat->input_port) {
2385 			/*
2386 			 * We can't remove a connector from an already exposed
2387 			 * port, so just throw the port out and make sure we
2388 			 * reprobe the link address of its parent MSTB
2389 			 */
2390 			drm_dp_mst_topology_unlink_port(mgr, port);
2391 			mstb->link_address_sent = false;
2392 			dowork = true;
2393 			goto out;
2394 		}
2395 
2396 		/* Locking is only needed if the port's exposed to userspace */
2397 		drm_modeset_lock(&mgr->base.lock, NULL);
2398 	} else if (port->input && !conn_stat->input_port) {
2399 		create_connector = true;
2400 		/* Reprobe link address so we get num_sdp_streams */
2401 		mstb->link_address_sent = false;
2402 		dowork = true;
2403 	}
2404 
2405 	old_ddps = port->ddps;
2406 	old_input = port->input;
2407 	port->input = conn_stat->input_port;
2408 	port->ldps = conn_stat->legacy_device_plug_status;
2409 	port->ddps = conn_stat->displayport_device_plug_status;
2410 
2411 	if (old_ddps != port->ddps) {
2412 		if (port->ddps) {
2413 			dowork = true;
2414 		} else {
2415 			port->available_pbn = 0;
2416 		}
2417 	}
2418 
2419 	new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
2420 	new_mcs = conn_stat->message_capability_status;
2421 	ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
2422 	if (ret == 1) {
2423 		dowork = true;
2424 	} else if (ret < 0) {
2425 		DRM_ERROR("Failed to change PDT for port %p: %d\n",
2426 			  port, ret);
2427 		dowork = false;
2428 	}
2429 
2430 	if (!old_input && old_ddps != port->ddps && !port->ddps) {
2431 		for (i = 0; i < mgr->max_payloads; i++) {
2432 			struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
2433 			struct drm_dp_mst_port *port_validated;
2434 
2435 			if (!vcpi)
2436 				continue;
2437 
2438 			port_validated =
2439 				container_of(vcpi, struct drm_dp_mst_port, vcpi);
2440 			port_validated =
2441 				drm_dp_mst_topology_get_port_validated(mgr, port_validated);
2442 			if (!port_validated) {
2443 				mutex_lock(&mgr->payload_lock);
2444 				vcpi->num_slots = 0;
2445 				mutex_unlock(&mgr->payload_lock);
2446 			} else {
2447 				drm_dp_mst_topology_put_port(port_validated);
2448 			}
2449 		}
2450 	}
2451 
2452 	if (port->connector)
2453 		drm_modeset_unlock(&mgr->base.lock);
2454 	else if (create_connector)
2455 		drm_dp_mst_port_add_connector(mstb, port);
2456 
2457 out:
2458 	drm_dp_mst_topology_put_port(port);
2459 	if (dowork)
2460 		queue_work(system_long_wq, &mstb->mgr->work);
2461 }
2462 
2463 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
2464 							       u8 lct, u8 *rad)
2465 {
2466 	struct drm_dp_mst_branch *mstb;
2467 	struct drm_dp_mst_port *port;
2468 	int i, ret;
2469 	/* find the port by iterating down */
2470 
2471 	mutex_lock(&mgr->lock);
2472 	mstb = mgr->mst_primary;
2473 
2474 	if (!mstb)
2475 		goto out;
2476 
2477 	for (i = 0; i < lct - 1; i++) {
2478 		int shift = (i % 2) ? 0 : 4;
2479 		int port_num = (rad[i / 2] >> shift) & 0xf;
2480 
2481 		list_for_each_entry(port, &mstb->ports, next) {
2482 			if (port->port_num == port_num) {
2483 				mstb = port->mstb;
2484 				if (!mstb) {
2485 					DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
2486 					goto out;
2487 				}
2488 
2489 				break;
2490 			}
2491 		}
2492 	}
2493 	ret = drm_dp_mst_topology_try_get_mstb(mstb);
2494 	if (!ret)
2495 		mstb = NULL;
2496 out:
2497 	mutex_unlock(&mgr->lock);
2498 	return mstb;
2499 }
2500 
2501 static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
2502 	struct drm_dp_mst_branch *mstb,
2503 	const uint8_t *guid)
2504 {
2505 	struct drm_dp_mst_branch *found_mstb;
2506 	struct drm_dp_mst_port *port;
2507 
2508 	if (memcmp(mstb->guid, guid, 16) == 0)
2509 		return mstb;
2510 
2511 
2512 	list_for_each_entry(port, &mstb->ports, next) {
2513 		if (!port->mstb)
2514 			continue;
2515 
2516 		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
2517 
2518 		if (found_mstb)
2519 			return found_mstb;
2520 	}
2521 
2522 	return NULL;
2523 }
2524 
2525 static struct drm_dp_mst_branch *
2526 drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
2527 				     const uint8_t *guid)
2528 {
2529 	struct drm_dp_mst_branch *mstb;
2530 	int ret;
2531 
2532 	/* find the port by iterating down */
2533 	mutex_lock(&mgr->lock);
2534 
2535 	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
2536 	if (mstb) {
2537 		ret = drm_dp_mst_topology_try_get_mstb(mstb);
2538 		if (!ret)
2539 			mstb = NULL;
2540 	}
2541 
2542 	mutex_unlock(&mgr->lock);
2543 	return mstb;
2544 }
2545 
2546 static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2547 					       struct drm_dp_mst_branch *mstb)
2548 {
2549 	struct drm_dp_mst_port *port;
2550 	int ret;
2551 	bool changed = false;
2552 
2553 	if (!mstb->link_address_sent) {
2554 		ret = drm_dp_send_link_address(mgr, mstb);
2555 		if (ret == 1)
2556 			changed = true;
2557 		else if (ret < 0)
2558 			return ret;
2559 	}
2560 
2561 	list_for_each_entry(port, &mstb->ports, next) {
2562 		struct drm_dp_mst_branch *mstb_child = NULL;
2563 
2564 		if (port->input || !port->ddps)
2565 			continue;
2566 
2567 		if (!port->available_pbn) {
2568 			drm_modeset_lock(&mgr->base.lock, NULL);
2569 			drm_dp_send_enum_path_resources(mgr, mstb, port);
2570 			drm_modeset_unlock(&mgr->base.lock);
2571 			changed = true;
2572 		}
2573 
2574 		if (port->mstb)
2575 			mstb_child = drm_dp_mst_topology_get_mstb_validated(
2576 			    mgr, port->mstb);
2577 
2578 		if (mstb_child) {
2579 			ret = drm_dp_check_and_send_link_address(mgr,
2580 								 mstb_child);
2581 			drm_dp_mst_topology_put_mstb(mstb_child);
2582 			if (ret == 1)
2583 				changed = true;
2584 			else if (ret < 0)
2585 				return ret;
2586 		}
2587 	}
2588 
2589 	return changed;
2590 }
2591 
2592 static void drm_dp_mst_link_probe_work(struct work_struct *work)
2593 {
2594 	struct drm_dp_mst_topology_mgr *mgr =
2595 		container_of(work, struct drm_dp_mst_topology_mgr, work);
2596 	struct drm_device *dev = mgr->dev;
2597 	struct drm_dp_mst_branch *mstb;
2598 	int ret;
2599 	bool clear_payload_id_table;
2600 
2601 	mutex_lock(&mgr->probe_lock);
2602 
2603 	mutex_lock(&mgr->lock);
2604 	clear_payload_id_table = !mgr->payload_id_table_cleared;
2605 	mgr->payload_id_table_cleared = true;
2606 
2607 	mstb = mgr->mst_primary;
2608 	if (mstb) {
2609 		ret = drm_dp_mst_topology_try_get_mstb(mstb);
2610 		if (!ret)
2611 			mstb = NULL;
2612 	}
2613 	mutex_unlock(&mgr->lock);
2614 	if (!mstb) {
2615 		mutex_unlock(&mgr->probe_lock);
2616 		return;
2617 	}
2618 
2619 	/*
2620 	 * Certain branch devices seem to incorrectly report an available_pbn
2621 	 * of 0 on downstream sinks, even after clearing the
2622 	 * DP_PAYLOAD_ALLOCATE_* registers in
2623 	 * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C
2624 	 * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make
2625 	 * things work again.
2626 	 */
2627 	if (clear_payload_id_table) {
2628 		DRM_DEBUG_KMS("Clearing payload ID table\n");
2629 		drm_dp_send_clear_payload_id_table(mgr, mstb);
2630 	}
2631 
2632 	ret = drm_dp_check_and_send_link_address(mgr, mstb);
2633 	drm_dp_mst_topology_put_mstb(mstb);
2634 
2635 	mutex_unlock(&mgr->probe_lock);
2636 	if (ret)
2637 		drm_kms_helper_hotplug_event(dev);
2638 }
2639 
2640 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
2641 				 u8 *guid)
2642 {
2643 	u64 salt;
2644 
2645 	if (memchr_inv(guid, 0, 16))
2646 		return true;
2647 
2648 	salt = get_jiffies_64();
2649 
2650 	memcpy(&guid[0], &salt, sizeof(u64));
2651 	memcpy(&guid[8], &salt, sizeof(u64));
2652 
2653 	return false;
2654 }
2655 
2656 static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
2657 {
2658 	struct drm_dp_sideband_msg_req_body req;
2659 
2660 	req.req_type = DP_REMOTE_DPCD_READ;
2661 	req.u.dpcd_read.port_number = port_num;
2662 	req.u.dpcd_read.dpcd_address = offset;
2663 	req.u.dpcd_read.num_bytes = num_bytes;
2664 	drm_dp_encode_sideband_req(&req, msg);
2665 
2666 	return 0;
2667 }
2668 
2669 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
2670 				    bool up, u8 *msg, int len)
2671 {
2672 	int ret;
2673 	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
2674 	int tosend, total, offset;
2675 	int retries = 0;
2676 
2677 retry:
2678 	total = len;
2679 	offset = 0;
2680 	do {
2681 		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
2682 
2683 		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
2684 					&msg[offset],
2685 					tosend);
2686 		if (ret != tosend) {
2687 			if (ret == -EIO && retries < 5) {
2688 				retries++;
2689 				goto retry;
2690 			}
2691 			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
2692 
2693 			return -EIO;
2694 		}
2695 		offset += tosend;
2696 		total -= tosend;
2697 	} while (total > 0);
2698 	return 0;
2699 }
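
/*
 * Worked example (informative only), assuming max_dpcd_transaction_bytes
 * is 16: a 27 byte sideband message is written as two DPCD chunks of 16
 * and 11 bytes at regbase + 0 and regbase + 16, and an -EIO on any chunk
 * restarts the whole message, up to five retries.
 */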
2700 
2701 static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
2702 				  struct drm_dp_sideband_msg_tx *txmsg)
2703 {
2704 	struct drm_dp_mst_branch *mstb = txmsg->dst;
2705 	u8 req_type;
2706 
2707 	/* both msg slots are full */
2708 	if (txmsg->seqno == -1) {
2709 		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
2710 			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
2711 			return -EAGAIN;
2712 		}
2713 		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
2714 			txmsg->seqno = mstb->last_seqno;
2715 			mstb->last_seqno ^= 1;
2716 		} else if (mstb->tx_slots[0] == NULL)
2717 			txmsg->seqno = 0;
2718 		else
2719 			txmsg->seqno = 1;
2720 		mstb->tx_slots[txmsg->seqno] = txmsg;
2721 	}
2722 
2723 	req_type = txmsg->msg[0] & 0x7f;
2724 	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
2725 		req_type == DP_RESOURCE_STATUS_NOTIFY)
2726 		hdr->broadcast = 1;
2727 	else
2728 		hdr->broadcast = 0;
2729 	hdr->path_msg = txmsg->path_msg;
2730 	hdr->lct = mstb->lct;
2731 	hdr->lcr = mstb->lct - 1;
2732 	if (mstb->lct > 1)
2733 		memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
2734 	hdr->seqno = txmsg->seqno;
2735 	return 0;
2736 }
2737 /*
2738  * process a single block of the next message in the sideband queue
2739  */
2740 static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2741 				   struct drm_dp_sideband_msg_tx *txmsg,
2742 				   bool up)
2743 {
2744 	u8 chunk[48];
2745 	struct drm_dp_sideband_msg_hdr hdr;
2746 	int len, space, idx, tosend;
2747 	int ret;
2748 
2749 	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
2750 
2751 	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
2752 		txmsg->seqno = -1;
2753 		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
2754 	}
2755 
2756 	/* make hdr from dst mst - for replies use seqno
2757 	   otherwise assign one */
2758 	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
2759 	if (ret < 0)
2760 		return ret;
2761 
2762 	/* amount left to send in this message */
2763 	len = txmsg->cur_len - txmsg->cur_offset;
2764 
2765 	/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
2766 	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
2767 
2768 	tosend = min(len, space);
2769 	if (len == txmsg->cur_len)
2770 		hdr.somt = 1;
2771 	if (space >= len)
2772 		hdr.eomt = 1;
2773 
2774 
2775 	hdr.msg_len = tosend + 1;
2776 	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
2777 	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
2778 	/* add crc at end */
2779 	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
2780 	idx += tosend + 1;
2781 
2782 	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
2783 	if (unlikely(ret) && drm_debug_enabled(DRM_UT_DP)) {
2784 		struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2785 
2786 		drm_printf(&p, "sideband msg failed to send\n");
2787 		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2788 		return ret;
2789 	}
2790 
2791 	txmsg->cur_offset += tosend;
2792 	if (txmsg->cur_offset == txmsg->cur_len) {
2793 		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
2794 		return 1;
2795 	}
2796 	return 0;
2797 }
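
/*
 * Worked example (informative only), assuming the usual 3 byte encoded
 * header for an lct 1 destination: space is 48 - 1 - 3 == 44 bytes, so a
 * 20 byte request goes out as a single chunk with both somt and eomt set
 * and hdr.msg_len == 21 (body plus the trailing CRC byte).
 */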
2798 
2799 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
2800 {
2801 	struct drm_dp_sideband_msg_tx *txmsg;
2802 	int ret;
2803 
2804 	WARN_ON(!mutex_is_locked(&mgr->qlock));
2805 
2806 	/* construct a chunk from the first msg in the tx_msg queue */
2807 	if (list_empty(&mgr->tx_msg_downq))
2808 		return;
2809 
2810 	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
2811 	ret = process_single_tx_qlock(mgr, txmsg, false);
2812 	if (ret == 1) {
2813 		/* txmsg is sent it should be in the slots now */
2814 		mgr->is_waiting_for_dwn_reply = true;
2815 		list_del(&txmsg->next);
2816 	} else if (ret) {
2817 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
2818 		mgr->is_waiting_for_dwn_reply = false;
2819 		list_del(&txmsg->next);
2820 		if (txmsg->seqno != -1)
2821 			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
2822 		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
2823 		DRM_WAKEUP_ALL(&mgr->tx_waitq, &mgr->qlock);
2824 	}
2825 }
2826 
2827 /* called holding qlock */
2828 static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2829 				       struct drm_dp_sideband_msg_tx *txmsg)
2830 {
2831 	int ret;
2832 
2833 	/* construct a chunk from the first msg in the tx_msg queue */
2834 	ret = process_single_tx_qlock(mgr, txmsg, true);
2835 
2836 	if (ret != 1)
2837 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
2838 
2839 	if (txmsg->seqno != -1) {
2840 		WARN_ON((unsigned int)txmsg->seqno >
2841 			ARRAY_SIZE(txmsg->dst->tx_slots));
2842 		txmsg->dst->tx_slots[txmsg->seqno] = NULL;
2843 	}
2844 }
2845 
2846 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
2847 				 struct drm_dp_sideband_msg_tx *txmsg)
2848 {
2849 	mutex_lock(&mgr->qlock);
2850 	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
2851 
2852 	if (drm_debug_enabled(DRM_UT_DP)) {
2853 		struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2854 
2855 		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2856 	}
2857 
2858 	if (list_is_singular(&mgr->tx_msg_downq) &&
2859 	    !mgr->is_waiting_for_dwn_reply)
2860 		process_single_down_tx_qlock(mgr);
2861 	mutex_unlock(&mgr->qlock);
2862 }
2863 
2864 static void
2865 drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply)
2866 {
2867 	struct drm_dp_link_addr_reply_port *port_reply;
2868 	int i;
2869 
2870 	for (i = 0; i < reply->nports; i++) {
2871 		port_reply = &reply->ports[i];
2872 		DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
2873 			      i,
2874 			      port_reply->input_port,
2875 			      port_reply->peer_device_type,
2876 			      port_reply->port_number,
2877 			      port_reply->dpcd_revision,
2878 			      port_reply->mcs,
2879 			      port_reply->ddps,
2880 			      port_reply->legacy_device_plug_status,
2881 			      port_reply->num_sdp_streams,
2882 			      port_reply->num_sdp_stream_sinks);
2883 	}
2884 }
2885 
2886 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2887 				     struct drm_dp_mst_branch *mstb)
2888 {
2889 	struct drm_dp_sideband_msg_tx *txmsg;
2890 	struct drm_dp_link_address_ack_reply *reply;
2891 	struct drm_dp_mst_port *port, *tmp;
2892 	int i, len __unused, ret, port_mask = 0;
2893 	bool changed = false;
2894 
2895 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2896 	if (!txmsg)
2897 		return -ENOMEM;
2898 
2899 	txmsg->dst = mstb;
2900 	len = build_link_address(txmsg);
2901 
2902 	mstb->link_address_sent = true;
2903 	drm_dp_queue_down_tx(mgr, txmsg);
2904 
2905 	/* FIXME: Actually do some real error handling here */
2906 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2907 	if (ret <= 0) {
2908 		DRM_ERROR("Sending link address failed with %d\n", ret);
2909 		goto out;
2910 	}
2911 	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2912 		DRM_ERROR("link address NAK received\n");
2913 		ret = -EIO;
2914 		goto out;
2915 	}
2916 
2917 	reply = &txmsg->reply.u.link_addr;
2918 	DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
2919 	drm_dp_dump_link_address(reply);
2920 
2921 	drm_dp_check_mstb_guid(mstb, reply->guid);
2922 
2923 	for (i = 0; i < reply->nports; i++) {
2924 		port_mask |= BIT(reply->ports[i].port_number);
2925 		ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
2926 							  &reply->ports[i]);
2927 		if (ret == 1)
2928 			changed = true;
2929 		else if (ret < 0)
2930 			goto out;
2931 	}
2932 
2933 	/* Prune any ports that are currently a part of mstb in our in-memory
2934 	 * topology, but were not seen in this link address. Usually this
2935 	 * means that they were removed while the topology was out of sync,
2936 	 * e.g. during suspend/resume
2937 	 */
2938 	mutex_lock(&mgr->lock);
2939 	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
2940 		if (port_mask & BIT(port->port_num))
2941 			continue;
2942 
2943 		DRM_DEBUG_KMS("port %d was not in link address, removing\n",
2944 			      port->port_num);
2945 		list_del(&port->next);
2946 		drm_dp_mst_topology_put_port(port);
2947 		changed = true;
2948 	}
2949 	mutex_unlock(&mgr->lock);
2950 
2951 out:
2952 	if (ret <= 0)
2953 		mstb->link_address_sent = false;
2954 	kfree(txmsg);
2955 	return ret < 0 ? ret : changed;
2956 }
2957 
2958 void drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
2959 					struct drm_dp_mst_branch *mstb)
2960 {
2961 	struct drm_dp_sideband_msg_tx *txmsg;
2962 	int len __unused, ret;
2963 
2964 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2965 	if (!txmsg)
2966 		return;
2967 
2968 	txmsg->dst = mstb;
2969 	len = build_clear_payload_id_table(txmsg);
2970 
2971 	drm_dp_queue_down_tx(mgr, txmsg);
2972 
2973 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2974 	if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2975 		DRM_DEBUG_KMS("clear payload table id nak received\n");
2976 
2977 	kfree(txmsg);
2978 }
2979 
2980 static int
2981 drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
2982 				struct drm_dp_mst_branch *mstb,
2983 				struct drm_dp_mst_port *port)
2984 {
2985 	struct drm_dp_enum_path_resources_ack_reply *path_res;
2986 	struct drm_dp_sideband_msg_tx *txmsg;
2987 	int len __unused;
2988 	int ret;
2989 
2990 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2991 	if (!txmsg)
2992 		return -ENOMEM;
2993 
2994 	txmsg->dst = mstb;
2995 	len = build_enum_path_resources(txmsg, port->port_num);
2996 
2997 	drm_dp_queue_down_tx(mgr, txmsg);
2998 
2999 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3000 	if (ret > 0) {
3001 		path_res = &txmsg->reply.u.path_resources;
3002 
3003 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3004 			DRM_DEBUG_KMS("enum path resources nak received\n");
3005 		} else {
3006 			if (port->port_num != path_res->port_number)
3007 				DRM_ERROR("got incorrect port in response\n");
3008 
3009 			DRM_DEBUG_KMS("enum path resources %d: %d %d\n",
3010 				      path_res->port_number,
3011 				      path_res->full_payload_bw_number,
3012 				      path_res->avail_payload_bw_number);
3013 			port->available_pbn =
3014 				path_res->avail_payload_bw_number;
3015 			port->fec_capable = path_res->fec_capable;
3016 		}
3017 	}
3018 
3019 	kfree(txmsg);
3020 	return 0;
3021 }
3022 
3023 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
3024 {
3025 	if (!mstb->port_parent)
3026 		return NULL;
3027 
3028 	if (mstb->port_parent->mstb != mstb)
3029 		return mstb->port_parent;
3030 
3031 	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
3032 }
3033 
3034 /*
3035  * Searches upwards in the topology starting from mstb to try to find the
3036  * closest available parent of mstb that's still connected to the rest of the
3037  * topology. This can be used in order to perform operations like releasing
3038  * payloads, where the branch device which owned the payload may no longer be
3039  * around and thus would require that the payload on the last living relative
3040  * be freed instead.
3041  */
3042 static struct drm_dp_mst_branch *
3043 drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
3044 					struct drm_dp_mst_branch *mstb,
3045 					int *port_num)
3046 {
3047 	struct drm_dp_mst_branch *rmstb = NULL;
3048 	struct drm_dp_mst_port *found_port;
3049 
3050 	mutex_lock(&mgr->lock);
3051 	if (!mgr->mst_primary)
3052 		goto out;
3053 
3054 	do {
3055 		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
3056 		if (!found_port)
3057 			break;
3058 
3059 		if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
3060 			rmstb = found_port->parent;
3061 			*port_num = found_port->port_num;
3062 		} else {
3063 			/* Search again, starting from this parent */
3064 			mstb = found_port->parent;
3065 		}
3066 	} while (!rmstb);
3067 out:
3068 	mutex_unlock(&mgr->lock);
3069 	return rmstb;
3070 }
3071 
3072 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
3073 				   struct drm_dp_mst_port *port,
3074 				   int id,
3075 				   int pbn)
3076 {
3077 	struct drm_dp_sideband_msg_tx *txmsg;
3078 	struct drm_dp_mst_branch *mstb;
3079 	int len __unused, ret, port_num;
3080 	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
3081 	int i;
3082 
3083 	port_num = port->port_num;
3084 	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3085 	if (!mstb) {
3086 		mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
3087 							       port->parent,
3088 							       &port_num);
3089 
3090 		if (!mstb)
3091 			return -EINVAL;
3092 	}
3093 
3094 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3095 	if (!txmsg) {
3096 		ret = -ENOMEM;
3097 		goto fail_put;
3098 	}
3099 
3100 	for (i = 0; i < port->num_sdp_streams; i++)
3101 		sinks[i] = i;
3102 
3103 	txmsg->dst = mstb;
3104 	len = build_allocate_payload(txmsg, port_num,
3105 				     id,
3106 				     pbn, port->num_sdp_streams, sinks);
3107 
3108 	drm_dp_queue_down_tx(mgr, txmsg);
3109 
3110 	/*
3111 	 * FIXME: there is a small chance that between getting the last
3112 	 * connected mstb and sending the payload message, the last connected
3113 	 * mstb could also be removed from the topology. In the future, this
3114 	 * needs to be fixed by restarting the
3115 	 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
3116 	 * timeout if the topology is still connected to the system.
3117 	 */
3118 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3119 	if (ret > 0) {
3120 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3121 			ret = -EINVAL;
3122 		else
3123 			ret = 0;
3124 	}
3125 	kfree(txmsg);
3126 fail_put:
3127 	drm_dp_mst_topology_put_mstb(mstb);
3128 	return ret;
3129 }
3130 
3131 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
3132 				 struct drm_dp_mst_port *port, bool power_up)
3133 {
3134 	struct drm_dp_sideband_msg_tx *txmsg;
3135 	int len __unused, ret;
3136 
3137 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
3138 	if (!port)
3139 		return -EINVAL;
3140 
3141 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3142 	if (!txmsg) {
3143 		drm_dp_mst_topology_put_port(port);
3144 		return -ENOMEM;
3145 	}
3146 
3147 	txmsg->dst = port->parent;
3148 	len = build_power_updown_phy(txmsg, port->port_num, power_up);
3149 	drm_dp_queue_down_tx(mgr, txmsg);
3150 
3151 	ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
3152 	if (ret > 0) {
3153 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3154 			ret = -EINVAL;
3155 		else
3156 			ret = 0;
3157 	}
3158 	kfree(txmsg);
3159 	drm_dp_mst_topology_put_port(port);
3160 
3161 	return ret;
3162 }
3163 EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
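
/*
 * Illustrative sketch (hypothetical caller): a driver would typically power
 * the remote PHY up before enabling a stream on @port and power it back
 * down on teardown; error handling is elided here.
 *
 *	ret = drm_dp_send_power_updown_phy(mgr, port, true);
 *	...
 *	ret = drm_dp_send_power_updown_phy(mgr, port, false);
 */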
3164 
3165 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3166 				       int id,
3167 				       struct drm_dp_payload *payload)
3168 {
3169 	int ret;
3170 
3171 	ret = drm_dp_dpcd_write_payload(mgr, id, payload);
3172 	if (ret < 0) {
3173 		payload->payload_state = 0;
3174 		return ret;
3175 	}
3176 	payload->payload_state = DP_PAYLOAD_LOCAL;
3177 	return 0;
3178 }
3179 
3180 static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
3181 				       struct drm_dp_mst_port *port,
3182 				       int id,
3183 				       struct drm_dp_payload *payload)
3184 {
3185 	int ret;
3186 	ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
3187 	if (ret < 0)
3188 		return ret;
3189 	payload->payload_state = DP_PAYLOAD_REMOTE;
3190 	return ret;
3191 }
3192 
3193 static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
3194 					struct drm_dp_mst_port *port,
3195 					int id,
3196 					struct drm_dp_payload *payload)
3197 {
3198 	DRM_DEBUG_KMS("\n");
3199 	/* it's okay for these to fail */
3200 	if (port) {
3201 		drm_dp_payload_send_msg(mgr, port, id, 0);
3202 	}
3203 
3204 	drm_dp_dpcd_write_payload(mgr, id, payload);
3205 	payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
3206 	return 0;
3207 }
3208 
3209 static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
3210 					int id,
3211 					struct drm_dp_payload *payload)
3212 {
3213 	payload->payload_state = 0;
3214 	return 0;
3215 }
3216 
3217 /**
3218  * drm_dp_update_payload_part1() - Execute payload update part 1
3219  * @mgr: manager to use.
3220  *
3221  * This iterates over all proposed virtual channels, and tries to
3222  * allocate space in the link for them. For 0->slots transitions,
3223  * this step just writes the VCPI to the MST device. For slots->0
3224  * transitions, this writes the updated VCPIs and removes the
3225  * remote VC payloads.
3226  *
3227  * After calling this, the driver should generate ACT and payload
3228  * packets.
3229  */
3230 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
3231 {
3232 	struct drm_dp_payload req_payload;
3233 	struct drm_dp_mst_port *port;
3234 	int i, j;
3235 	int cur_slots = 1;
3236 
3237 	mutex_lock(&mgr->payload_lock);
3238 	for (i = 0; i < mgr->max_payloads; i++) {
3239 		struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
3240 		struct drm_dp_payload *payload = &mgr->payloads[i];
3241 		bool put_port = false;
3242 
3243 		/* solve the current payloads - compare to the hw ones
3244 		   - update the hw view */
3245 		req_payload.start_slot = cur_slots;
3246 		if (vcpi) {
3247 			port = container_of(vcpi, struct drm_dp_mst_port,
3248 					    vcpi);
3249 
3250 			/* Validated ports don't matter if we're releasing
3251 			 * VCPI
3252 			 */
3253 			if (vcpi->num_slots) {
3254 				port = drm_dp_mst_topology_get_port_validated(
3255 				    mgr, port);
3256 				if (!port) {
3257 					mutex_unlock(&mgr->payload_lock);
3258 					return -EINVAL;
3259 				}
3260 				put_port = true;
3261 			}
3262 
3263 			req_payload.num_slots = vcpi->num_slots;
3264 			req_payload.vcpi = vcpi->vcpi;
3265 		} else {
3266 			port = NULL;
3267 			req_payload.num_slots = 0;
3268 		}
3269 
3270 		payload->start_slot = req_payload.start_slot;
3271 		/* work out what is required to happen with this payload */
3272 		if (payload->num_slots != req_payload.num_slots) {
3273 
3274 			/* need to push an update for this payload */
3275 			if (req_payload.num_slots) {
3276 				drm_dp_create_payload_step1(mgr, vcpi->vcpi,
3277 							    &req_payload);
3278 				payload->num_slots = req_payload.num_slots;
3279 				payload->vcpi = req_payload.vcpi;
3280 
3281 			} else if (payload->num_slots) {
3282 				payload->num_slots = 0;
3283 				drm_dp_destroy_payload_step1(mgr, port,
3284 							     payload->vcpi,
3285 							     payload);
3286 				req_payload.payload_state =
3287 					payload->payload_state;
3288 				payload->start_slot = 0;
3289 			}
3290 			payload->payload_state = req_payload.payload_state;
3291 		}
3292 		cur_slots += req_payload.num_slots;
3293 
3294 		if (put_port)
3295 			drm_dp_mst_topology_put_port(port);
3296 	}
3297 
3298 	for (i = 0; i < mgr->max_payloads; /* do nothing */) {
3299 		if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
3300 			i++;
3301 			continue;
3302 		}
3303 
3304 		DRM_DEBUG_KMS("removing payload %d\n", i);
3305 		for (j = i; j < mgr->max_payloads - 1; j++) {
3306 			mgr->payloads[j] = mgr->payloads[j + 1];
3307 			mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
3308 
3309 			if (mgr->proposed_vcpis[j] &&
3310 			    mgr->proposed_vcpis[j]->num_slots) {
3311 				set_bit(j + 1, &mgr->payload_mask);
3312 			} else {
3313 				clear_bit(j + 1, &mgr->payload_mask);
3314 			}
3315 		}
3316 
3317 		memset(&mgr->payloads[mgr->max_payloads - 1], 0,
3318 		       sizeof(struct drm_dp_payload));
3319 		mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
3320 		clear_bit(mgr->max_payloads, &mgr->payload_mask);
3321 	}
3322 	mutex_unlock(&mgr->payload_lock);
3323 
3324 	return 0;
3325 }
3326 EXPORT_SYMBOL(drm_dp_update_payload_part1);
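/*
 * Illustrative sequence sketch (an addition, not part of the original
 * source): an MST driver's modeset path normally interleaves the two
 * payload-update steps with its own ACT handling, roughly:
 *
 *	drm_dp_update_payload_part1(mgr);	// write VCPIs / local table
 *	... enable or disable the transcoder, trigger ACT ...
 *	drm_dp_check_act_status(mgr);		// wait for ACT handled
 *	drm_dp_update_payload_part2(mgr);	// send remote payload messages
 *
 * The exact ordering around the hardware-specific steps is up to the driver.
 */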
3327 
3328 /**
3329  * drm_dp_update_payload_part2() - Execute payload update part 2
3330  * @mgr: manager to use.
3331  *
3332  * This iterates over all proposed virtual channels, and tries to
3333  * allocate space in the link for them. For 0->slots transitions,
3334  * this step writes the remote VC payload commands. For slots->0
3335  * this just resets some internal state.
3336  */
3337 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
3338 {
3339 	struct drm_dp_mst_port *port;
3340 	int i;
3341 	int ret = 0;
3342 	mutex_lock(&mgr->payload_lock);
3343 	for (i = 0; i < mgr->max_payloads; i++) {
3344 
3345 		if (!mgr->proposed_vcpis[i])
3346 			continue;
3347 
3348 		port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
3349 
3350 		DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
3351 		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
3352 			ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
3353 		} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
3354 			ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
3355 		}
3356 		if (ret) {
3357 			mutex_unlock(&mgr->payload_lock);
3358 			return ret;
3359 		}
3360 	}
3361 	mutex_unlock(&mgr->payload_lock);
3362 	return 0;
3363 }
3364 EXPORT_SYMBOL(drm_dp_update_payload_part2);
3365 
3366 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
3367 				 struct drm_dp_mst_port *port,
3368 				 int offset, int size, u8 *bytes)
3369 {
3370 	int len __unused;
3371 	int ret = 0;
3372 	struct drm_dp_sideband_msg_tx *txmsg;
3373 	struct drm_dp_mst_branch *mstb;
3374 
3375 	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3376 	if (!mstb)
3377 		return -EINVAL;
3378 
3379 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3380 	if (!txmsg) {
3381 		ret = -ENOMEM;
3382 		goto fail_put;
3383 	}
3384 
3385 	len = build_dpcd_read(txmsg, port->port_num, offset, size);
3386 	txmsg->dst = port->parent;
3387 
3388 	drm_dp_queue_down_tx(mgr, txmsg);
3389 
3390 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3391 	if (ret < 0)
3392 		goto fail_free;
3393 
3394 	/* DPCD read should never be NAKed */
3395 	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3396 		DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
3397 			  mstb, port->port_num, offset, size);
3398 		ret = -EIO;
3399 		goto fail_free;
3400 	}
3401 
3402 	if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
3403 		ret = -EPROTO;
3404 		goto fail_free;
3405 	}
3406 
3407 	ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
3408 		    size);
3409 	memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
3410 
3411 fail_free:
3412 	kfree(txmsg);
3413 fail_put:
3414 	drm_dp_mst_topology_put_mstb(mstb);
3415 
3416 	return ret;
3417 }
3418 
3419 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
3420 				  struct drm_dp_mst_port *port,
3421 				  int offset, int size, u8 *bytes)
3422 {
3423 	int len __unused;
3424 	int ret;
3425 	struct drm_dp_sideband_msg_tx *txmsg;
3426 	struct drm_dp_mst_branch *mstb;
3427 
3428 	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3429 	if (!mstb)
3430 		return -EINVAL;
3431 
3432 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3433 	if (!txmsg) {
3434 		ret = -ENOMEM;
3435 		goto fail_put;
3436 	}
3437 
3438 	len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
3439 	txmsg->dst = mstb;
3440 
3441 	drm_dp_queue_down_tx(mgr, txmsg);
3442 
3443 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3444 	if (ret > 0) {
3445 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3446 			ret = -EIO;
3447 		else
3448 			ret = 0;
3449 	}
3450 	kfree(txmsg);
3451 fail_put:
3452 	drm_dp_mst_topology_put_mstb(mstb);
3453 	return ret;
3454 }
3455 
3456 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
3457 {
3458 	struct drm_dp_sideband_msg_reply_body reply;
3459 
3460 	reply.reply_type = DP_SIDEBAND_REPLY_ACK;
3461 	reply.req_type = req_type;
3462 	drm_dp_encode_sideband_reply(&reply, msg);
3463 	return 0;
3464 }
3465 
3466 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
3467 				    struct drm_dp_mst_branch *mstb,
3468 				    int req_type, int seqno, bool broadcast)
3469 {
3470 	struct drm_dp_sideband_msg_tx *txmsg;
3471 
3472 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3473 	if (!txmsg)
3474 		return -ENOMEM;
3475 
3476 	txmsg->dst = mstb;
3477 	txmsg->seqno = seqno;
3478 	drm_dp_encode_up_ack_reply(txmsg, req_type);
3479 
3480 	mutex_lock(&mgr->qlock);
3481 
3482 	process_single_up_tx_qlock(mgr, txmsg);
3483 
3484 	mutex_unlock(&mgr->qlock);
3485 
3486 	kfree(txmsg);
3487 	return 0;
3488 }
3489 
3490 static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8  dp_link_count)
3491 {
3492 	if (dp_link_bw == 0 || dp_link_count == 0)
3493 		DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
3494 			      dp_link_bw, dp_link_count);
3495 
3496 	return dp_link_bw * dp_link_count / 2;
3497 }
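/*
 * Worked example (an addition for illustration): dp_link_bw is the raw DPCD
 * link-rate code in units of 0.27 Gbps, so for an HBR2 (5.4 Gbps) link
 * trained at 4 lanes this returns 0x14 * 4 / 2 = 40 PBN per time slot.
 * That matches 17.28 Gbps of post-8b/10b bandwidth (2160 MB/s) divided
 * into 64 slots of 54/64 MB/s units.
 */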
3498 
3499 /**
3500  * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
3501  * @mgr: manager to set state for
3502  * @mst_state: true to enable MST on this connector - false to disable.
3503  *
3504  * This is called by the driver when it detects an MST capable device plugged
3505  * into a DP MST capable port, or when a DP MST capable device is unplugged.
3506  */
3507 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
3508 {
3509 	int ret = 0;
3510 	int i = 0;
3511 	struct drm_dp_mst_branch *mstb = NULL;
3512 
3513 	mutex_lock(&mgr->lock);
3514 	if (mst_state == mgr->mst_state)
3515 		goto out_unlock;
3516 
3517 	mgr->mst_state = mst_state;
3518 	/* set the device into MST mode */
3519 	if (mst_state) {
3520 		WARN_ON(mgr->mst_primary);
3521 
3522 		/* get dpcd info */
3523 		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
3524 		if (ret != DP_RECEIVER_CAP_SIZE) {
3525 			DRM_DEBUG_KMS("failed to read DPCD\n");
3526 			goto out_unlock;
3527 		}
3528 
3529 		mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
3530 							mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
3531 		if (mgr->pbn_div == 0) {
3532 			ret = -EINVAL;
3533 			goto out_unlock;
3534 		}
3535 
3536 		/* add initial branch device at LCT 1 */
3537 		mstb = drm_dp_add_mst_branch_device(1, NULL);
3538 		if (mstb == NULL) {
3539 			ret = -ENOMEM;
3540 			goto out_unlock;
3541 		}
3542 		mstb->mgr = mgr;
3543 
3544 		/* give this the main reference */
3545 		mgr->mst_primary = mstb;
3546 		drm_dp_mst_topology_get_mstb(mgr->mst_primary);
3547 
3548 		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3549 							 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
3550 		if (ret < 0) {
3551 			goto out_unlock;
3552 		}
3553 
3554 		{
3555 			struct drm_dp_payload reset_pay;
3556 			reset_pay.start_slot = 0;
3557 			reset_pay.num_slots = 0x3f;
3558 			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
3559 		}
3560 
3561 		queue_work(system_long_wq, &mgr->work);
3562 
3563 		ret = 0;
3564 	} else {
3565 		/* disable MST on the device */
3566 		mstb = mgr->mst_primary;
3567 		mgr->mst_primary = NULL;
3568 		/* this can fail if the device is gone */
3569 		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
3570 		ret = 0;
3571 		mutex_lock(&mgr->payload_lock);
3572 		memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
3573 		mgr->payload_mask = 0;
3574 		set_bit(0, &mgr->payload_mask);
3575 		for (i = 0; i < mgr->max_payloads; i++) {
3576 			struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
3577 
3578 			if (vcpi) {
3579 				vcpi->vcpi = 0;
3580 				vcpi->num_slots = 0;
3581 			}
3582 			mgr->proposed_vcpis[i] = NULL;
3583 		}
3584 		mgr->vcpi_mask = 0;
3585 		mutex_unlock(&mgr->payload_lock);
3586 
3587 		mgr->payload_id_table_cleared = false;
3588 	}
3589 
3590 out_unlock:
3591 	mutex_unlock(&mgr->lock);
3592 	if (mstb)
3593 		drm_dp_mst_topology_put_mstb(mstb);
3594 	return ret;
3595 
3596 }
3597 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
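/*
 * Illustrative sketch (an addition, not part of the original source): a
 * driver's long-pulse hotplug handler typically probes DP_MSTM_CAP and
 * flips the manager state accordingly; the surrounding variables are
 * hypothetical.
 *
 *	u8 mstm_cap;
 *
 *	if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) == 1 &&
 *	    (mstm_cap & DP_MST_CAP))
 *		drm_dp_mst_topology_mgr_set_mst(mgr, true);
 *	else
 *		drm_dp_mst_topology_mgr_set_mst(mgr, false);
 */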
3598 
3599 static void
3600 drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
3601 {
3602 	struct drm_dp_mst_port *port;
3603 
3604 	/* The link address will need to be re-sent on resume */
3605 	mstb->link_address_sent = false;
3606 
3607 	list_for_each_entry(port, &mstb->ports, next) {
3608 		/* The PBN for each port will also need to be re-probed */
3609 		port->available_pbn = 0;
3610 
3611 		if (port->mstb)
3612 			drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
3613 	}
3614 }
3615 
3616 /**
3617  * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
3618  * @mgr: manager to suspend
3619  *
3620  * This function tells the MST device that we can't handle UP messages
3621  * anymore. This should stop it from sending any since we are suspended.
3622  */
3623 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
3624 {
3625 	mutex_lock(&mgr->lock);
3626 	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3627 			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
3628 	mutex_unlock(&mgr->lock);
3629 	flush_work(&mgr->up_req_work);
3630 	flush_work(&mgr->work);
3631 	flush_work(&mgr->delayed_destroy_work);
3632 
3633 	mutex_lock(&mgr->lock);
3634 	if (mgr->mst_state && mgr->mst_primary)
3635 		drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
3636 	mutex_unlock(&mgr->lock);
3637 }
3638 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
3639 
3640 /**
3641  * drm_dp_mst_topology_mgr_resume() - resume the MST manager
3642  * @mgr: manager to resume
3643  * @sync: whether or not to perform topology reprobing synchronously
3644  *
3645  * This will fetch the DPCD and check whether the device is still there;
3646  * if it is, it will rewrite the MSTM control bits and return.
3647  *
3648  * If the device is gone, this returns -1 and the driver should do
3649  * a full MST reprobe, in case the system was undocked during suspend.
3650  *
3651  * During system resume (where it is assumed that the driver will be calling
3652  * drm_atomic_helper_resume()) this function should be called beforehand with
3653  * @sync set to true. In contexts like runtime resume where the driver is not
3654  * expected to be calling drm_atomic_helper_resume(), this function should be
3655  * called with @sync set to false in order to avoid deadlocking.
3656  *
3657  * Returns: -1 if the MST topology was removed while we were suspended, 0
3658  * otherwise.
3659  */
3660 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
3661 				   bool sync)
3662 {
3663 	int ret;
3664 	u8 guid[16];
3665 
3666 	mutex_lock(&mgr->lock);
3667 	if (!mgr->mst_primary)
3668 		goto out_fail;
3669 
3670 	ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
3671 			       DP_RECEIVER_CAP_SIZE);
3672 	if (ret != DP_RECEIVER_CAP_SIZE) {
3673 		DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3674 		goto out_fail;
3675 	}
3676 
3677 	ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3678 				 DP_MST_EN |
3679 				 DP_UP_REQ_EN |
3680 				 DP_UPSTREAM_IS_SRC);
3681 	if (ret < 0) {
3682 		DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
3683 		goto out_fail;
3684 	}
3685 
3686 	/* Some hubs forget their guids after they resume */
3687 	ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
3688 	if (ret != 16) {
3689 		DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3690 		goto out_fail;
3691 	}
3692 	drm_dp_check_mstb_guid(mgr->mst_primary, guid);
3693 
3694 	/*
3695 	 * For the final step of resuming the topology, we need to bring the
3696 	 * state of our in-memory topology back into sync with reality. So,
3697 	 * restart the probing process as if we're probing a new hub
3698 	 */
3699 	queue_work(system_long_wq, &mgr->work);
3700 	mutex_unlock(&mgr->lock);
3701 
3702 	if (sync) {
3703 		DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n");
3704 		flush_work(&mgr->work);
3705 	}
3706 
3707 	return 0;
3708 
3709 out_fail:
3710 	mutex_unlock(&mgr->lock);
3711 	return -1;
3712 }
3713 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
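/*
 * Illustrative pairing sketch (an addition, not part of the original
 * source): in a driver's system PM hooks the suspend/resume calls are used
 * together, with @sync true only when drm_atomic_helper_resume() follows.
 *
 *	suspend: drm_dp_mst_topology_mgr_suspend(mgr);
 *	resume:  if (drm_dp_mst_topology_mgr_resume(mgr, true) < 0)
 *			... treat the topology as gone and reprobe ...
 */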
3714 
3715 static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
3716 {
3717 	int len;
3718 	u8 replyblock[32];
3719 	int replylen, origlen __unused, curreply;
3720 	int ret;
3721 	struct drm_dp_sideband_msg_rx *msg;
3722 	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
3723 	msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
3724 
3725 	len = min(mgr->max_dpcd_transaction_bytes, 16);
3726 	ret = drm_dp_dpcd_read(mgr->aux, basereg,
3727 			       replyblock, len);
3728 	if (ret != len) {
3729 		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
3730 		return false;
3731 	}
3732 	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
3733 	if (!ret) {
3734 		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
3735 		return false;
3736 	}
3737 	replylen = msg->curchunk_len + msg->curchunk_hdrlen;
3738 
3739 	origlen = replylen;
3740 	replylen -= len;
3741 	curreply = len;
3742 	while (replylen > 0) {
3743 		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
3744 		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
3745 				    replyblock, len);
3746 		if (ret != len) {
3747 			DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
3748 				      len, ret);
3749 			return false;
3750 		}
3751 
3752 		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
3753 		if (!ret) {
3754 			DRM_DEBUG_KMS("failed to build sideband msg\n");
3755 			return false;
3756 		}
3757 
3758 		curreply += len;
3759 		replylen -= len;
3760 	}
3761 	return true;
3762 }
3763 
3764 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
3765 {
3766 	struct drm_dp_sideband_msg_tx *txmsg;
3767 	struct drm_dp_mst_branch *mstb;
3768 	struct drm_dp_sideband_msg_hdr *hdr = &mgr->down_rep_recv.initial_hdr;
3769 	int slot = -1;
3770 
3771 	if (!drm_dp_get_one_sb_msg(mgr, false))
3772 		goto clear_down_rep_recv;
3773 
3774 	if (!mgr->down_rep_recv.have_eomt)
3775 		return 0;
3776 
3777 	mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
3778 	if (!mstb) {
3779 		DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
3780 			      hdr->lct);
3781 		goto clear_down_rep_recv;
3782 	}
3783 
3784 	/* find the message */
3785 	slot = hdr->seqno;
3786 	mutex_lock(&mgr->qlock);
3787 	txmsg = mstb->tx_slots[slot];
3788 	/* remove from slots */
3789 	mutex_unlock(&mgr->qlock);
3790 
3791 	if (!txmsg) {
3792 		DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
3793 			      mstb, hdr->seqno, hdr->lct, hdr->rad[0],
3794 			      mgr->down_rep_recv.msg[0]);
3795 		goto no_msg;
3796 	}
3797 
3798 	drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
3799 
3800 	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3801 		DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
3802 			      txmsg->reply.req_type,
3803 			      drm_dp_mst_req_type_str(txmsg->reply.req_type),
3804 			      txmsg->reply.u.nak.reason,
3805 			      drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
3806 			      txmsg->reply.u.nak.nak_data);
3807 
3808 	memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3809 	drm_dp_mst_topology_put_mstb(mstb);
3810 
3811 	mutex_lock(&mgr->qlock);
3812 	txmsg->state = DRM_DP_SIDEBAND_TX_RX;
3813 	mstb->tx_slots[slot] = NULL;
3814 	mgr->is_waiting_for_dwn_reply = false;
3815 	DRM_WAKEUP_ALL(&mgr->tx_waitq, &mgr->qlock);
3816 	mutex_unlock(&mgr->qlock);
3817 
3818 	return 0;
3819 
3820 no_msg:
3821 	drm_dp_mst_topology_put_mstb(mstb);
3822 clear_down_rep_recv:
3823 	mutex_lock(&mgr->qlock);
3824 	mgr->is_waiting_for_dwn_reply = false;
3825 	mutex_unlock(&mgr->qlock);
3826 	memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3827 
3828 	return 0;
3829 }
3830 
3831 static inline bool
3832 drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
3833 			  struct drm_dp_pending_up_req *up_req)
3834 {
3835 	struct drm_dp_mst_branch *mstb = NULL;
3836 	struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
3837 	struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
3838 	bool hotplug = false;
3839 
3840 	if (hdr->broadcast) {
3841 		const u8 *guid = NULL;
3842 
3843 		if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
3844 			guid = msg->u.conn_stat.guid;
3845 		else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
3846 			guid = msg->u.resource_stat.guid;
3847 
3848 		if (guid)
3849 			mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
3850 	} else {
3851 		mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
3852 	}
3853 
3854 	if (!mstb) {
3855 		DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
3856 			      hdr->lct);
3857 		return false;
3858 	}
3859 
3860 	/* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
3861 	if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
3862 		drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
3863 		hotplug = true;
3864 	}
3865 
3866 	drm_dp_mst_topology_put_mstb(mstb);
3867 	return hotplug;
3868 }
3869 
3870 static void drm_dp_mst_up_req_work(struct work_struct *work)
3871 {
3872 	struct drm_dp_mst_topology_mgr *mgr =
3873 		container_of(work, struct drm_dp_mst_topology_mgr,
3874 			     up_req_work);
3875 	struct drm_dp_pending_up_req *up_req;
3876 	bool send_hotplug = false;
3877 
3878 	mutex_lock(&mgr->probe_lock);
3879 	while (true) {
3880 		mutex_lock(&mgr->up_req_lock);
3881 		up_req = list_first_entry_or_null(&mgr->up_req_list,
3882 						  struct drm_dp_pending_up_req,
3883 						  next);
3884 		if (up_req)
3885 			list_del(&up_req->next);
3886 		mutex_unlock(&mgr->up_req_lock);
3887 
3888 		if (!up_req)
3889 			break;
3890 
3891 		send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
3892 		kfree(up_req);
3893 	}
3894 	mutex_unlock(&mgr->probe_lock);
3895 
3896 	if (send_hotplug)
3897 		drm_kms_helper_hotplug_event(mgr->dev);
3898 }
3899 
3900 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
3901 {
3902 	struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr;
3903 	struct drm_dp_pending_up_req *up_req;
3904 	bool seqno;
3905 
3906 	if (!drm_dp_get_one_sb_msg(mgr, true))
3907 		goto out;
3908 
3909 	if (!mgr->up_req_recv.have_eomt)
3910 		return 0;
3911 
3912 	up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
3913 	if (!up_req) {
3914 		DRM_ERROR("Not enough memory to process MST up req\n");
3915 		return -ENOMEM;
3916 	}
3917 	INIT_LIST_HEAD(&up_req->next);
3918 
3919 	seqno = hdr->seqno;
3920 	drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
3921 
3922 	if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
3923 	    up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
3924 		DRM_DEBUG_KMS("Received unknown up req type, ignoring: %x\n",
3925 			      up_req->msg.req_type);
3926 		kfree(up_req);
3927 		goto out;
3928 	}
3929 
3930 	drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
3931 				 seqno, false);
3932 
3933 	if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
3934 		const struct drm_dp_connection_status_notify *conn_stat =
3935 			&up_req->msg.u.conn_stat;
3936 
3937 		DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
3938 			      conn_stat->port_number,
3939 			      conn_stat->legacy_device_plug_status,
3940 			      conn_stat->displayport_device_plug_status,
3941 			      conn_stat->message_capability_status,
3942 			      conn_stat->input_port,
3943 			      conn_stat->peer_device_type);
3944 	} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
3945 		const struct drm_dp_resource_status_notify *res_stat =
3946 			&up_req->msg.u.resource_stat;
3947 
3948 		DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
3949 			      res_stat->port_number,
3950 			      res_stat->available_pbn);
3951 	}
3952 
3953 	up_req->hdr = *hdr;
3954 	mutex_lock(&mgr->up_req_lock);
3955 	list_add_tail(&up_req->next, &mgr->up_req_list);
3956 	mutex_unlock(&mgr->up_req_lock);
3957 	queue_work(system_long_wq, &mgr->up_req_work);
3958 
3959 out:
3960 	memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3961 	return 0;
3962 }
3963 
3964 /**
3965  * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
3966  * @mgr: manager to notify irq for.
3967  * @esi: 4 bytes from SINK_COUNT_ESI
3968  * @handled: whether the hpd interrupt was consumed or not
3969  *
3970  * This should be called from the driver when it detects a short IRQ,
3971  * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
3972  * topology manager will process the sideband messages received as a result
3973  * of this.
3974  */
3975 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
3976 {
3977 	int ret = 0;
3978 	int sc;
3979 	*handled = false;
3980 	sc = esi[0] & 0x3f;
3981 
3982 	if (sc != mgr->sink_count) {
3983 		mgr->sink_count = sc;
3984 		*handled = true;
3985 	}
3986 
3987 	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
3988 		ret = drm_dp_mst_handle_down_rep(mgr);
3989 		*handled = true;
3990 	}
3991 
3992 	if (esi[1] & DP_UP_REQ_MSG_RDY) {
3993 		ret |= drm_dp_mst_handle_up_req(mgr);
3994 		*handled = true;
3995 	}
3996 
3997 	drm_dp_mst_kick_tx(mgr);
3998 	return ret;
3999 }
4000 EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
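/*
 * Illustrative short-pulse handler sketch (an addition, not part of the
 * original source): the driver reads the ESI block, hands it to the
 * topology manager, and then acks the serviced vector bits.  The 14-byte
 * read and the ack write mirror what existing drivers do; the surrounding
 * variables are hypothetical.
 *
 *	u8 esi[DP_DPRX_ESI_LEN];
 *	bool handled;
 *
 *	if (drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 14) == 14) {
 *		drm_dp_mst_hpd_irq(mgr, esi, &handled);
 *		if (handled)
 *			drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1,
 *					  &esi[1], 3);
 *	}
 */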
4001 
4002 /**
4003  * drm_dp_mst_detect_port() - get connection status for an MST port
4004  * @connector: DRM connector for this port
4005  * @ctx: The acquisition context to use for grabbing locks
4006  * @mgr: manager for this port
4007  * @port: pointer to a port
4008  *
4009  * This returns the current connection state for a port.
4010  */
4011 int
4012 drm_dp_mst_detect_port(struct drm_connector *connector,
4013 		       struct drm_modeset_acquire_ctx *ctx,
4014 		       struct drm_dp_mst_topology_mgr *mgr,
4015 		       struct drm_dp_mst_port *port)
4016 {
4017 	int ret;
4018 
4019 	/* we need to search for the port in the mgr in case it's gone */
4020 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
4021 	if (!port)
4022 		return connector_status_disconnected;
4023 
4024 	ret = drm_modeset_lock(&mgr->base.lock, ctx);
4025 	if (ret)
4026 		goto out;
4027 
4028 	ret = connector_status_disconnected;
4029 
4030 	if (!port->ddps)
4031 		goto out;
4032 
4033 	switch (port->pdt) {
4034 	case DP_PEER_DEVICE_NONE:
4035 	case DP_PEER_DEVICE_MST_BRANCHING:
4036 		if (!port->mcs)
4037 			ret = connector_status_connected;
4038 		break;
4039 
4040 	case DP_PEER_DEVICE_SST_SINK:
4041 		ret = connector_status_connected;
4042 		/* for logical ports - cache the EDID */
4043 		if (port->port_num >= 8 && !port->cached_edid) {
4044 			port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
4045 		}
4046 		break;
4047 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
4048 		if (port->ldps)
4049 			ret = connector_status_connected;
4050 		break;
4051 	}
4052 out:
4053 	drm_dp_mst_topology_put_port(port);
4054 	return ret;
4055 }
4056 EXPORT_SYMBOL(drm_dp_mst_detect_port);
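/*
 * Illustrative sketch (an addition, not part of the original source): an
 * MST connector's &drm_connector_helper_funcs.detect_ctx hook usually
 * reduces to this call; the connector container and to_hyp() helper are
 * hypothetical.
 *
 *	static int hypothetical_mst_detect(struct drm_connector *connector,
 *					   struct drm_modeset_acquire_ctx *ctx,
 *					   bool force)
 *	{
 *		struct hypothetical_mst_connector *c = to_hyp(connector);
 *
 *		return drm_dp_mst_detect_port(connector, ctx, c->mgr, c->port);
 *	}
 */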
4057 
4058 /**
4059  * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
4060  * @mgr: manager for this port
4061  * @port: unverified pointer to a port.
4062  *
4063  * This returns whether the port supports audio or not.
4064  */
4065 bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
4066 					struct drm_dp_mst_port *port)
4067 {
4068 	bool ret = false;
4069 
4070 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
4071 	if (!port)
4072 		return ret;
4073 	ret = port->has_audio;
4074 	drm_dp_mst_topology_put_port(port);
4075 	return ret;
4076 }
4077 EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
4078 
4079 /**
4080  * drm_dp_mst_get_edid() - get EDID for an MST port
4081  * @connector: toplevel connector to get EDID for
4082  * @mgr: manager for this port
4083  * @port: unverified pointer to a port.
4084  *
4085  * This returns an EDID for the port connected to a connector.
4086  * It validates that the pointer still exists so the caller doesn't require
4087  * a reference.
4088  */
4089 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4090 {
4091 	struct edid *edid = NULL;
4092 
4093 	/* we need to search for the port in the mgr in case it's gone */
4094 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
4095 	if (!port)
4096 		return NULL;
4097 
4098 	if (port->cached_edid)
4099 		edid = drm_edid_duplicate(port->cached_edid);
4100 	else {
4101 		edid = drm_get_edid(connector, &port->aux.ddc);
4102 	}
4103 	port->has_audio = drm_detect_monitor_audio(edid);
4104 	drm_dp_mst_topology_put_port(port);
4105 	return edid;
4106 }
4107 EXPORT_SYMBOL(drm_dp_mst_get_edid);
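/*
 * Illustrative sketch (an addition, not part of the original source): a
 * typical &drm_connector_helper_funcs.get_modes implementation for an MST
 * connector; the container "c" and its fields are hypothetical.
 *
 *	struct edid *edid;
 *	int count;
 *
 *	edid = drm_dp_mst_get_edid(connector, c->mgr, c->port);
 *	drm_connector_update_edid_property(connector, edid);
 *	count = drm_add_edid_modes(connector, edid);
 *	kfree(edid);
 *	return count;
 */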
4108 
4109 /**
4110  * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
4111  * @mgr: manager to use
4112  * @pbn: payload bandwidth to convert into slots.
4113  *
4114  * Calculate the number of VCPI slots that will be required for the given PBN
4115  * value. This function is deprecated, and should not be used in atomic
4116  * drivers.
4117  *
4118  * RETURNS:
4119  * The total slots required for this port, or error.
4120  */
4121 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
4122 			   int pbn)
4123 {
4124 	int num_slots;
4125 
4126 	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
4127 
4128 	/* max. time slots - one slot for MTP header */
4129 	if (num_slots > 63)
4130 		return -ENOSPC;
4131 	return num_slots;
4132 }
4133 EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
4134 
4135 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4136 			    struct drm_dp_vcpi *vcpi, int pbn, int slots)
4137 {
4138 	int ret;
4139 
4140 	/* max. time slots - one slot for MTP header */
4141 	if (slots > 63)
4142 		return -ENOSPC;
4143 
4144 	vcpi->pbn = pbn;
4145 	vcpi->aligned_pbn = slots * mgr->pbn_div;
4146 	vcpi->num_slots = slots;
4147 
4148 	ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
4149 	if (ret < 0)
4150 		return ret;
4151 	return 0;
4152 }
4153 
4154 /**
4155  * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
4156  * @state: global atomic state
4157  * @mgr: MST topology manager for the port
4158  * @port: port to find vcpi slots for
4159  * @pbn: bandwidth required for the mode in PBN
4160  * @pbn_div: divider for DSC mode that takes FEC into account
4161  *
4162  * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
4163  * may have had. Any atomic drivers which support MST must call this function
4164  * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
4165  * current VCPI allocation for the new state, but only when
4166  * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
4167  * to ensure compatibility with userspace applications that still use the
4168  * legacy modesetting UAPI.
4169  *
4170  * Allocations set by this function are not checked against the bandwidth
4171  * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
4172  *
4173  * Additionally, it is OK to call this function multiple times on the same
4174  * @port as needed. It is not OK however, to call this function and
4175  * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
4176  *
4177  * See also:
4178  * drm_dp_atomic_release_vcpi_slots()
4179  * drm_dp_mst_atomic_check()
4180  *
4181  * Returns:
4182  * Total slots in the atomic state assigned for this port, or a negative error
4183  * code if the port no longer exists
4184  */
4185 int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
4186 				  struct drm_dp_mst_topology_mgr *mgr,
4187 				  struct drm_dp_mst_port *port, int pbn,
4188 				  int pbn_div)
4189 {
4190 	struct drm_dp_mst_topology_state *topology_state;
4191 	struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
4192 	int prev_slots, prev_bw, req_slots;
4193 
4194 	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4195 	if (IS_ERR(topology_state))
4196 		return PTR_ERR(topology_state);
4197 
4198 	/* Find the current allocation for this port, if any */
4199 	list_for_each_entry(pos, &topology_state->vcpis, next) {
4200 		if (pos->port == port) {
4201 			vcpi = pos;
4202 			prev_slots = vcpi->vcpi;
4203 			prev_bw = vcpi->pbn;
4204 
4205 			/*
4206 			 * This should never happen, unless the driver tries
4207 			 * releasing and allocating the same VCPI allocation,
4208 			 * which is an error
4209 			 */
4210 			if (WARN_ON(!prev_slots)) {
4211 				DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
4212 					  port);
4213 				return -EINVAL;
4214 			}
4215 
4216 			break;
4217 		}
4218 	}
4219 	if (!vcpi) {
4220 		prev_slots = 0;
4221 		prev_bw = 0;
4222 	}
4223 
4224 	if (pbn_div <= 0)
4225 		pbn_div = mgr->pbn_div;
4226 
4227 	req_slots = DIV_ROUND_UP(pbn, pbn_div);
4228 
4229 	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
4230 			 port->connector->base.id, port->connector->name,
4231 			 port, prev_slots, req_slots);
4232 	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
4233 			 port->connector->base.id, port->connector->name,
4234 			 port, prev_bw, pbn);
4235 
4236 	/* Add the new allocation to the state */
4237 	if (!vcpi) {
4238 		vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
4239 		if (!vcpi)
4240 			return -ENOMEM;
4241 
4242 		drm_dp_mst_get_port_malloc(port);
4243 		vcpi->port = port;
4244 		list_add(&vcpi->next, &topology_state->vcpis);
4245 	}
4246 	vcpi->vcpi = req_slots;
4247 	vcpi->pbn = pbn;
4248 
4249 	return req_slots;
4250 }
4251 EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
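/*
 * Illustrative atomic_check sketch (an addition, not part of the original
 * source): compute the PBN for the adjusted mode and (re)reserve slots in
 * the topology state; where the result is stored is driver-specific.
 *
 *	int pbn, slots;
 *
 *	pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp, false);
 *	slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port, pbn, 0);
 *	if (slots < 0)
 *		return slots;
 *	// remember pbn/slots in the driver's crtc or connector state
 */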
4252 
4253 /**
4254  * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
4255  * @state: global atomic state
4256  * @mgr: MST topology manager for the port
4257  * @port: The port to release the VCPI slots from
4258  *
4259  * Releases any VCPI slots that have been allocated to a port in the atomic
4260  * state. Any atomic drivers which support MST must call this function in
4261  * their &drm_connector_helper_funcs.atomic_check() callback when the
4262  * connector will no longer have VCPI allocated (e.g. because its CRTC was
4263  * removed) when it had VCPI allocated in the previous atomic state.
4264  *
4265  * It is OK to call this even if @port has been removed from the system.
4266  * Additionally, it is OK to call this function multiple times on the same
4267  * @port as needed. It is not OK however, to call this function and
4268  * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
4269  * phase.
4270  *
4271  * See also:
4272  * drm_dp_atomic_find_vcpi_slots()
4273  * drm_dp_mst_atomic_check()
4274  *
4275  * Returns:
4276  * 0 if all slots for this port were added back to
4277  * &drm_dp_mst_topology_state.avail_slots or negative error code
4278  */
4279 int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
4280 				     struct drm_dp_mst_topology_mgr *mgr,
4281 				     struct drm_dp_mst_port *port)
4282 {
4283 	struct drm_dp_mst_topology_state *topology_state;
4284 	struct drm_dp_vcpi_allocation *pos;
4285 	bool found = false;
4286 
4287 	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
4288 	if (IS_ERR(topology_state))
4289 		return PTR_ERR(topology_state);
4290 
4291 	list_for_each_entry(pos, &topology_state->vcpis, next) {
4292 		if (pos->port == port) {
4293 			found = true;
4294 			break;
4295 		}
4296 	}
4297 	if (WARN_ON(!found)) {
4298 		DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
4299 			  port, &topology_state->base);
4300 		return -EINVAL;
4301 	}
4302 
4303 	DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
4304 	if (pos->vcpi) {
4305 		drm_dp_mst_put_port_malloc(port);
4306 		pos->vcpi = 0;
4307 	}
4308 
4309 	return 0;
4310 }
4311 EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
4312 
4313 /**
4314  * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
4315  * @mgr: manager for this port
4316  * @port: port to allocate a virtual channel for.
4317  * @pbn: payload bandwidth number to request
4318  * @slots: returned number of slots for this PBN.
4319  */
4320 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4321 			      struct drm_dp_mst_port *port, int pbn, int slots)
4322 {
4323 	int ret;
4324 
4325 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
4326 	if (!port)
4327 		return false;
4328 
4329 	if (slots < 0)
4330 		return false;
4331 
4332 	if (port->vcpi.vcpi > 0) {
4333 		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
4334 			      port->vcpi.vcpi, port->vcpi.pbn, pbn);
4335 		if (pbn == port->vcpi.pbn) {
4336 			drm_dp_mst_topology_put_port(port);
4337 			return true;
4338 		}
4339 	}
4340 
4341 	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
4342 	if (ret) {
4343 		DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
4344 			      DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
4345 		goto out;
4346 	}
4347 	DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
4348 		      pbn, port->vcpi.num_slots);
4349 
4350 	/* Keep port allocated until its payload has been removed */
4351 	drm_dp_mst_get_port_malloc(port);
4352 	drm_dp_mst_topology_put_port(port);
4353 	return true;
4354 out:
4355 	return false;
4356 }
4357 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
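/*
 * Illustrative commit-time sketch (an addition, not part of the original
 * source): before enabling the stream the driver turns its reservation
 * into an actual VCPI allocation and then programs the payload table.
 *
 *	slots = drm_dp_find_vcpi_slots(mgr, pbn);
 *	if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots))
 *		... bail out / leave the stream disabled ...
 *	drm_dp_update_payload_part1(mgr);
 */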
4358 
4359 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4360 {
4361 	int slots = 0;
4362 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
4363 	if (!port)
4364 		return slots;
4365 
4366 	slots = port->vcpi.num_slots;
4367 	drm_dp_mst_topology_put_port(port);
4368 	return slots;
4369 }
4370 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
4371 
4372 /**
4373  * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
4374  * @mgr: manager for this port
4375  * @port: unverified pointer to a port.
4376  *
4377  * This just resets the number of slots for the port's VCPI for later programming.
4378  */
4379 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
4380 {
4381 	/*
4382 	 * A port with VCPI will remain allocated until its VCPI is
4383 	 * released, no verified ref needed
4384 	 */
4385 
4386 	port->vcpi.num_slots = 0;
4387 }
4388 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
4389 
4390 /**
4391  * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
4392  * @mgr: manager for this port
4393  * @port: port to deallocate vcpi for
4394  *
4395  * This can be called unconditionally, regardless of whether
4396  * drm_dp_mst_allocate_vcpi() succeeded or not.
4397  */
4398 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
4399 				struct drm_dp_mst_port *port)
4400 {
4401 	if (!port->vcpi.vcpi)
4402 		return;
4403 
4404 	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
4405 	port->vcpi.num_slots = 0;
4406 	port->vcpi.pbn = 0;
4407 	port->vcpi.aligned_pbn = 0;
4408 	port->vcpi.vcpi = 0;
4409 	drm_dp_mst_put_port_malloc(port);
4410 }
4411 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
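/*
 * Illustrative teardown sketch (an addition, not part of the original
 * source): the disable path mirrors the allocation path above.
 *
 *	drm_dp_mst_reset_vcpi_slots(mgr, port);	// before payload part 1
 *	drm_dp_update_payload_part1(mgr);
 *	... wait for ACT, run payload part 2 ...
 *	drm_dp_mst_deallocate_vcpi(mgr, port);	// once the payload is gone
 */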
4412 
4413 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
4414 				     int id, struct drm_dp_payload *payload)
4415 {
4416 	u8 payload_alloc[3], status;
4417 	int ret;
4418 	int retries = 0;
4419 
4420 	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
4421 			   DP_PAYLOAD_TABLE_UPDATED);
4422 
4423 	payload_alloc[0] = id;
4424 	payload_alloc[1] = payload->start_slot;
4425 	payload_alloc[2] = payload->num_slots;
4426 
4427 	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
4428 	if (ret != 3) {
4429 		DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
4430 		goto fail;
4431 	}
4432 
4433 retry:
4434 	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4435 	if (ret < 0) {
4436 		DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
4437 		goto fail;
4438 	}
4439 
4440 	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
4441 		retries++;
4442 		if (retries < 20) {
4443 			usleep_range(10000, 20000);
4444 			goto retry;
4445 		}
4446 		DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
4447 		ret = -EINVAL;
4448 		goto fail;
4449 	}
4450 	ret = 0;
4451 fail:
4452 	return ret;
4453 }
4454 
4455 
4456 /**
4457  * drm_dp_check_act_status() - Check ACT handled status.
4458  * @mgr: manager to use
4459  *
4460  * Check the payload status bits in the DPCD for ACT handled completion.
4461  */
4462 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
4463 {
4464 	u8 status;
4465 	int ret;
4466 	int count = 0;
4467 
4468 	do {
4469 		ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
4470 
4471 		if (ret < 0) {
4472 			DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
4473 			goto fail;
4474 		}
4475 
4476 		if (status & DP_PAYLOAD_ACT_HANDLED)
4477 			break;
4478 		count++;
4479 		udelay(100);
4480 
4481 	} while (count < 30);
4482 
4483 	if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
4484 		DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
4485 		ret = -EINVAL;
4486 		goto fail;
4487 	}
4488 	return 0;
4489 fail:
4490 	return ret;
4491 }
4492 EXPORT_SYMBOL(drm_dp_check_act_status);
4493 
4494 /**
4495  * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
4496  * @clock: dot clock for the mode
4497  * @bpp: bpp for the mode.
4498  * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
4499  *
4500  * This uses the formula in the spec to calculate the PBN value for a mode.
4501  */
4502 int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
4503 {
4504 	/*
4505 	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
4506 	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
4507 	 * common multiplier to render an integer PBN for all link rate/lane
4508 	 * counts combinations
4509 	 * calculate
4510 	 * peak_kbps *= (1006/1000)
4511 	 * peak_kbps *= (64/54)
4512 	 * peak_kbps *= 8    convert to bytes
4513 	 *
4514 	 * If the bpp is in units of 1/16, further divide by 16. Put this
4515 	 * factor in the numerator rather than the denominator to avoid
4516 	 * integer overflow
4517 	 */
4518 
4519 	if (dsc)
4520 		return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
4521 					8 * 54 * 1000 * 1000);
4522 
4523 	return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
4524 				8 * 54 * 1000 * 1000);
4525 }
4526 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
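/*
 * Worked example (an addition for illustration): a 1920x1080@60 mode with a
 * 148500 kHz dot clock at 24 bpp needs
 * ceil(148500 * 24 * 64 * 1006 / (8 * 54 * 1000 * 1000)) = 532 PBN,
 * i.e. ceil(532 / 40) = 14 time slots on an HBR2 x4 link (pbn_div = 40).
 */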
4527 
4528 /* we want to kick the TX after we've ack the up/down IRQs. */
4529 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
4530 {
4531 	queue_work(system_long_wq, &mgr->tx_work);
4532 }
4533 
4534 #if IS_ENABLED(CONFIG_DEBUG_FS)
4535 static void drm_dp_mst_dump_mstb(struct seq_file *m,
4536 				 struct drm_dp_mst_branch *mstb)
4537 {
4538 	struct drm_dp_mst_port *port;
4539 	int tabs = mstb->lct;
4540 	char prefix[10];
4541 	int i;
4542 
4543 	for (i = 0; i < tabs; i++)
4544 		prefix[i] = '\t';
4545 	prefix[i] = '\0';
4546 
4547 	seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
4548 	list_for_each_entry(port, &mstb->ports, next) {
4549 		seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->input, port->pdt, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector);
4550 		if (port->mstb)
4551 			drm_dp_mst_dump_mstb(m, port->mstb);
4552 	}
4553 }
4554 
4555 #define DP_PAYLOAD_TABLE_SIZE		64
4556 
4557 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
4558 				  char *buf)
4559 {
4560 	int i;
4561 
4562 	for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
4563 		if (drm_dp_dpcd_read(mgr->aux,
4564 				     DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
4565 				     &buf[i], 16) != 16)
4566 			return false;
4567 	}
4568 	return true;
4569 }
4570 
4571 static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
4572 			       struct drm_dp_mst_port *port, char *name,
4573 			       int namelen)
4574 {
4575 	struct edid *mst_edid;
4576 
4577 	mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
4578 	drm_edid_get_monitor_name(mst_edid, name, namelen);
4579 }
4580 
4581 /**
4582  * drm_dp_mst_dump_topology(): dump topology to seq file.
4583  * @m: seq_file to dump output to
4584  * @mgr: manager to dump current topology for.
4585  *
4586  * helper to dump MST topology to a seq file for debugfs.
4587  */
4588 void drm_dp_mst_dump_topology(struct seq_file *m,
4589 			      struct drm_dp_mst_topology_mgr *mgr)
4590 {
4591 	int i;
4592 	struct drm_dp_mst_port *port;
4593 
4594 	mutex_lock(&mgr->lock);
4595 	if (mgr->mst_primary)
4596 		drm_dp_mst_dump_mstb(m, mgr->mst_primary);
4597 
4598 	/* dump VCPIs */
4599 	mutex_unlock(&mgr->lock);
4600 
4601 	mutex_lock(&mgr->payload_lock);
4602 	seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
4603 		mgr->max_payloads);
4604 
4605 	for (i = 0; i < mgr->max_payloads; i++) {
4606 		if (mgr->proposed_vcpis[i]) {
4607 			char name[14];
4608 
4609 			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
4610 			fetch_monitor_name(mgr, port, name, sizeof(name));
4611 			seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
4612 				   port->port_num, port->vcpi.vcpi,
4613 				   port->vcpi.num_slots,
4614 				   (*name != 0) ? name :  "Unknown");
4615 		} else
4616 			seq_printf(m, "vcpi %d:unused\n", i);
4617 	}
4618 	for (i = 0; i < mgr->max_payloads; i++) {
4619 		seq_printf(m, "payload %d: %d, %d, %d\n",
4620 			   i,
4621 			   mgr->payloads[i].payload_state,
4622 			   mgr->payloads[i].start_slot,
4623 			   mgr->payloads[i].num_slots);
4624 
4625 
4626 	}
4627 	mutex_unlock(&mgr->payload_lock);
4628 
4629 	mutex_lock(&mgr->lock);
4630 	if (mgr->mst_primary) {
4631 		u8 buf[DP_PAYLOAD_TABLE_SIZE];
4632 		int ret;
4633 
4634 		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
4635 		seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
4636 		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
4637 		seq_printf(m, "faux/mst: %*ph\n", 2, buf);
4638 		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
4639 		seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
4640 
4641 		/* dump the standard OUI branch header */
4642 		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
4643 		seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
4644 		for (i = 0x3; i < 0x8 && buf[i]; i++)
4645 			seq_printf(m, "%c", buf[i]);
4646 		seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
4647 			   buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
4648 		if (dump_dp_payload_table(mgr, buf))
4649 			seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
4650 	}
4651 
4652 	mutex_unlock(&mgr->lock);
4653 
4654 }
4655 EXPORT_SYMBOL(drm_dp_mst_dump_topology);
4656 #endif	/* IS_ENABLED(CONFIG_DEBUG_FS) */
4657 
4658 static void drm_dp_tx_work(struct work_struct *work)
4659 {
4660 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
4661 
4662 	mutex_lock(&mgr->qlock);
4663 	if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
4664 		process_single_down_tx_qlock(mgr);
4665 	mutex_unlock(&mgr->qlock);
4666 }
4667 
4668 static inline void
4669 drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
4670 {
4671 	if (port->connector)
4672 		port->mgr->cbs->destroy_connector(port->mgr, port->connector);
4673 
4674 	drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
4675 	drm_dp_mst_put_port_malloc(port);
4676 }
4677 
4678 static inline void
4679 drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
4680 {
4681 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
4682 	struct drm_dp_mst_port *port, *tmp;
4683 	bool wake_tx = false;
4684 
4685 	mutex_lock(&mgr->lock);
4686 	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
4687 		list_del(&port->next);
4688 		drm_dp_mst_topology_put_port(port);
4689 	}
4690 	mutex_unlock(&mgr->lock);
4691 
4692 	/* drop any tx slots msg */
4693 	mutex_lock(&mstb->mgr->qlock);
4694 	if (mstb->tx_slots[0]) {
4695 		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
4696 		mstb->tx_slots[0] = NULL;
4697 		wake_tx = true;
4698 	}
4699 	if (mstb->tx_slots[1]) {
4700 		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
4701 		mstb->tx_slots[1] = NULL;
4702 		wake_tx = true;
4703 	}
4704 	if (wake_tx)
4705 		DRM_WAKEUP_ALL(&mstb->mgr->tx_waitq, &mstb->mgr->qlock);
4706 	mutex_unlock(&mstb->mgr->qlock);
4707 
4708 	drm_dp_mst_put_mstb_malloc(mstb);
4709 }
4710 
4711 static void drm_dp_delayed_destroy_work(struct work_struct *work)
4712 {
4713 	struct drm_dp_mst_topology_mgr *mgr =
4714 		container_of(work, struct drm_dp_mst_topology_mgr,
4715 			     delayed_destroy_work);
4716 	bool send_hotplug = false, go_again;
4717 
4718 	/*
4719 	 * Not a regular list traverse as we have to drop the destroy
4720 	 * connector lock before destroying the mstb/port, to avoid AB->BA
4721 	 * ordering between this lock and the config mutex.
4722 	 */
4723 	do {
4724 		go_again = false;
4725 
4726 		for (;;) {
4727 			struct drm_dp_mst_branch *mstb;
4728 
4729 			mutex_lock(&mgr->delayed_destroy_lock);
4730 			mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
4731 							struct drm_dp_mst_branch,
4732 							destroy_next);
4733 			if (mstb)
4734 				list_del(&mstb->destroy_next);
4735 			mutex_unlock(&mgr->delayed_destroy_lock);
4736 
4737 			if (!mstb)
4738 				break;
4739 
4740 			drm_dp_delayed_destroy_mstb(mstb);
4741 			go_again = true;
4742 		}
4743 
4744 		for (;;) {
4745 			struct drm_dp_mst_port *port;
4746 
4747 			mutex_lock(&mgr->delayed_destroy_lock);
4748 			port = list_first_entry_or_null(&mgr->destroy_port_list,
4749 							struct drm_dp_mst_port,
4750 							next);
4751 			if (port)
4752 				list_del(&port->next);
4753 			mutex_unlock(&mgr->delayed_destroy_lock);
4754 
4755 			if (!port)
4756 				break;
4757 
4758 			drm_dp_delayed_destroy_port(port);
4759 			send_hotplug = true;
4760 			go_again = true;
4761 		}
4762 	} while (go_again);
4763 
4764 	if (send_hotplug)
4765 		drm_kms_helper_hotplug_event(mgr->dev);
4766 }
4767 
4768 static struct drm_private_state *
4769 drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
4770 {
4771 	struct drm_dp_mst_topology_state *state, *old_state =
4772 		to_dp_mst_topology_state(obj->state);
4773 	struct drm_dp_vcpi_allocation *pos, *vcpi;
4774 
4775 	state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
4776 	if (!state)
4777 		return NULL;
4778 
4779 	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
4780 
4781 	INIT_LIST_HEAD(&state->vcpis);
4782 
4783 	list_for_each_entry(pos, &old_state->vcpis, next) {
4784 		/* Prune leftover freed VCPI allocations */
4785 		if (!pos->vcpi)
4786 			continue;
4787 
4788 		vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
4789 		if (!vcpi)
4790 			goto fail;
4791 
4792 		drm_dp_mst_get_port_malloc(vcpi->port);
4793 		list_add(&vcpi->next, &state->vcpis);
4794 	}
4795 
4796 	return &state->base;
4797 
4798 fail:
4799 	list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
4800 		drm_dp_mst_put_port_malloc(pos->port);
4801 		kfree(pos);
4802 	}
4803 	kfree(state);
4804 
4805 	return NULL;
4806 }
4807 
4808 static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
4809 				     struct drm_private_state *state)
4810 {
4811 	struct drm_dp_mst_topology_state *mst_state =
4812 		to_dp_mst_topology_state(state);
4813 	struct drm_dp_vcpi_allocation *pos, *tmp;
4814 
4815 	list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
4816 		/* We only keep references to ports with non-zero VCPIs */
4817 		if (pos->vcpi)
4818 			drm_dp_mst_put_port_malloc(pos->port);
4819 		kfree(pos);
4820 	}
4821 
4822 	kfree(mst_state);
4823 }
4824 
4825 static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
4826 						 struct drm_dp_mst_branch *branch)
4827 {
4828 	while (port->parent) {
4829 		if (port->parent == branch)
4830 			return true;
4831 
4832 		if (port->parent->port_parent)
4833 			port = port->parent->port_parent;
4834 		else
4835 			break;
4836 	}
4837 	return false;
4838 }
4839 
4840 static inline
4841 int drm_dp_mst_atomic_check_bw_limit(struct drm_dp_mst_branch *branch,
4842 				     struct drm_dp_mst_topology_state *mst_state)
4843 {
4844 	struct drm_dp_mst_port *port;
4845 	struct drm_dp_vcpi_allocation *vcpi;
4846 	int pbn_limit = 0, pbn_used = 0;
4847 
4848 	list_for_each_entry(port, &branch->ports, next) {
4849 		if (port->mstb)
4850 			if (drm_dp_mst_atomic_check_bw_limit(port->mstb, mst_state))
4851 				return -ENOSPC;
4852 
4853 		if (port->available_pbn > 0)
4854 			pbn_limit = port->available_pbn;
4855 	}
4856 	DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch has %d PBN available\n",
4857 			 branch, pbn_limit);
4858 
4859 	list_for_each_entry(vcpi, &mst_state->vcpis, next) {
4860 		if (!vcpi->pbn)
4861 			continue;
4862 
4863 		if (drm_dp_mst_port_downstream_of_branch(vcpi->port, branch))
4864 			pbn_used += vcpi->pbn;
4865 	}
4866 	DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch used %d PBN\n",
4867 			 branch, pbn_used);
4868 
4869 	if (pbn_used > pbn_limit) {
4870 		DRM_DEBUG_ATOMIC("[MST BRANCH:%p] No available bandwidth\n",
4871 				 branch);
4872 		return -ENOSPC;
4873 	}
4874 	return 0;
4875 }
4876 
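/*
 * An MST link transports 64 timeslots per MTP; slot 0 is reserved for the
 * MTP header, which leaves at most 63 slots for VCPI payload allocations.
 * That is where the starting value of avail_slots below comes from.
 */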
4877 static inline int
4878 drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
4879 					 struct drm_dp_mst_topology_state *mst_state)
4880 {
4881 	struct drm_dp_vcpi_allocation *vcpi;
4882 	int avail_slots = 63, payload_count = 0;
4883 
4884 	list_for_each_entry(vcpi, &mst_state->vcpis, next) {
4885 		/* Releasing VCPI is always OK, even if the port is gone */
4886 		if (!vcpi->vcpi) {
4887 			DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
4888 					 vcpi->port);
4889 			continue;
4890 		}
4891 
4892 		DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
4893 				 vcpi->port, vcpi->vcpi);
4894 
4895 		avail_slots -= vcpi->vcpi;
4896 		if (avail_slots < 0) {
4897 			DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
4898 					 vcpi->port, mst_state,
4899 					 avail_slots + vcpi->vcpi);
4900 			return -ENOSPC;
4901 		}
4902 
4903 		if (++payload_count > mgr->max_payloads) {
4904 			DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
4905 					 mgr, mst_state, mgr->max_payloads);
4906 			return -EINVAL;
4907 		}
4908 	}
4909 	DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
4910 			 mgr, mst_state, avail_slots,
4911 			 63 - avail_slots);
4912 
4913 	return 0;
4914 }
4915 
4916 /**
4917  * drm_dp_mst_add_affected_dsc_crtcs
4918  * @state: Pointer to the new struct drm_atomic_state
4919  * @mgr: MST topology manager
4920  *
4921  * Whenever there is a change in the MST topology, the DSC configuration
4922  * has to be recalculated, so we need to trigger a modeset on all affected
4923  * CRTCs in that topology.  A usage sketch follows the function below.
4924  *
4925  *
4926  * See also:
4927  * drm_dp_mst_atomic_enable_dsc()
4928  */
4929 int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
4930 {
4931 	struct drm_dp_mst_topology_state *mst_state;
4932 	struct drm_dp_vcpi_allocation *pos;
4933 	struct drm_connector *connector;
4934 	struct drm_connector_state *conn_state;
4935 	struct drm_crtc *crtc;
4936 	struct drm_crtc_state *crtc_state;
4937 
4938 	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
4939 
4940 	if (IS_ERR(mst_state))
4941 		return -EINVAL;
4942 
4943 	list_for_each_entry(pos, &mst_state->vcpis, next) {
4944 
4945 		connector = pos->port->connector;
4946 
4947 		if (!connector)
4948 			return -EINVAL;
4949 
4950 		conn_state = drm_atomic_get_connector_state(state, connector);
4951 
4952 		if (IS_ERR(conn_state))
4953 			return PTR_ERR(conn_state);
4954 
4955 		crtc = conn_state->crtc;
4956 
4957 		if (WARN_ON(!crtc))
4958 			return -EINVAL;
4959 
4960 		if (!drm_dp_mst_dsc_aux_for_port(pos->port))
4961 			continue;
4962 
4963 		crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
4964 
4965 		if (IS_ERR(crtc_state))
4966 			return PTR_ERR(crtc_state);
4967 
4968 		DRM_DEBUG_ATOMIC("[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
4969 				 mgr, crtc);
4970 
4971 		crtc_state->mode_changed = true;
4972 	}
4973 	return 0;
4974 }
4975 EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
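/*
 * Example usage (illustrative sketch only): a driver supporting DSC over MST
 * could call drm_dp_mst_add_affected_dsc_crtcs() from its
 * &drm_mode_config_funcs.atomic_check implementation before running the
 * generic helpers, so that every CRTC whose DSC configuration may change gets
 * a full modeset.  example_dsc_atomic_check() and example_get_mst_mgr() are
 * hypothetical names, not part of this file.
 *
 *	static int example_dsc_atomic_check(struct drm_device *dev,
 *					    struct drm_atomic_state *state)
 *	{
 *		struct drm_dp_mst_topology_mgr *mgr = example_get_mst_mgr(dev);
 *		int ret;
 *
 *		ret = drm_dp_mst_add_affected_dsc_crtcs(state, mgr);
 *		if (ret)
 *			return ret;
 *
 *		return drm_atomic_helper_check(dev, state);
 *	}
 */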
4976 
4977 /**
4978  * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
4979  * @state: Pointer to the new drm_atomic_state
4980  * @port: Pointer to the affected MST Port
4981  * @pbn: Newly recalculated bw required for link with DSC enabled
4982  * @pbn_div: Divider to calculate correct number of pbn per slot
4983  * @enable: Boolean flag to enable or disable DSC on the port
4984  *
4985  * This function enables or disables DSC on the given port by recalculating
4986  * its VCPI allocation from the PBN provided, and sets the dsc_enabled flag
4987  * to keep track of which ports have DSC enabled.  A usage sketch follows
4988  * the function below.
4989  *
4990  */
4991 int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
4992 				 struct drm_dp_mst_port *port,
4993 				 int pbn, int pbn_div,
4994 				 bool enable)
4995 {
4996 	struct drm_dp_mst_topology_state *mst_state;
4997 	struct drm_dp_vcpi_allocation *pos;
4998 	bool found = false;
4999 	int vcpi = 0;
5000 
5001 	mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
5002 
5003 	if (IS_ERR(mst_state))
5004 		return PTR_ERR(mst_state);
5005 
5006 	list_for_each_entry(pos, &mst_state->vcpis, next) {
5007 		if (pos->port == port) {
5008 			found = true;
5009 			break;
5010 		}
5011 	}
5012 
5013 	if (!found) {
5014 		DRM_DEBUG_ATOMIC("[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n",
5015 				 port, mst_state);
5016 		return -EINVAL;
5017 	}
5018 
5019 	if (pos->dsc_enabled == enable) {
5020 		DRM_DEBUG_ATOMIC("[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n",
5021 				 port, enable, pos->vcpi);
5022 		vcpi = pos->vcpi;
5023 	}
5024 
5025 	if (enable) {
5026 		vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div);
5027 		DRM_DEBUG_ATOMIC("[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n",
5028 				 port, vcpi);
5029 		if (vcpi < 0)
5030 			return -EINVAL;
5031 	}
5032 
5033 	pos->dsc_enabled = enable;
5034 
5035 	return vcpi;
5036 }
5037 EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
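/*
 * Example usage (illustrative sketch only): when a driver decides to enable
 * DSC for a stream during atomic check, it recomputes the PBN for the
 * compressed bitrate and lets this helper reallocate the VCPI slots.  This
 * sketch assumes the drm_dp_calc_pbn_mode() variant that takes a dsc flag,
 * and that the caller reuses port->mgr->pbn_div; dsc_bpp is a value the
 * caller has already chosen.
 *
 *	pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, dsc_bpp, true);
 *	slots = drm_dp_mst_atomic_enable_dsc(state, port, pbn,
 *					     port->mgr->pbn_div, true);
 *	if (slots < 0)
 *		return slots;
 */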
5038 /**
5039  * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
5040  * atomic update is valid
5041  * @state: Pointer to the new &struct drm_dp_mst_topology_state
5042  *
5043  * Checks the given topology state for an atomic update to ensure that it's
5044  * valid. This includes checking whether there's enough bandwidth to support
5045  * the new VCPI allocations in the atomic update.
5046  *
5047  * Any atomic drivers supporting DP MST must make sure to call this after
5048  * checking the rest of their state in their
5049  * &drm_mode_config_funcs.atomic_check() callback.
5050  *
5051  * See also:
5052  * drm_dp_atomic_find_vcpi_slots()
5053  * drm_dp_atomic_release_vcpi_slots()
5054  *
5055  * Returns:
5056  *
5057  * 0 if the new state is valid, negative error code otherwise.
5058  */
5059 int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
5060 {
5061 	struct drm_dp_mst_topology_mgr *mgr;
5062 	struct drm_dp_mst_topology_state *mst_state;
5063 	int i, ret = 0;
5064 
5065 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
5066 		if (!mgr->mst_state)
5067 			continue;
5068 
5069 		ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
5070 		if (ret)
5071 			break;
5072 		ret = drm_dp_mst_atomic_check_bw_limit(mgr->mst_primary, mst_state);
5073 		if (ret)
5074 			break;
5075 	}
5076 
5077 	return ret;
5078 }
5079 EXPORT_SYMBOL(drm_dp_mst_atomic_check);
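/*
 * Example usage (illustrative sketch only): an atomic driver's
 * &drm_mode_config_funcs.atomic_check hook runs its usual checks first and
 * validates the MST state last.  example_atomic_check() is a hypothetical
 * name.
 *
 *	static int example_atomic_check(struct drm_device *dev,
 *					struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		return drm_dp_mst_atomic_check(state);
 *	}
 */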
5080 
5081 const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
5082 	.atomic_duplicate_state = drm_dp_mst_duplicate_state,
5083 	.atomic_destroy_state = drm_dp_mst_destroy_state,
5084 };
5085 EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
5086 
5087 /**
5088  * drm_atomic_get_mst_topology_state: get MST topology state
5089  *
5090  * @state: global atomic state
5091  * @mgr: MST topology manager, also the private object in this case
5092  *
5093  * This function wraps drm_atomic_get_private_obj_state(), passing in the MST
5094  * atomic state vtable so that the private object state returned is that of an
5095  * MST topology object. Also, drm_atomic_get_private_obj_state() expects the
5096  * caller to take care of the locking, so it warns if we don't hold the connection_mutex.
5097  *
5098  * RETURNS:
5099  *
5100  * The MST topology state or error pointer.
5101  */
5102 struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
5103 								    struct drm_dp_mst_topology_mgr *mgr)
5104 {
5105 	return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
5106 }
5107 EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
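/*
 * Example usage (illustrative sketch only): callers typically fetch the
 * topology state while building up an atomic state, with the relevant
 * modeset locks already held by the atomic core:
 *
 *	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
 *	if (IS_ERR(mst_state))
 *		return PTR_ERR(mst_state);
 */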
5108 
5109 /**
5110  * drm_dp_mst_topology_mgr_init - initialise a topology manager
5111  * @mgr: manager struct to initialise
5112  * @dev: device providing this structure - for i2c addition.
5113  * @aux: DP helper aux channel to talk to this device
5114  * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
5115  * @max_payloads: maximum number of payloads this GPU can source
5116  * @conn_base_id: the connector object ID the MST device is connected to.
5117  *
5118  * Return 0 for success, or negative error code on failure
5119  */
5120 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
5121 				 struct drm_device *dev, struct drm_dp_aux *aux,
5122 				 int max_dpcd_transaction_bytes,
5123 				 int max_payloads, int conn_base_id)
5124 {
5125 	struct drm_dp_mst_topology_state *mst_state;
5126 
5127 	mutex_init(&mgr->lock);
5128 	mutex_init(&mgr->qlock);
5129 	mutex_init(&mgr->payload_lock);
5130 	mutex_init(&mgr->delayed_destroy_lock);
5131 	mutex_init(&mgr->up_req_lock);
5132 	mutex_init(&mgr->probe_lock);
5133 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
5134 	mutex_init(&mgr->topology_ref_history_lock);
5135 #endif
5136 	INIT_LIST_HEAD(&mgr->tx_msg_downq);
5137 	INIT_LIST_HEAD(&mgr->destroy_port_list);
5138 	INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
5139 	INIT_LIST_HEAD(&mgr->up_req_list);
5140 	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
5141 	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
5142 	INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
5143 	INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
5144 	DRM_INIT_WAITQUEUE(&mgr->tx_waitq, "dpmstwait");
5145 	mgr->dev = dev;
5146 	mgr->aux = aux;
5147 	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
5148 	mgr->max_payloads = max_payloads;
5149 	mgr->conn_base_id = conn_base_id;
5150 	if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
5151 	    max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
5152 		return -EINVAL;
5153 	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
5154 	if (!mgr->payloads)
5155 		return -ENOMEM;
5156 	mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
5157 	if (!mgr->proposed_vcpis)
5158 		return -ENOMEM;
5159 	set_bit(0, &mgr->payload_mask);
5160 
5161 	mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
5162 	if (mst_state == NULL)
5163 		return -ENOMEM;
5164 
5165 	mst_state->mgr = mgr;
5166 	INIT_LIST_HEAD(&mst_state->vcpis);
5167 
5168 	drm_atomic_private_obj_init(dev, &mgr->base,
5169 				    &mst_state->base,
5170 				    &drm_dp_mst_topology_state_funcs);
5171 
5172 	return 0;
5173 }
5174 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
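/*
 * Example usage (illustrative sketch only): a driver typically initializes
 * the manager once per DP connector that may carry MST, and tears it down on
 * unload.  The max_dpcd_transaction_bytes (16) and max_payloads (4) values
 * below are arbitrary examples; real drivers pick hardware-specific limits,
 * and the priv structure is hypothetical.
 *
 *	ret = drm_dp_mst_topology_mgr_init(&priv->mst_mgr, dev, &priv->aux,
 *					   16, 4, connector->base.id);
 *	if (ret)
 *		return ret;
 *	...
 *	drm_dp_mst_topology_mgr_destroy(&priv->mst_mgr);
 */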
5175 
5176 /**
5177  * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
5178  * @mgr: manager to destroy
5179  */
5180 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
5181 {
5182 	drm_dp_mst_topology_mgr_set_mst(mgr, false);
5183 	flush_work(&mgr->work);
5184 	cancel_work_sync(&mgr->delayed_destroy_work);
5185 	mutex_lock(&mgr->payload_lock);
5186 	kfree(mgr->payloads);
5187 	mgr->payloads = NULL;
5188 	kfree(mgr->proposed_vcpis);
5189 	mgr->proposed_vcpis = NULL;
5190 	mutex_unlock(&mgr->payload_lock);
5191 	mgr->dev = NULL;
5192 	mgr->aux = NULL;
5193 	drm_atomic_private_obj_fini(&mgr->base);
5194 	mgr->funcs = NULL;
5195 
5196 	DRM_DESTROY_WAITQUEUE(&mgr->tx_waitq);
5197 	mutex_destroy(&mgr->delayed_destroy_lock);
5198 	mutex_destroy(&mgr->payload_lock);
5199 	mutex_destroy(&mgr->qlock);
5200 	mutex_destroy(&mgr->lock);
5201 	mutex_destroy(&mgr->up_req_lock);
5202 	mutex_destroy(&mgr->probe_lock);
5203 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
5204 	mutex_destroy(&mgr->topology_ref_history_lock);
5205 #endif
5206 }
5207 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
5208 
5209 static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
5210 {
5211 	int i;
5212 
5213 	if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
5214 		return false;
5215 
5216 	for (i = 0; i < num - 1; i++) {
5217 		if (msgs[i].flags & I2C_M_RD ||
5218 		    msgs[i].len > 0xff)
5219 			return false;
5220 	}
5221 
5222 	return msgs[num - 1].flags & I2C_M_RD &&
5223 		msgs[num - 1].len <= 0xff;
5224 }
5225 
5226 /* I2C device */
5227 static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
5228 			       int num)
5229 {
5230 	struct drm_dp_aux *aux = adapter->algo_data;
5231 	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
5232 	struct drm_dp_mst_branch *mstb;
5233 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
5234 	unsigned int i;
5235 	struct drm_dp_sideband_msg_req_body msg;
5236 	struct drm_dp_sideband_msg_tx *txmsg = NULL;
5237 	int ret;
5238 
5239 	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
5240 	if (!mstb)
5241 		return -EREMOTEIO;
5242 
5243 	if (!remote_i2c_read_ok(msgs, num)) {
5244 		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
5245 		ret = -EIO;
5246 		goto out;
5247 	}
5248 
5249 	memset(&msg, 0, sizeof(msg));
5250 	msg.req_type = DP_REMOTE_I2C_READ;
5251 	msg.u.i2c_read.num_transactions = num - 1;
5252 	msg.u.i2c_read.port_number = port->port_num;
5253 	for (i = 0; i < num - 1; i++) {
5254 		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
5255 		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
5256 		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
5257 		msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
5258 	}
5259 	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
5260 	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
5261 
5262 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
5263 	if (!txmsg) {
5264 		ret = -ENOMEM;
5265 		goto out;
5266 	}
5267 
5268 	txmsg->dst = mstb;
5269 	drm_dp_encode_sideband_req(&msg, txmsg);
5270 
5271 	drm_dp_queue_down_tx(mgr, txmsg);
5272 
5273 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
5274 	if (ret > 0) {
5275 
5276 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
5277 			ret = -EREMOTEIO;
5278 			goto out;
5279 		}
5280 		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
5281 			ret = -EIO;
5282 			goto out;
5283 		}
5284 		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
5285 		ret = num;
5286 	}
5287 out:
5288 	kfree(txmsg);
5289 	drm_dp_mst_topology_put_mstb(mstb);
5290 	return ret;
5291 }
5292 
5293 static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
5294 {
5295 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
5296 	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
5297 	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
5298 	       I2C_FUNC_10BIT_ADDR;
5299 }
5300 
5301 static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
5302 	.functionality = drm_dp_mst_i2c_functionality,
5303 	.master_xfer = drm_dp_mst_i2c_xfer,
5304 };
5305 
5306 /**
5307  * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
5308  * @aux: DisplayPort AUX channel
5309  *
5310  * Returns 0 on success or a negative error code on failure.
5311  */
5312 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
5313 {
5314 	aux->ddc.algo = &drm_dp_mst_i2c_algo;
5315 	aux->ddc.algo_data = aux;
5316 	aux->ddc.retries = 3;
5317 
5318 	aux->ddc.class = I2C_CLASS_DDC;
5319 	aux->ddc.owner = THIS_MODULE;
5320 	aux->ddc.dev.parent = aux->dev;
5321 #ifndef __NetBSD__		/* XXX of? */
5322 	aux->ddc.dev.of_node = aux->dev->of_node;
5323 #endif
5324 
5325 	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
5326 		sizeof(aux->ddc.name));
5327 
5328 	return i2c_add_adapter(&aux->ddc);
5329 }
5330 
5331 /**
5332  * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
5333  * @aux: DisplayPort AUX channel
5334  */
5335 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
5336 {
5337 	i2c_del_adapter(&aux->ddc);
5338 }
5339 
5340 /**
5341  * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
5342  * @port: The port to check
5343  *
5344  * A single physical MST hub object can be represented in the topology
5345  * by multiple branches, with virtual ports between those branches.
5346  *
5347  * As of DP 1.4, an MST hub with internal (virtual) ports must expose
5348  * certain DPCD registers over those ports. See sections 2.6.1.1.1
5349  * and 2.6.1.1.2 of the DisplayPort specification v1.4 for details.
5350  *
5351  * May acquire mgr->lock
5352  *
5353  * Returns:
5354  * true if the port is a virtual DP peer device, false otherwise
5355  */
5356 static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
5357 {
5358 	struct drm_dp_mst_port *downstream_port;
5359 
5360 	if (!port || port->dpcd_rev < DP_DPCD_REV_14)
5361 		return false;
5362 
5363 	/* Virtual DP Sink (Internal Display Panel) */
5364 	if (port->port_num >= 8)
5365 		return true;
5366 
5367 	/* DP-to-HDMI Protocol Converter */
5368 	if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
5369 	    !port->mcs &&
5370 	    port->ldps)
5371 		return true;
5372 
5373 	/* DP-to-DP */
5374 	mutex_lock(&port->mgr->lock);
5375 	if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
5376 	    port->mstb &&
5377 	    port->mstb->num_ports == 2) {
5378 		list_for_each_entry(downstream_port, &port->mstb->ports, next) {
5379 			if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
5380 			    !downstream_port->input) {
5381 				mutex_unlock(&port->mgr->lock);
5382 				return true;
5383 			}
5384 		}
5385 	}
5386 	mutex_unlock(&port->mgr->lock);
5387 
5388 	return false;
5389 }
5390 
5391 /**
5392  * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
5393  * @port: The port to check. A leaf of the MST tree with an attached display.
5394  *
5395  * Depending on the situation, DSC may be enabled via the endpoint aux,
5396  * the immediately upstream aux, or the connector's physical aux.
5397  *
5398  * This is both the correct aux to read DSC_CAPABILITY and the
5399  * correct aux to write DSC_ENABLED.
5400  *
5401  * This operation can be expensive (up to four aux reads), so
5402  * the caller should cache the return.
5403  *
5404  * Returns:
5405  * NULL if DSC cannot be enabled on this port, otherwise the aux device
5406  */
5407 struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
5408 {
5409 	struct drm_dp_mst_port *immediate_upstream_port;
5410 	struct drm_dp_mst_port *fec_port;
5411 	struct drm_dp_desc desc = { 0 };
5412 	u8 endpoint_fec;
5413 	u8 endpoint_dsc;
5414 
5415 	if (!port)
5416 		return NULL;
5417 
5418 	if (port->parent->port_parent)
5419 		immediate_upstream_port = port->parent->port_parent;
5420 	else
5421 		immediate_upstream_port = NULL;
5422 
5423 	fec_port = immediate_upstream_port;
5424 	while (fec_port) {
5425 		/*
5426 		 * Each physical link (i.e. not a virtual port) between the
5427 		 * output and the primary device must support FEC
5428 		 */
5429 		if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
5430 		    !fec_port->fec_capable)
5431 			return NULL;
5432 
5433 		fec_port = fec_port->parent->port_parent;
5434 	}
5435 
5436 	/* DP-to-DP peer device */
5437 	if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
5438 		u8 upstream_dsc;
5439 
5440 		if (drm_dp_dpcd_read(&port->aux,
5441 				     DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
5442 			return NULL;
5443 		if (drm_dp_dpcd_read(&port->aux,
5444 				     DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
5445 			return NULL;
5446 		if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
5447 				     DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
5448 			return NULL;
5449 
5450 		/* Endpoint decompression with DP-to-DP peer device */
5451 		if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
5452 		    (endpoint_fec & DP_FEC_CAPABLE) &&
5453 		    (upstream_dsc & 0x2) /* DSC passthrough */)
5454 			return &port->aux;
5455 
5456 		/* Virtual DPCD decompression with DP-to-DP peer device */
5457 		return &immediate_upstream_port->aux;
5458 	}
5459 
5460 	/* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
5461 	if (drm_dp_mst_is_virtual_dpcd(port))
5462 		return &port->aux;
5463 
5464 	/*
5465 	 * Synaptics quirk
5466 	 * Applies to ports for which:
5467 	 * - Physical aux has Synaptics OUI
5468 	 * - DPv1.4 or higher
5469 	 * - Port is on primary branch device
5470 	 * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
5471 	 */
5472 	if (drm_dp_read_desc(port->mgr->aux, &desc, true))
5473 		return NULL;
5474 
5475 	if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
5476 	    port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
5477 	    port->parent == port->mgr->mst_primary) {
5478 		u8 downstreamport;
5479 
5480 		if (drm_dp_dpcd_read(&port->aux, DP_DOWNSTREAMPORT_PRESENT,
5481 				     &downstreamport, 1) < 0)
5482 			return NULL;
5483 
5484 		if ((downstreamport & DP_DWN_STRM_PORT_PRESENT) &&
5485 		   ((downstreamport & DP_DWN_STRM_PORT_TYPE_MASK)
5486 		     != DP_DWN_STRM_PORT_TYPE_ANALOG))
5487 			return port->mgr->aux;
5488 	}
5489 
5490 	/*
5491 	 * The check below verifies if the MST sink
5492 	 * connected to the GPU is capable of DSC -
5493 	 * therefore the endpoint needs to be
5494 	 * both DSC and FEC capable.
5495 	 */
5496 	if (drm_dp_dpcd_read(&port->aux,
5497 	   DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
5498 		return NULL;
5499 	if (drm_dp_dpcd_read(&port->aux,
5500 	   DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
5501 		return NULL;
5502 	if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
5503 	   (endpoint_fec & DP_FEC_CAPABLE))
5504 		return &port->aux;
5505 
5506 	return NULL;
5507 }
5508 EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
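/*
 * Example usage (illustrative sketch only): since finding the right aux can
 * take several sideband transactions, a driver would normally do the lookup
 * once when the connector is probed and cache the result, e.g.:
 *
 *	struct drm_dp_aux *dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
 *	u8 dsc_caps[16];
 *
 *	if (dsc_aux &&
 *	    drm_dp_dpcd_read(dsc_aux, DP_DSC_SUPPORT, dsc_caps,
 *			     sizeof(dsc_caps)) == sizeof(dsc_caps))
 *		example_parse_dsc_caps(connector, dsc_caps);
 *
 * example_parse_dsc_caps() is a hypothetical driver function.
 */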
5509