/*
 * Copyright © 2014 Red Hat.
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  The copyright holders make no representations
 * about the suitability of this software for any purpose.  It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */
#ifndef _DRM_DP_MST_HELPER_H_
#define _DRM_DP_MST_HELPER_H_

#include <linux/types.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_atomic.h>

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stackdepot.h>
#include <linux/timekeeping.h>

enum drm_dp_mst_topology_ref_type {
	DRM_DP_MST_TOPOLOGY_REF_GET,
	DRM_DP_MST_TOPOLOGY_REF_PUT,
};

struct drm_dp_mst_topology_ref_history {
	struct drm_dp_mst_topology_ref_entry {
		enum drm_dp_mst_topology_ref_type type;
		int count;
		ktime_t ts_nsec;
		depot_stack_handle_t backtrace;
	} *entries;
	int len;
};
#endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */

struct drm_dp_mst_branch;

/**
 * struct drm_dp_vcpi - Virtual Channel Payload Identifier
 * @vcpi: Virtual channel ID.
 * @pbn: Payload Bandwidth Number for this channel
 * @aligned_pbn: PBN aligned with slot size
 * @num_slots: number of slots for this PBN
 */
struct drm_dp_vcpi {
	int vcpi;
	int pbn;
	int aligned_pbn;
	int num_slots;
};
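
/*
 * Illustrative sketch (not part of the API): the helpers derive @num_slots
 * from @pbn by dividing by the manager's PBN-per-slot divisor, e.g.
 *
 *	int num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
 *
 * which is what drm_dp_find_vcpi_slots() computes.
 */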

/**
 * struct drm_dp_mst_port - MST port
 * @port_num: port number
 * @input: if this port is an input port. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @mcs: message capability status - DP 1.2 spec. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @ddps: DisplayPort Device Plug Status - DP 1.2. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @pdt: Peer Device Type. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @ldps: Legacy Device Plug Status. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @dpcd_rev: DPCD revision of device on this port. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @num_sdp_streams: Number of simultaneous streams. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @num_sdp_stream_sinks: Number of stream sinks. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @full_pbn: Max possible bandwidth for this port. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @next: link to next port on this branch device
 * @aux: i2c aux transport to talk to device connected to this port, protected
 * by &drm_dp_mst_topology_mgr.base.lock.
 * @parent: branch device parent of this port
 * @vcpi: Virtual Channel Payload info for this port.
 * @connector: DRM connector this port is connected to. Protected by
 * &drm_dp_mst_topology_mgr.base.lock.
 * @mgr: topology manager this port lives under.
 *
 * This structure represents an MST port endpoint on a device somewhere
 * in the MST topology.
 */
struct drm_dp_mst_port {
	/**
	 * @topology_kref: refcount for this port's lifetime in the topology,
	 * only the DP MST helpers should need to touch this
	 */
	struct kref topology_kref;

	/**
	 * @malloc_kref: refcount for the memory allocation containing this
	 * structure. See drm_dp_mst_get_port_malloc() and
	 * drm_dp_mst_put_port_malloc(), and the usage sketch after this
	 * struct.
	 */
	struct kref malloc_kref;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	/**
	 * @topology_ref_history: A history of each topology
	 * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
	 */
	struct drm_dp_mst_topology_ref_history topology_ref_history;
#endif

	u8 port_num;
	bool input;
	bool mcs;
	bool ddps;
	u8 pdt;
	bool ldps;
	u8 dpcd_rev;
	u8 num_sdp_streams;
	u8 num_sdp_stream_sinks;
	uint16_t full_pbn;
	struct list_head next;
	/**
	 * @mstb: the branch device connected to this port, if there is one.
	 * This should be considered protected for reading by
	 * &drm_dp_mst_topology_mgr.lock. There are two exceptions to this:
	 * &drm_dp_mst_topology_mgr.up_req_work and
	 * &drm_dp_mst_topology_mgr.work, which do not grab
	 * &drm_dp_mst_topology_mgr.lock during reads but are the only
	 * updaters of this list and are protected from writing concurrently
	 * by &drm_dp_mst_topology_mgr.probe_lock.
	 */
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_aux aux; /* i2c bus for this port? */
	struct drm_dp_mst_branch *parent;

	struct drm_dp_vcpi vcpi;
	struct drm_connector *connector;
	struct drm_dp_mst_topology_mgr *mgr;

	/**
	 * @cached_edid: for DP logical ports - make tiling work by ensuring
	 * that the EDID for all connectors is read immediately.
	 */
	struct edid *cached_edid;
	/**
	 * @has_audio: Tracks whether the sink connected to this port is
	 * audio-capable.
	 */
	bool has_audio;

	/**
	 * @fec_capable: bool indicating if FEC can be supported up to that
	 * point in the MST topology.
	 */
	bool fec_capable;
};
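
/*
 * A minimal sketch (driver names hypothetical) of the malloc-reference
 * pattern referred to above: a driver that stashes a port pointer in its
 * own state takes a malloc reference for as long as the pointer is held,
 * so the allocation outlives the port's presence in the topology:
 *
 *	my_connector->port = port;
 *	drm_dp_mst_get_port_malloc(port);
 *	...
 *	drm_dp_mst_put_port_malloc(my_connector->port);
 *	my_connector->port = NULL;
 */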

/* sideband msg header - not bit struct */
struct drm_dp_sideband_msg_hdr {
	u8 lct;
	u8 lcr;
	u8 rad[8];
	bool broadcast;
	bool path_msg;
	u8 msg_len;
	bool somt;
	bool eomt;
	bool seqno;
};

struct drm_dp_sideband_msg_rx {
	u8 chunk[48];
	u8 msg[256];
	u8 curchunk_len;
	u8 curchunk_idx; /* chunk we are parsing now */
	u8 curchunk_hdrlen;
	u8 curlen; /* total length of the msg */
	bool have_somt;
	bool have_eomt;
	struct drm_dp_sideband_msg_hdr initial_hdr;
};

/**
 * struct drm_dp_mst_branch - MST branch device.
 * @rad: Relative Address to talk to this branch device.
 * @lct: Link count total to talk to this branch device.
 * @num_ports: number of ports on the branch.
 * @port_parent: pointer to the port parent, NULL if toplevel.
 * @mgr: topology manager for this branch device.
 * @link_address_sent: whether a link address message has been sent to this
 * device yet.
 * @guid: GUID for DP 1.2 branch device. Ports under this branch can be
 * identified by port number.
 *
 * This structure represents an MST branch device; there is one primary
 * branch device at the root, along with any other branches connected to
 * downstream ports of parent branches.
 */
struct drm_dp_mst_branch {
	/**
	 * @topology_kref: refcount for this branch device's lifetime in the
	 * topology, only the DP MST helpers should need to touch this
	 */
	struct kref topology_kref;

	/**
	 * @malloc_kref: refcount for the memory allocation containing this
	 * structure. See drm_dp_mst_get_mstb_malloc() and
	 * drm_dp_mst_put_mstb_malloc().
	 */
	struct kref malloc_kref;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	/**
	 * @topology_ref_history: A history of each topology
	 * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
	 */
	struct drm_dp_mst_topology_ref_history topology_ref_history;
#endif

	/**
	 * @destroy_next: linked-list entry used by
	 * drm_dp_delayed_destroy_work()
	 */
	struct list_head destroy_next;

	u8 rad[8];
	u8 lct;
	int num_ports;

	/**
	 * @ports: the list of ports on this branch device. This should be
	 * considered protected for reading by &drm_dp_mst_topology_mgr.lock.
	 * There are two exceptions to this:
	 * &drm_dp_mst_topology_mgr.up_req_work and
	 * &drm_dp_mst_topology_mgr.work, which do not grab
	 * &drm_dp_mst_topology_mgr.lock during reads but are the only
	 * updaters of this list and are protected from updating the list
	 * concurrently by &drm_dp_mst_topology_mgr.probe_lock.
	 */
	struct list_head ports;

	struct drm_dp_mst_port *port_parent;
	struct drm_dp_mst_topology_mgr *mgr;

	bool link_address_sent;

	/* global unique identifier to identify branch devices */
	u8 guid[16];
};


struct drm_dp_nak_reply {
	u8 guid[16];
	u8 reason;
	u8 nak_data;
};

struct drm_dp_link_address_ack_reply {
	u8 guid[16];
	u8 nports;
	struct drm_dp_link_addr_reply_port {
		bool input_port;
		u8 peer_device_type;
		u8 port_number;
		bool mcs;
		bool ddps;
		bool legacy_device_plug_status;
		u8 dpcd_revision;
		u8 peer_guid[16];
		u8 num_sdp_streams;
		u8 num_sdp_stream_sinks;
	} ports[16];
};

struct drm_dp_remote_dpcd_read_ack_reply {
	u8 port_number;
	u8 num_bytes;
	u8 bytes[255];
};

struct drm_dp_remote_dpcd_write_ack_reply {
	u8 port_number;
};

struct drm_dp_remote_dpcd_write_nak_reply {
	u8 port_number;
	u8 reason;
	u8 bytes_written_before_failure;
};

struct drm_dp_remote_i2c_read_ack_reply {
	u8 port_number;
	u8 num_bytes;
	u8 bytes[255];
};

struct drm_dp_remote_i2c_read_nak_reply {
	u8 port_number;
	u8 nak_reason;
	u8 i2c_nak_transaction;
};

struct drm_dp_remote_i2c_write_ack_reply {
	u8 port_number;
};

struct drm_dp_query_stream_enc_status_ack_reply {
	/* Bit[23:16]- Stream Id */
	u8 stream_id;

	/* Bit[15]- Signed */
	bool reply_signed;

	/* Bit[10:8]- Stream Output Sink Type */
	bool unauthorizable_device_present;
	bool legacy_device_present;
	bool query_capable_device_present;

	/* Bit[12:11]- Stream Output CP Type */
	bool hdcp_1x_device_present;
	bool hdcp_2x_device_present;

	/* Bit[4]- Stream Authentication */
	bool auth_completed;

	/* Bit[3]- Stream Encryption */
	bool encryption_enabled;

	/* Bit[2]- Stream Repeater Function Present */
	bool repeater_present;

	/* Bit[1:0]- Stream State */
	u8 state;
};

#define DRM_DP_MAX_SDP_STREAMS 16
struct drm_dp_allocate_payload {
	u8 port_number;
	u8 number_sdp_streams;
	u8 vcpi;
	u16 pbn;
	u8 sdp_stream_sink[DRM_DP_MAX_SDP_STREAMS];
};

struct drm_dp_allocate_payload_ack_reply {
	u8 port_number;
	u8 vcpi;
	u16 allocated_pbn;
};

struct drm_dp_connection_status_notify {
	u8 guid[16];
	u8 port_number;
	bool legacy_device_plug_status;
	bool displayport_device_plug_status;
	bool message_capability_status;
	bool input_port;
	u8 peer_device_type;
};

struct drm_dp_remote_dpcd_read {
	u8 port_number;
	u32 dpcd_address;
	u8 num_bytes;
};

struct drm_dp_remote_dpcd_write {
	u8 port_number;
	u32 dpcd_address;
	u8 num_bytes;
	u8 *bytes;
};

#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
struct drm_dp_remote_i2c_read {
	u8 num_transactions;
	u8 port_number;
	struct drm_dp_remote_i2c_read_tx {
		u8 i2c_dev_id;
		u8 num_bytes;
		u8 *bytes;
		u8 no_stop_bit;
		u8 i2c_transaction_delay;
	} transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];
	u8 read_i2c_device_id;
	u8 num_bytes_read;
};

struct drm_dp_remote_i2c_write {
	u8 port_number;
	u8 write_i2c_device_id;
	u8 num_bytes;
	u8 *bytes;
};

struct drm_dp_query_stream_enc_status {
	u8 stream_id;
	u8 client_id[7];	/* 56-bit nonce */
	u8 stream_event;
	bool valid_stream_event;
	u8 stream_behavior;
	u8 valid_stream_behavior;
};

/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_req {
	u8 port_number;
};

struct drm_dp_enum_path_resources_ack_reply {
	u8 port_number;
	bool fec_capable;
	u16 full_payload_bw_number;
	u16 avail_payload_bw_number;
};

/* covers POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_rep {
	u8 port_number;
};

struct drm_dp_query_payload {
	u8 port_number;
	u8 vcpi;
};

struct drm_dp_resource_status_notify {
	u8 port_number;
	u8 guid[16];
	u16 available_pbn;
};

struct drm_dp_query_payload_ack_reply {
	u8 port_number;
	u16 allocated_pbn;
};

struct drm_dp_sideband_msg_req_body {
	u8 req_type;
	union ack_req {
		struct drm_dp_connection_status_notify conn_stat;
		struct drm_dp_port_number_req port_num;
		struct drm_dp_resource_status_notify resource_stat;

		struct drm_dp_query_payload query_payload;
		struct drm_dp_allocate_payload allocate_payload;

		struct drm_dp_remote_dpcd_read dpcd_read;
		struct drm_dp_remote_dpcd_write dpcd_write;

		struct drm_dp_remote_i2c_read i2c_read;
		struct drm_dp_remote_i2c_write i2c_write;

		struct drm_dp_query_stream_enc_status enc_status;
	} u;
};

struct drm_dp_sideband_msg_reply_body {
	u8 reply_type;
	u8 req_type;
	union ack_replies {
		struct drm_dp_nak_reply nak;
		struct drm_dp_link_address_ack_reply link_addr;
		struct drm_dp_port_number_rep port_number;

		struct drm_dp_enum_path_resources_ack_reply path_resources;
		struct drm_dp_allocate_payload_ack_reply allocate_payload;
		struct drm_dp_query_payload_ack_reply query_payload;

		struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack;
		struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack;
		struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack;

		struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack;
		struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack;
		struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack;

		struct drm_dp_query_stream_enc_status_ack_reply enc_status;
	} u;
};

/* msg is queued to be put into a slot */
#define DRM_DP_SIDEBAND_TX_QUEUED 0
/* msg has started transmitting on a slot - still on msgq */
#define DRM_DP_SIDEBAND_TX_START_SEND 1
/* msg has finished transmitting on a slot - removed from msgq only in slot */
#define DRM_DP_SIDEBAND_TX_SENT 2
/* msg has received a response - removed from slot */
#define DRM_DP_SIDEBAND_TX_RX 3
#define DRM_DP_SIDEBAND_TX_TIMEOUT 4

struct drm_dp_sideband_msg_tx {
	u8 msg[256];
	u8 chunk[48];
	u8 cur_offset;
	u8 cur_len;
	struct drm_dp_mst_branch *dst;
	struct list_head next;
	int seqno;
	int state;
	bool path_msg;
	struct drm_dp_sideband_msg_reply_body reply;
};

/* sideband msg handler */
struct drm_dp_mst_topology_mgr;
struct drm_dp_mst_topology_cbs {
	/* create a connector for a port */
	struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
	/*
	 * Checks for any pending MST interrupts, passing them to MST core for
	 * processing, the same way an HPD IRQ pulse handler would do this.
	 * If provided, MST core calls this callback from a poll-waiting loop
	 * when waiting for MST down message replies. The driver is expected
	 * to guard against a race between this callback and the driver's HPD
	 * IRQ pulse handler.
	 */
	void (*poll_hpd_irq)(struct drm_dp_mst_topology_mgr *mgr);
};
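
/*
 * A minimal, hypothetical sketch of a driver wiring up these callbacks;
 * my_mst_add_connector() and its connector bookkeeping are placeholders,
 * not part of this API:
 *
 *	static struct drm_connector *
 *	my_mst_add_connector(struct drm_dp_mst_topology_mgr *mgr,
 *			     struct drm_dp_mst_port *port, const char *path)
 *	{
 *		// allocate and register a drm_connector for @port, using
 *		// @path for the connector's PATH property
 *	}
 *
 *	static const struct drm_dp_mst_topology_cbs my_mst_cbs = {
 *		.add_connector = my_mst_add_connector,
 *	};
 */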

#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)

#define DP_PAYLOAD_LOCAL 1
#define DP_PAYLOAD_REMOTE 2
#define DP_PAYLOAD_DELETE_LOCAL 3

struct drm_dp_payload {
	int payload_state;
	int start_slot;
	int num_slots;
	int vcpi;
};

#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)

struct drm_dp_vcpi_allocation {
	struct drm_dp_mst_port *port;
	int vcpi;
	int pbn;
	bool dsc_enabled;
	struct list_head next;
};

struct drm_dp_mst_topology_state {
	struct drm_private_state base;
	struct list_head vcpis;
	struct drm_dp_mst_topology_mgr *mgr;
};

#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)

/**
 * struct drm_dp_mst_topology_mgr - DisplayPort MST manager
 *
 * This struct represents the toplevel displayport MST topology manager.
 * There should be one instance of this for every MST capable DP connector
 * on the GPU.
 */
struct drm_dp_mst_topology_mgr {
	/**
	 * @base: Base private object for atomic
	 */
	struct drm_private_obj base;

	/**
	 * @dev: device pointer for adding i2c devices etc.
	 */
	struct drm_device *dev;
	/**
	 * @cbs: callbacks for connector addition and destruction.
	 */
	const struct drm_dp_mst_topology_cbs *cbs;
	/**
	 * @max_dpcd_transaction_bytes: maximum number of bytes to read/write
	 * in one go.
	 */
	int max_dpcd_transaction_bytes;
	/**
	 * @aux: AUX channel for the DP MST connector this topology mgr is
	 * controlling.
	 */
	struct drm_dp_aux *aux;
	/**
	 * @max_payloads: maximum number of payloads the GPU can generate.
	 */
	int max_payloads;
	/**
	 * @conn_base_id: DRM connector ID this mgr is connected to. Only used
	 * to build the MST connector path value.
	 */
	int conn_base_id;

	/**
	 * @up_req_recv: Message receiver state for up requests.
	 */
	struct drm_dp_sideband_msg_rx up_req_recv;

	/**
	 * @down_rep_recv: Message receiver state for replies to down
	 * requests.
	 */
	struct drm_dp_sideband_msg_rx down_rep_recv;

	/**
	 * @lock: protects @mst_state, @mst_primary, @dpcd, and
	 * @payload_id_table_cleared.
	 */
	struct mutex lock;

	/**
	 * @probe_lock: Prevents @work and @up_req_work, the only writers of
	 * &drm_dp_mst_port.mstb and &drm_dp_mst_branch.ports, from racing
	 * while they update the topology.
	 */
	struct mutex probe_lock;

	/**
	 * @mst_state: Whether this manager is enabled for an MST capable
	 * port. False if no MST sink/branch device is connected.
	 */
	bool mst_state : 1;

	/**
	 * @payload_id_table_cleared: Whether or not we've cleared the payload
	 * ID table for @mst_primary. Protected by @lock.
	 */
	bool payload_id_table_cleared : 1;

	/**
	 * @mst_primary: Pointer to the primary/first branch device.
	 */
	struct drm_dp_mst_branch *mst_primary;

	/**
	 * @dpcd: Cache of DPCD for primary port.
	 */
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	/**
	 * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
	 */
	u8 sink_count;
	/**
	 * @pbn_div: PBN to slots divisor.
	 */
	int pbn_div;

	/**
	 * @funcs: Atomic helper callbacks
	 */
	const struct drm_private_state_funcs *funcs;

	/**
	 * @qlock: protects @tx_msg_downq and &drm_dp_sideband_msg_tx.state
	 */
	struct mutex qlock;

	/**
	 * @tx_msg_downq: List of pending down requests
	 */
	struct list_head tx_msg_downq;

	/**
	 * @payload_lock: Protects payload information.
	 */
	struct mutex payload_lock;
	/**
	 * @proposed_vcpis: Array of pointers for the new VCPI allocation. The
	 * VCPI structure itself is &drm_dp_mst_port.vcpi, and the size of
	 * this array is determined by @max_payloads.
	 */
	struct drm_dp_vcpi **proposed_vcpis;
	/**
	 * @payloads: Array of payloads. The size of this array is determined
	 * by @max_payloads.
	 */
	struct drm_dp_payload *payloads;
	/**
	 * @payload_mask: Elements of @payloads actually in use. Since
	 * reallocation of active outputs isn't possible, gaps can be created
	 * by disabling outputs out of order compared to how they've been
	 * enabled.
	 */
	unsigned long payload_mask;
	/**
	 * @vcpi_mask: Similar to @payload_mask, but for @proposed_vcpis.
	 */
	unsigned long vcpi_mask;

	/**
	 * @tx_waitq: Wait queue used to wait for a transmitted sideband
	 * message to change state.
	 */
	wait_queue_head_t tx_waitq;
	/**
	 * @work: Probe work.
	 */
	struct work_struct work;
	/**
	 * @tx_work: Sideband transmit worker. This can nest within the main
	 * @work worker for each transaction @work launches.
	 */
	struct work_struct tx_work;

	/**
	 * @destroy_port_list: List of ports to be destroyed.
	 */
	struct list_head destroy_port_list;
	/**
	 * @destroy_branch_device_list: List of branch devices to be
	 * destroyed.
	 */
	struct list_head destroy_branch_device_list;
	/**
	 * @delayed_destroy_lock: Protects @destroy_port_list and
	 * @destroy_branch_device_list.
	 */
	struct mutex delayed_destroy_lock;

	/**
	 * @delayed_destroy_wq: Workqueue used for delayed_destroy_work items.
	 * A dedicated WQ makes it possible to drain any requeued work items
	 * on it.
	 */
	struct workqueue_struct *delayed_destroy_wq;

	/**
	 * @delayed_destroy_work: Work item to destroy MST port and branch
	 * devices, needed to avoid locking inversion.
	 */
	struct work_struct delayed_destroy_work;

	/**
	 * @up_req_list: List of pending up requests from the topology that
	 * need to be processed, in chronological order.
	 */
	struct list_head up_req_list;
	/**
	 * @up_req_lock: Protects @up_req_list
	 */
	struct mutex up_req_lock;
	/**
	 * @up_req_work: Work item to process up requests received from the
	 * topology. Needed to avoid blocking hotplug handling and sideband
	 * transmissions.
	 */
	struct work_struct up_req_work;

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
	/**
	 * @topology_ref_history_lock: protects
	 * &drm_dp_mst_port.topology_ref_history and
	 * &drm_dp_mst_branch.topology_ref_history.
	 */
	struct mutex topology_ref_history_lock;
#endif
};

int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes,
				 int max_payloads, int conn_base_id);
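
/*
 * An illustrative sketch (values and names hypothetical) of a driver
 * initializing the manager at load time, sized for up to 6 payloads and
 * 16-byte DPCD transactions:
 *
 *	ret = drm_dp_mst_topology_mgr_init(&my_dp->mst_mgr, dev, &my_dp->aux,
 *					   16, 6, my_connector->base.id);
 *	if (ret)
 *		return ret;
 */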

void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);

bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);

int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
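
/*
 * A hedged sketch of the expected call site: from the driver's short/HPD
 * IRQ handler, read the ESI vectors from DPCD, hand them to the MST core,
 * and ack what was handled (the retry loop and error handling a real
 * driver needs are omitted):
 *
 *	u8 esi[DP_DPRX_ESI_LEN];
 *	bool handled;
 *
 *	drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, DP_DPRX_ESI_LEN);
 *	drm_dp_mst_hpd_irq(mgr, esi, &handled);
 *	if (handled)
 *		drm_dp_dpcd_write(mgr->aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
 */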

int
drm_dp_mst_detect_port(struct drm_connector *connector,
		       struct drm_modeset_acquire_ctx *ctx,
		       struct drm_dp_mst_topology_mgr *mgr,
		       struct drm_dp_mst_port *port);

struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);

int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count);

int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
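
/*
 * Illustrative example: for a 1920x1080@60 mode (148500 kHz pixel clock)
 * at 24 bpp without DSC, a driver would compute the PBN as
 *
 *	int pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, 24, false);
 *
 * and then size the VCPI allocation from it (see
 * drm_dp_atomic_find_vcpi_slots() below).
 */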

bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_mst_port *port, int pbn, int slots);

int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);

void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);

void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
				struct drm_dp_mst_port *port);

int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
			   int pbn);

int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr);

int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr);

int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
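
/*
 * A sketch of the enable-side ordering these helpers assume, as used by
 * existing drivers (error handling omitted; a sketch, not a recipe):
 *
 *	drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots);
 *	drm_dp_update_payload_part1(mgr);	// write payload table
 *	// ... enable the stream in hardware, wait for ACT ...
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_update_payload_part2(mgr);	// remote payload commands
 */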

void drm_dp_mst_dump_topology(struct seq_file *m,
			      struct drm_dp_mst_topology_mgr *mgr);

void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
			       bool sync);

ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
			     unsigned int offset, void *buffer, size_t size);
ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
			      unsigned int offset, void *buffer, size_t size);

int drm_dp_mst_connector_late_register(struct drm_connector *connector,
				       struct drm_dp_mst_port *port);
void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
					   struct drm_dp_mst_port *port);

struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
								    struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
			      struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_mst_port *port, int pbn,
			      int pbn_div);
int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
				 struct drm_dp_mst_port *port,
				 int pbn, int pbn_div,
				 bool enable);
int __must_check
drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state,
				  struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
				 struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port);
int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port, bool power_up);
int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
		struct drm_dp_mst_port *port,
		struct drm_dp_query_stream_enc_status_ack_reply *status);
int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);
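
/*
 * A hedged sketch of the atomic-check pattern these helpers support:
 * acquire slots for the new state, release them when disabling, and run
 * the global check once per atomic commit (driver state lookup omitted):
 *
 *	slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port, pbn,
 *					      0);	// 0: use mgr->pbn_div
 *	if (slots < 0)
 *		return slots;
 *	...
 *	ret = drm_dp_atomic_release_vcpi_slots(state, mgr, port);
 *	...
 *	ret = drm_dp_mst_atomic_check(state);
 */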

void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);

struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port);

extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;

/**
 * __drm_dp_mst_state_iter_get - private atomic state iterator function for
 * macro-internal use
 * @state: &struct drm_atomic_state pointer
 * @mgr: pointer to the &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: optional pointer to the old &struct drm_dp_mst_topology_state
 * iteration cursor
 * @new_state: optional pointer to the new &struct drm_dp_mst_topology_state
 * iteration cursor
 * @i: int iteration cursor, for macro-internal use
 *
 * Used by for_each_oldnew_mst_mgr_in_state(),
 * for_each_old_mst_mgr_in_state(), and for_each_new_mst_mgr_in_state(). Don't
 * call this directly.
 *
 * Returns:
 * True if the current &struct drm_private_obj is a &struct
 * drm_dp_mst_topology_mgr, false otherwise.
 */
static inline bool
__drm_dp_mst_state_iter_get(struct drm_atomic_state *state,
			    struct drm_dp_mst_topology_mgr **mgr,
			    struct drm_dp_mst_topology_state **old_state,
			    struct drm_dp_mst_topology_state **new_state,
			    int i)
{
	struct __drm_private_objs_state *objs_state = &state->private_objs[i];

	if (objs_state->ptr->funcs != &drm_dp_mst_topology_state_funcs)
		return false;

	*mgr = to_dp_mst_topology_mgr(objs_state->ptr);
	if (old_state)
		*old_state = to_dp_mst_topology_state(objs_state->old_state);
	if (new_state)
		*new_state = to_dp_mst_topology_state(objs_state->new_state);

	return true;
}

/**
 * for_each_oldnew_mst_mgr_in_state - iterate over all DP MST topology
 * managers in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
 * state
 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking both old and new state. This is useful in places where the state
 * delta needs to be considered, for example in atomic check functions.
 */
#define for_each_oldnew_mst_mgr_in_state(__state, mgr, old_state, new_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), &(new_state), (__i)))
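
/*
 * Illustrative use, e.g. from an atomic check function (variable
 * declarations assumed):
 *
 *	for_each_oldnew_mst_mgr_in_state(state, mgr, old_mst_state,
 *					 new_mst_state, i) {
 *		// compare old_mst_state->vcpis with new_mst_state->vcpis
 *	}
 */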

/**
 * for_each_old_mst_mgr_in_state - iterate over all DP MST topology managers
 * in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking only the old state. This is useful in disable functions, where we
 * need the old state the hardware is still in.
 */
#define for_each_old_mst_mgr_in_state(__state, mgr, old_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), NULL, (__i)))

/**
 * for_each_new_mst_mgr_in_state - iterate over all DP MST topology managers
 * in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking only the new state. This is useful in enable functions, where we
 * need the new state the hardware should be in when the atomic commit
 * operation has completed.
 */
#define for_each_new_mst_mgr_in_state(__state, mgr, new_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), NULL, &(new_state), (__i)))

#endif