/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */

#ifndef VCHIQ_CORE_H
#define VCHIQ_CORE_H

#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/dev_printk.h>
#include <linux/kthread.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/spinlock_types.h>
#include <linux/wait.h>

#include "../../include/linux/raspberrypi/vchiq.h"
#include "vchiq_cfg.h"

/* Do this so that we can test-build the code on non-rpi systems */
#if IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE)

#else

#ifndef dsb
#define dsb(a)
#endif

#endif	/* IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE) */

#define VCHIQ_SERVICE_HANDLE_INVALID 0

#define VCHIQ_SLOT_SIZE     4096
#define VCHIQ_MAX_MSG_SIZE  (VCHIQ_SLOT_SIZE - sizeof(struct vchiq_header))

#define VCHIQ_SLOT_MASK        (VCHIQ_SLOT_SIZE - 1)
#define VCHIQ_SLOT_QUEUE_MASK  (VCHIQ_MAX_SLOTS_PER_SIDE - 1)
#define VCHIQ_SLOT_ZERO_SLOTS  DIV_ROUND_UP(sizeof(struct vchiq_slot_zero), \
					    VCHIQ_SLOT_SIZE)

#define BITSET_SIZE(b)        ((b + 31) >> 5)
#define BITSET_WORD(b)        (b >> 5)
#define BITSET_BIT(b)         (1 << (b & 31))
#define BITSET_IS_SET(bs, b)  (bs[BITSET_WORD(b)] & BITSET_BIT(b))
#define BITSET_SET(bs, b)     (bs[BITSET_WORD(b)] |= BITSET_BIT(b))
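
/*
 * Illustrative only: the BITSET_* helpers above operate on an array of
 * 32-bit words sized with BITSET_SIZE(). A minimal sketch, using a
 * hypothetical plain u32 array (struct vchiq_state actually stores its
 * poll_services bitset as atomic_t words and uses atomic accessors):
 *
 *	u32 poll_bits[BITSET_SIZE(VCHIQ_MAX_SERVICES)] = { 0 };
 *
 *	BITSET_SET(poll_bits, localport);
 *	if (BITSET_IS_SET(poll_bits, localport))
 *		;	// service 'localport' is flagged for polling
 */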

enum {
	DEBUG_ENTRIES,
#if VCHIQ_ENABLE_DEBUG
	DEBUG_SLOT_HANDLER_COUNT,
	DEBUG_SLOT_HANDLER_LINE,
	DEBUG_PARSE_LINE,
	DEBUG_PARSE_HEADER,
	DEBUG_PARSE_MSGID,
	DEBUG_AWAIT_COMPLETION_LINE,
	DEBUG_DEQUEUE_MESSAGE_LINE,
	DEBUG_SERVICE_CALLBACK_LINE,
	DEBUG_MSG_QUEUE_FULL_COUNT,
	DEBUG_COMPLETION_QUEUE_FULL_COUNT,
#endif
	DEBUG_MAX
};

#if VCHIQ_ENABLE_DEBUG

#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug
#define DEBUG_TRACE(d) \
	do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(sy); } while (0)
#define DEBUG_VALUE(d, v) \
	do { debug_ptr[DEBUG_ ## d] = (v); dsb(sy); } while (0)
#define DEBUG_COUNT(d) \
	do { debug_ptr[DEBUG_ ## d]++; dsb(sy); } while (0)

#else /* VCHIQ_ENABLE_DEBUG */

#define DEBUG_INITIALISE(local)
#define DEBUG_TRACE(d)
#define DEBUG_VALUE(d, v)
#define DEBUG_COUNT(d)

#endif /* VCHIQ_ENABLE_DEBUG */
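
/*
 * Illustrative only: a typical usage pattern for the DEBUG_* macros, shown
 * with a hypothetical function that has access to the local
 * vchiq_shared_state. DEBUG_INITIALISE() caches a pointer to the shared
 * debug[] array; the other macros record progress into it and compile away
 * when VCHIQ_ENABLE_DEBUG is disabled:
 *
 *	static void example_slot_handler(struct vchiq_shared_state *local)
 *	{
 *		DEBUG_INITIALISE(local);
 *
 *		DEBUG_COUNT(SLOT_HANDLER_COUNT);	// bump an iteration counter
 *		DEBUG_TRACE(SLOT_HANDLER_LINE);		// record current __LINE__
 *	}
 */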

enum vchiq_connstate {
	VCHIQ_CONNSTATE_DISCONNECTED,
	VCHIQ_CONNSTATE_CONNECTING,
	VCHIQ_CONNSTATE_CONNECTED,
	VCHIQ_CONNSTATE_PAUSING,
	VCHIQ_CONNSTATE_PAUSE_SENT,
	VCHIQ_CONNSTATE_PAUSED,
	VCHIQ_CONNSTATE_RESUMING,
	VCHIQ_CONNSTATE_PAUSE_TIMEOUT,
	VCHIQ_CONNSTATE_RESUME_TIMEOUT
};

enum {
	VCHIQ_SRVSTATE_FREE,
	VCHIQ_SRVSTATE_HIDDEN,
	VCHIQ_SRVSTATE_LISTENING,
	VCHIQ_SRVSTATE_OPENING,
	VCHIQ_SRVSTATE_OPEN,
	VCHIQ_SRVSTATE_OPENSYNC,
	VCHIQ_SRVSTATE_CLOSESENT,
	VCHIQ_SRVSTATE_CLOSERECVD,
	VCHIQ_SRVSTATE_CLOSEWAIT,
	VCHIQ_SRVSTATE_CLOSED
};

enum vchiq_bulk_dir {
	VCHIQ_BULK_TRANSMIT,
	VCHIQ_BULK_RECEIVE
};

struct vchiq_bulk {
	short mode;
	short dir;
	void *userdata;
	dma_addr_t data;
	int size;
	void *remote_data;
	int remote_size;
	int actual;
};

struct vchiq_bulk_queue {
	int local_insert;  /* Where to insert the next local bulk */
	int remote_insert; /* Where to insert the next remote bulk (master) */
	int process;       /* Bulk to transfer next */
	int remote_notify; /* Bulk to notify the remote client of next (mstr) */
	int remove;        /* Bulk to notify the local client of, and remove, next */
	struct vchiq_bulk bulks[VCHIQ_NUM_SERVICE_BULKS];
};
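
/*
 * Illustrative only: the indices above increase monotonically and are
 * reduced to an array position modulo VCHIQ_NUM_SERVICE_BULKS (a power of
 * two), roughly like this for a hypothetical struct vchiq_bulk_queue *queue:
 *
 *	struct vchiq_bulk *bulk =
 *		&queue->bulks[queue->local_insert & (VCHIQ_NUM_SERVICE_BULKS - 1)];
 *
 * so local_insert - remove is approximately the number of bulks still
 * outstanding on the service.
 */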

/*
 * Remote events provide a way of presenting several virtual doorbells to a
 * peer (ARM host to VPU) using only one physical doorbell. They can be
 * thought of as a way for the peer to signal a semaphore, in this case
 * implemented as a waitqueue.
 *
 * Remote events remain signalled until acknowledged by the receiver, and
 * they are non-counting. They are designed in such a way as to minimise the
 * number of interrupts and avoid unnecessary waiting.
 *
 * A remote_event is a small data structure that lives in shared memory. It
 * comprises two booleans - armed and fired:
 *
 * The sender sets fired when it signals the receiver.
 * If fired is set, the receiver has been signalled and need not wait.
 * The receiver sets armed before it begins to wait.
 * If armed is set, the receiver is waiting and wishes to be woken by an
 * interrupt.
 */
struct remote_event {
	int armed;
	int fired;
	u32 __unused;
};
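
/*
 * Simplified sketch of the armed/fired protocol (pseudo-code only;
 * ring_doorbell() and wait_until() are placeholders - the real paths are
 * remote_event_signal(), declared below, and the wait helpers in
 * vchiq_core.c, which add the necessary memory barriers):
 *
 *	// sender
 *	event->fired = 1;
 *	if (event->armed)
 *		ring_doorbell();	// only interrupt a waiting receiver
 *
 *	// receiver
 *	if (!event->fired) {
 *		event->armed = 1;	// ask to be woken
 *		wait_until(event->fired);
 *		event->armed = 0;
 *	}
 *	event->fired = 0;		// acknowledge
 */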

struct opaque_platform_state;

struct vchiq_slot {
	char data[VCHIQ_SLOT_SIZE];
};

struct vchiq_slot_info {
	/* Use two counters rather than one to avoid the need for a mutex. */
	short use_count;
	short release_count;
};
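
/*
 * Illustrative only: a slot is completely released once release_count has
 * caught up with use_count, which is roughly the condition checked before
 * a slot can be recycled:
 *
 *	if (slot_info->release_count == slot_info->use_count)
 *		;	// no message in this slot is still in use
 */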

struct vchiq_service {
	struct vchiq_service_base base;
	unsigned int handle;
	struct kref ref_count;
	struct rcu_head rcu;
	int srvstate;
	void (*userdata_term)(void *userdata);
	unsigned int localport;
	unsigned int remoteport;
	int public_fourcc;
	int client_id;
	char auto_close;
	char sync;
	char closing;
	char trace;
	atomic_t poll_flags;
	short version;
	short version_min;
	short peer_version;

	struct vchiq_state *state;
	struct vchiq_instance *instance;

	int service_use_count;

	struct vchiq_bulk_queue bulk_tx;
	struct vchiq_bulk_queue bulk_rx;

	struct completion remove_event;
	struct completion bulk_remove_event;
	struct mutex bulk_mutex;

	struct service_stats_struct {
		int quota_stalls;
		int slot_stalls;
		int bulk_stalls;
		int error_count;
		int ctrl_tx_count;
		int ctrl_rx_count;
		int bulk_tx_count;
		int bulk_rx_count;
		int bulk_aborted_count;
		u64 ctrl_tx_bytes;
		u64 ctrl_rx_bytes;
		u64 bulk_tx_bytes;
		u64 bulk_rx_bytes;
	} stats;

	int msg_queue_read;
	int msg_queue_write;
	struct completion msg_queue_pop;
	struct completion msg_queue_push;
	struct vchiq_header *msg_queue[VCHIQ_MAX_SLOTS];
};

/*
 * The quota information is outside struct vchiq_service so that it can
 * be statically allocated, since for accounting reasons a service's slot
 * usage is carried over between users of the same port number.
 */
struct vchiq_service_quota {
	unsigned short slot_quota;
	unsigned short slot_use_count;
	unsigned short message_quota;
	unsigned short message_use_count;
	struct completion quota_event;
	int previous_tx_index;
};
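
/*
 * Illustrative only: when queueing a message, the core compares the use
 * counts against the quotas and, if a limit has been reached, waits on
 * quota_event until the peer releases space, roughly:
 *
 *	while (quota->message_use_count == quota->message_quota ||
 *	       quota->slot_use_count == quota->slot_quota)
 *		wait_for_completion_interruptible(&quota->quota_event);
 */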

struct vchiq_shared_state {
	/* A non-zero value here indicates that the content is valid. */
	int initialised;

	/* The first and last (inclusive) slots allocated to the owner. */
	int slot_first;
	int slot_last;

	/* The slot allocated to synchronous messages from the owner. */
	int slot_sync;

	/*
	 * Signalling this event indicates that the owner's slot handler thread
	 * should run.
	 */
	struct remote_event trigger;

	/*
	 * Indicates the byte position within the stream where the next message
	 * will be written. The least significant bits are an index into the
	 * slot. The next bits are the index of the slot in slot_queue. (See
	 * the sketch following this struct.)
	 */
	int tx_pos;

	/* This event should be signalled when a slot is recycled. */
	struct remote_event recycle;

	/* The slot_queue index where the next recycled slot will be written. */
	int slot_queue_recycle;

	/* This event should be signalled when a synchronous message is sent. */
	struct remote_event sync_trigger;

	/*
	 * This event should be signalled when a synchronous message has been
	 * released.
	 */
	struct remote_event sync_release;

	/* A circular buffer of slot indexes. */
	int slot_queue[VCHIQ_MAX_SLOTS_PER_SIDE];

	/* Debugging state */
	int debug[DEBUG_MAX];
};
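
/*
 * Illustrative only: tx_pos (and the matching rx_pos in struct vchiq_state)
 * is a byte position within a virtual stream. The low bits locate data
 * within a slot and the remaining bits select an entry in slot_queue,
 * roughly like this for a hypothetical struct vchiq_shared_state *shared:
 *
 *	int offset_in_slot = shared->tx_pos & VCHIQ_SLOT_MASK;
 *	int queue_index    = (shared->tx_pos / VCHIQ_SLOT_SIZE) &
 *			     VCHIQ_SLOT_QUEUE_MASK;
 *	int slot_index     = shared->slot_queue[queue_index];
 */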

struct vchiq_slot_zero {
	int magic;
	short version;
	short version_min;
	int slot_zero_size;
	int slot_size;
	int max_slots;
	int max_slots_per_side;
	int platform_data[2];
	struct vchiq_shared_state master;
	struct vchiq_shared_state slave;
	struct vchiq_slot_info slots[VCHIQ_MAX_SLOTS];
};

struct vchiq_state {
	struct device *dev;
	int id;
	int initialised;
	enum vchiq_connstate conn_state;
	short version_common;

	struct vchiq_shared_state *local;
	struct vchiq_shared_state *remote;
	struct vchiq_slot *slot_data;

	unsigned short default_slot_quota;
	unsigned short default_message_quota;

	/* Event indicating connect message received */
	struct completion connect;

	/* Mutex protecting services */
	struct mutex mutex;
	struct vchiq_instance **instance;

	/* Processes incoming messages */
	struct task_struct *slot_handler_thread;

	/* Processes recycled slots */
	struct task_struct *recycle_thread;

	/* Processes synchronous messages */
	struct task_struct *sync_thread;

	/* Local implementation of the trigger remote event */
	wait_queue_head_t trigger_event;

	/* Local implementation of the recycle remote event */
	wait_queue_head_t recycle_event;

	/* Local implementation of the sync trigger remote event */
	wait_queue_head_t sync_trigger_event;

	/* Local implementation of the sync release remote event */
	wait_queue_head_t sync_release_event;

	char *tx_data;
	char *rx_data;
	struct vchiq_slot_info *rx_info;

	struct mutex slot_mutex;

	struct mutex recycle_mutex;

	struct mutex sync_mutex;

	spinlock_t msg_queue_spinlock;

	spinlock_t bulk_waiter_spinlock;

	spinlock_t quota_spinlock;

	/*
	 * Indicates the byte position within the stream from where the next
	 * message will be read. The least significant bits are an index into
	 * the slot. The next bits are the index of the slot in
	 * remote->slot_queue.
	 */
	int rx_pos;

	/*
	 * A cached copy of local->tx_pos. Only write to local->tx_pos, and
	 * read from remote->tx_pos. (See the sketch following this struct.)
	 */
	int local_tx_pos;

	/* The slot_queue index of the slot to become available next. */
	int slot_queue_available;

	/* A flag to indicate if any poll has been requested */
	int poll_needed;

	/* The index of the previous slot used for data messages. */
	int previous_data_index;

	/* The number of slots occupied by data messages. */
	unsigned short data_use_count;

	/* The maximum number of slots to be occupied by data messages. */
	unsigned short data_quota;

	/* An array of bit sets indicating which services must be polled. */
	atomic_t poll_services[BITSET_SIZE(VCHIQ_MAX_SERVICES)];

	/* The number of the first unused service */
	int unused_service;

	/* Signalled when a free slot becomes available. */
	struct completion slot_available_event;

	/* Signalled when a free data slot becomes available. */
	struct completion data_quota_event;

	struct state_stats_struct {
		int slot_stalls;
		int data_stalls;
		int ctrl_tx_count;
		int ctrl_rx_count;
		int error_count;
	} stats;

	struct vchiq_service __rcu *services[VCHIQ_MAX_SERVICES];
	struct vchiq_service_quota service_quotas[VCHIQ_MAX_SERVICES];
	struct vchiq_slot_info slot_info[VCHIQ_MAX_SLOTS];

	struct opaque_platform_state *platform_state;
};
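
/*
 * Illustrative only: the "write local, read remote" rule for tx_pos means
 * the sender advances its cached position and then publishes it, while the
 * receiver compares its own rx_pos with what the peer has published
 * (stride being the hypothetical size of the message just written):
 *
 *	// sender
 *	state->local_tx_pos += stride;
 *	state->local->tx_pos = state->local_tx_pos;
 *
 *	// receiver
 *	if (state->rx_pos != state->remote->tx_pos)
 *		;	// at least one unparsed message is available
 */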

static inline bool vchiq_remote_initialised(const struct vchiq_state *state)
{
	return state->remote && state->remote->initialised;
}

struct bulk_waiter {
	struct vchiq_bulk *bulk;
	struct completion event;
	int actual;
};

struct vchiq_config {
	unsigned int max_msg_size;
	unsigned int bulk_threshold;	/* The message size above which it
					 * is better to use a bulk transfer
					 * (<= max_msg_size)
					 */
	unsigned int max_outstanding_bulks;
	unsigned int max_services;
	short version;      /* The version of VCHIQ */
	short version_min;  /* The minimum compatible version of VCHIQ */
};
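
/*
 * Illustrative only: bulk_threshold lets a caller choose between a control
 * message and a bulk transfer based on payload size (size is a hypothetical
 * payload length; vchiq_get_config() is declared below):
 *
 *	struct vchiq_config config;
 *
 *	vchiq_get_config(&config);
 *	if (size > config.bulk_threshold)
 *		;	// prefer a bulk transfer
 *	else
 *		;	// a plain message is cheaper
 */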

extern spinlock_t bulk_waiter_spinlock;

extern const char *
get_conn_state_name(enum vchiq_connstate conn_state);

extern struct vchiq_slot_zero *
vchiq_init_slots(struct device *dev, void *mem_base, int mem_size);

extern int
vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero, struct device *dev);

extern int
vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance);

struct vchiq_service *
vchiq_add_service_internal(struct vchiq_state *state,
			   const struct vchiq_service_params_kernel *params,
			   int srvstate, struct vchiq_instance *instance,
			   void (*userdata_term)(void *userdata));

extern int
vchiq_open_service_internal(struct vchiq_service *service, int client_id);

extern int
vchiq_close_service_internal(struct vchiq_service *service, int close_recvd);

extern void
vchiq_terminate_service_internal(struct vchiq_service *service);

extern void
vchiq_free_service_internal(struct vchiq_service *service);

extern void
vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance);

extern void
remote_event_pollall(struct vchiq_state *state);

extern int
vchiq_bulk_xfer_waiting_interruptible(struct vchiq_instance *instance,
				      unsigned int handle, struct bulk_waiter *userdata);

extern int
vchiq_bulk_xfer_blocking_interruptible(struct vchiq_instance *instance, unsigned int handle,
				       void *offset, void __user *uoffset, int size,
				       void __user *userdata, enum vchiq_bulk_dir dir);

extern int
vchiq_bulk_xfer_callback_interruptible(struct vchiq_instance *instance, unsigned int handle,
				       void *offset, void __user *uoffset, int size,
				       enum vchiq_bulk_mode mode, void *userdata,
				       enum vchiq_bulk_dir dir);

extern void
vchiq_dump_state(struct seq_file *f, struct vchiq_state *state);

extern void
request_poll(struct vchiq_state *state, struct vchiq_service *service,
	     int poll_type);

struct vchiq_service *handle_to_service(struct vchiq_instance *instance, unsigned int handle);

extern struct vchiq_service *
find_service_by_handle(struct vchiq_instance *instance, unsigned int handle);

extern struct vchiq_service *
find_service_by_port(struct vchiq_state *state, unsigned int localport);

extern struct vchiq_service *
find_service_for_instance(struct vchiq_instance *instance, unsigned int handle);

extern struct vchiq_service *
find_closed_service_for_instance(struct vchiq_instance *instance, unsigned int handle);

extern struct vchiq_service *
__next_service_by_instance(struct vchiq_state *state,
			   struct vchiq_instance *instance,
			   int *pidx);

extern struct vchiq_service *
next_service_by_instance(struct vchiq_state *state,
			 struct vchiq_instance *instance,
			 int *pidx);

extern void
vchiq_service_get(struct vchiq_service *service);

extern void
vchiq_service_put(struct vchiq_service *service);

extern int
vchiq_queue_message(struct vchiq_instance *instance, unsigned int handle,
		    ssize_t (*copy_callback)(void *context, void *dest,
					     size_t offset, size_t maxsize),
		    void *context,
		    size_t size);

int vchiq_prepare_bulk_data(struct vchiq_instance *instance, struct vchiq_bulk *bulk, void *offset,
			    void __user *uoffset, int size, int dir);

void vchiq_complete_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bulk);

void remote_event_signal(struct vchiq_state *state, struct remote_event *event);

void vchiq_dump_platform_state(struct seq_file *f);

void vchiq_dump_platform_instances(struct vchiq_state *state, struct seq_file *f);

void vchiq_dump_platform_service_state(struct seq_file *f, struct vchiq_service *service);

int vchiq_use_service_internal(struct vchiq_service *service);

int vchiq_release_service_internal(struct vchiq_service *service);

void vchiq_on_remote_use(struct vchiq_state *state);

void vchiq_on_remote_release(struct vchiq_state *state);

int vchiq_platform_init_state(struct vchiq_state *state);

int vchiq_check_service(struct vchiq_service *service);

int vchiq_send_remote_use(struct vchiq_state *state);

int vchiq_send_remote_use_active(struct vchiq_state *state);

void vchiq_platform_conn_state_changed(struct vchiq_state *state,
				       enum vchiq_connstate oldstate,
				       enum vchiq_connstate newstate);

void vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate);

void vchiq_log_dump_mem(struct device *dev, const char *label, u32 addr,
			const void *void_mem, size_t num_bytes);

int vchiq_remove_service(struct vchiq_instance *instance, unsigned int service);

int vchiq_get_client_id(struct vchiq_instance *instance, unsigned int service);

void vchiq_get_config(struct vchiq_config *config);

int vchiq_set_service_option(struct vchiq_instance *instance, unsigned int service,
			     enum vchiq_service_option option, int value);

#endif