1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
3 
4 #include <linux/types.h>
5 #include <linux/completion.h>
6 #include <linux/mutex.h>
7 #include <linux/bitops.h>
8 #include <linux/kthread.h>
9 #include <linux/wait.h>
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/kref.h>
13 #include <linux/rcupdate.h>
14 #include <linux/sched/signal.h>
15 
16 #include "vchiq_arm.h"
17 #include "vchiq_core.h"
18 
19 #define VCHIQ_SLOT_HANDLER_STACK 8192
20 
21 #define VCHIQ_MSG_PADDING            0  /* -                                 */
22 #define VCHIQ_MSG_CONNECT            1  /* -                                 */
23 #define VCHIQ_MSG_OPEN               2  /* + (srcport, -), fourcc, client_id */
24 #define VCHIQ_MSG_OPENACK            3  /* + (srcport, dstport)              */
25 #define VCHIQ_MSG_CLOSE              4  /* + (srcport, dstport)              */
26 #define VCHIQ_MSG_DATA               5  /* + (srcport, dstport)              */
27 #define VCHIQ_MSG_BULK_RX            6  /* + (srcport, dstport), data, size  */
28 #define VCHIQ_MSG_BULK_TX            7  /* + (srcport, dstport), data, size  */
29 #define VCHIQ_MSG_BULK_RX_DONE       8  /* + (srcport, dstport), actual      */
30 #define VCHIQ_MSG_BULK_TX_DONE       9  /* + (srcport, dstport), actual      */
31 #define VCHIQ_MSG_PAUSE             10  /* -                                 */
32 #define VCHIQ_MSG_RESUME            11  /* -                                 */
33 #define VCHIQ_MSG_REMOTE_USE        12  /* -                                 */
34 #define VCHIQ_MSG_REMOTE_RELEASE    13  /* -                                 */
35 #define VCHIQ_MSG_REMOTE_USE_ACTIVE 14  /* -                                 */
36 
37 #define TYPE_SHIFT 24
38 
39 #define VCHIQ_PORT_MAX                 (VCHIQ_MAX_SERVICES - 1)
40 #define VCHIQ_PORT_FREE                0x1000
41 #define VCHIQ_PORT_IS_VALID(port)      ((port) < VCHIQ_PORT_FREE)
42 #define VCHIQ_MAKE_MSG(type, srcport, dstport) \
43 	(((type) << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
44 #define VCHIQ_MSG_TYPE(msgid)          ((unsigned int)(msgid) >> TYPE_SHIFT)
45 #define VCHIQ_MSG_SRCPORT(msgid) \
46 	((unsigned short)(((unsigned int)(msgid) >> 12) & 0xfff))
47 #define VCHIQ_MSG_DSTPORT(msgid) \
48 	((unsigned short)(msgid) & 0xfff)
49 
50 #define MAKE_CONNECT			(VCHIQ_MSG_CONNECT << TYPE_SHIFT)
51 #define MAKE_OPEN(srcport) \
52 	((VCHIQ_MSG_OPEN << TYPE_SHIFT) | ((srcport) << 12))
53 #define MAKE_OPENACK(srcport, dstport) \
54 	((VCHIQ_MSG_OPENACK << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
55 #define MAKE_CLOSE(srcport, dstport) \
56 	((VCHIQ_MSG_CLOSE << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
57 #define MAKE_DATA(srcport, dstport) \
58 	((VCHIQ_MSG_DATA << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
59 #define MAKE_PAUSE			(VCHIQ_MSG_PAUSE << TYPE_SHIFT)
60 #define MAKE_RESUME			(VCHIQ_MSG_RESUME << TYPE_SHIFT)
61 #define MAKE_REMOTE_USE			(VCHIQ_MSG_REMOTE_USE << TYPE_SHIFT)
62 #define MAKE_REMOTE_USE_ACTIVE		(VCHIQ_MSG_REMOTE_USE_ACTIVE << TYPE_SHIFT)
63 
64 /* Ensure the fields are wide enough */
65 static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX))
66 	== 0);
67 static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
68 static_assert((unsigned int)VCHIQ_PORT_MAX <
69 	(unsigned int)VCHIQ_PORT_FREE);
70 
71 #define VCHIQ_MSGID_PADDING            VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
72 #define VCHIQ_MSGID_CLAIMED            0x40000000
73 
74 #define VCHIQ_FOURCC_INVALID           0x00000000
75 #define VCHIQ_FOURCC_IS_LEGAL(fourcc)  ((fourcc) != VCHIQ_FOURCC_INVALID)
76 
77 #define VCHIQ_BULK_ACTUAL_ABORTED -1
78 
79 #if VCHIQ_ENABLE_STATS
80 #define VCHIQ_STATS_INC(state, stat) (state->stats. stat++)
81 #define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++)
82 #define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
83 	(service->stats. stat += addend)
84 #else
85 #define VCHIQ_STATS_INC(state, stat) ((void)0)
86 #define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
87 #define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
88 #endif
89 
90 #define HANDLE_STATE_SHIFT 12
91 
92 #define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
93 #define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
94 #define SLOT_INDEX_FROM_DATA(state, data) \
95 	(((unsigned int)((char *)data - (char *)state->slot_data)) / \
96 	VCHIQ_SLOT_SIZE)
97 #define SLOT_INDEX_FROM_INFO(state, info) \
98 	((unsigned int)(info - state->slot_info))
99 #define SLOT_QUEUE_INDEX_FROM_POS(pos) \
100 	((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
101 #define SLOT_QUEUE_INDEX_FROM_POS_MASKED(pos) \
102 	(SLOT_QUEUE_INDEX_FROM_POS(pos) & VCHIQ_SLOT_QUEUE_MASK)
103 
104 #define BULK_INDEX(x) ((x) & (VCHIQ_NUM_SERVICE_BULKS - 1))
105 
106 #define NO_CLOSE_RECVD	0
107 #define CLOSE_RECVD	1
108 
109 #define NO_RETRY_POLL	0
110 #define RETRY_POLL	1
111 
112 struct vchiq_open_payload {
113 	int fourcc;
114 	int client_id;
115 	short version;
116 	short version_min;
117 };
118 
119 struct vchiq_openack_payload {
120 	short version;
121 };
122 
123 enum {
124 	QMFLAGS_IS_BLOCKING     = BIT(0),
125 	QMFLAGS_NO_MUTEX_LOCK   = BIT(1),
126 	QMFLAGS_NO_MUTEX_UNLOCK = BIT(2)
127 };
128 
129 enum {
130 	VCHIQ_POLL_TERMINATE,
131 	VCHIQ_POLL_REMOVE,
132 	VCHIQ_POLL_TXNOTIFY,
133 	VCHIQ_POLL_RXNOTIFY,
134 	VCHIQ_POLL_COUNT
135 };
136 
137 /* we require this for consistency between endpoints */
138 static_assert(sizeof(struct vchiq_header) == 8);
139 static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
140 
check_sizes(void)141 static inline void check_sizes(void)
142 {
143 	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_SLOT_SIZE);
144 	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS);
145 	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS_PER_SIDE);
146 	BUILD_BUG_ON_NOT_POWER_OF_2(sizeof(struct vchiq_header));
147 	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_CURRENT_BULKS);
148 	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_SERVICE_BULKS);
149 	BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SERVICES);
150 }
151 
152 static unsigned int handle_seq;
153 
154 static const char *const srvstate_names[] = {
155 	"FREE",
156 	"HIDDEN",
157 	"LISTENING",
158 	"OPENING",
159 	"OPEN",
160 	"OPENSYNC",
161 	"CLOSESENT",
162 	"CLOSERECVD",
163 	"CLOSEWAIT",
164 	"CLOSED"
165 };
166 
167 static const char *const reason_names[] = {
168 	"SERVICE_OPENED",
169 	"SERVICE_CLOSED",
170 	"MESSAGE_AVAILABLE",
171 	"BULK_TRANSMIT_DONE",
172 	"BULK_RECEIVE_DONE",
173 	"BULK_TRANSMIT_ABORTED",
174 	"BULK_RECEIVE_ABORTED"
175 };
176 
177 static const char *const conn_state_names[] = {
178 	"DISCONNECTED",
179 	"CONNECTING",
180 	"CONNECTED",
181 	"PAUSING",
182 	"PAUSE_SENT",
183 	"PAUSED",
184 	"RESUMING",
185 	"PAUSE_TIMEOUT",
186 	"RESUME_TIMEOUT"
187 };
188 
189 static void
190 release_message_sync(struct vchiq_state *state, struct vchiq_header *header);
191 
msg_type_str(unsigned int msg_type)192 static const char *msg_type_str(unsigned int msg_type)
193 {
194 	switch (msg_type) {
195 	case VCHIQ_MSG_PADDING:			return "PADDING";
196 	case VCHIQ_MSG_CONNECT:			return "CONNECT";
197 	case VCHIQ_MSG_OPEN:			return "OPEN";
198 	case VCHIQ_MSG_OPENACK:			return "OPENACK";
199 	case VCHIQ_MSG_CLOSE:			return "CLOSE";
200 	case VCHIQ_MSG_DATA:			return "DATA";
201 	case VCHIQ_MSG_BULK_RX:			return "BULK_RX";
202 	case VCHIQ_MSG_BULK_TX:			return "BULK_TX";
203 	case VCHIQ_MSG_BULK_RX_DONE:		return "BULK_RX_DONE";
204 	case VCHIQ_MSG_BULK_TX_DONE:		return "BULK_TX_DONE";
205 	case VCHIQ_MSG_PAUSE:			return "PAUSE";
206 	case VCHIQ_MSG_RESUME:			return "RESUME";
207 	case VCHIQ_MSG_REMOTE_USE:		return "REMOTE_USE";
208 	case VCHIQ_MSG_REMOTE_RELEASE:		return "REMOTE_RELEASE";
209 	case VCHIQ_MSG_REMOTE_USE_ACTIVE:	return "REMOTE_USE_ACTIVE";
210 	}
211 	return "???";
212 }
213 
214 static inline void
set_service_state(struct vchiq_service * service,int newstate)215 set_service_state(struct vchiq_service *service, int newstate)
216 {
217 	dev_dbg(service->state->dev, "core: %d: srv:%d %s->%s\n",
218 		service->state->id, service->localport,
219 		srvstate_names[service->srvstate],
220 		srvstate_names[newstate]);
221 	service->srvstate = newstate;
222 }
223 
handle_to_service(struct vchiq_instance * instance,unsigned int handle)224 struct vchiq_service *handle_to_service(struct vchiq_instance *instance, unsigned int handle)
225 {
226 	int idx = handle & (VCHIQ_MAX_SERVICES - 1);
227 
228 	return rcu_dereference(instance->state->services[idx]);
229 }
230 
231 struct vchiq_service *
find_service_by_handle(struct vchiq_instance * instance,unsigned int handle)232 find_service_by_handle(struct vchiq_instance *instance, unsigned int handle)
233 {
234 	struct vchiq_service *service;
235 
236 	rcu_read_lock();
237 	service = handle_to_service(instance, handle);
238 	if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
239 	    service->handle == handle &&
240 	    kref_get_unless_zero(&service->ref_count)) {
241 		service = rcu_pointer_handoff(service);
242 		rcu_read_unlock();
243 		return service;
244 	}
245 	rcu_read_unlock();
246 	dev_dbg(instance->state->dev, "core: Invalid service handle 0x%x\n", handle);
247 	return NULL;
248 }
249 
250 struct vchiq_service *
find_service_by_port(struct vchiq_state * state,unsigned int localport)251 find_service_by_port(struct vchiq_state *state, unsigned int localport)
252 {
253 	if (localport <= VCHIQ_PORT_MAX) {
254 		struct vchiq_service *service;
255 
256 		rcu_read_lock();
257 		service = rcu_dereference(state->services[localport]);
258 		if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
259 		    kref_get_unless_zero(&service->ref_count)) {
260 			service = rcu_pointer_handoff(service);
261 			rcu_read_unlock();
262 			return service;
263 		}
264 		rcu_read_unlock();
265 	}
266 	dev_dbg(state->dev, "core: Invalid port %u\n", localport);
267 	return NULL;
268 }
269 
270 struct vchiq_service *
find_service_for_instance(struct vchiq_instance * instance,unsigned int handle)271 find_service_for_instance(struct vchiq_instance *instance, unsigned int handle)
272 {
273 	struct vchiq_service *service;
274 
275 	rcu_read_lock();
276 	service = handle_to_service(instance, handle);
277 	if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
278 	    service->handle == handle &&
279 	    service->instance == instance &&
280 	    kref_get_unless_zero(&service->ref_count)) {
281 		service = rcu_pointer_handoff(service);
282 		rcu_read_unlock();
283 		return service;
284 	}
285 	rcu_read_unlock();
286 	dev_dbg(instance->state->dev, "core: Invalid service handle 0x%x\n", handle);
287 	return NULL;
288 }
289 
290 struct vchiq_service *
find_closed_service_for_instance(struct vchiq_instance * instance,unsigned int handle)291 find_closed_service_for_instance(struct vchiq_instance *instance, unsigned int handle)
292 {
293 	struct vchiq_service *service;
294 
295 	rcu_read_lock();
296 	service = handle_to_service(instance, handle);
297 	if (service &&
298 	    (service->srvstate == VCHIQ_SRVSTATE_FREE ||
299 	     service->srvstate == VCHIQ_SRVSTATE_CLOSED) &&
300 	    service->handle == handle &&
301 	    service->instance == instance &&
302 	    kref_get_unless_zero(&service->ref_count)) {
303 		service = rcu_pointer_handoff(service);
304 		rcu_read_unlock();
305 		return service;
306 	}
307 	rcu_read_unlock();
308 	dev_dbg(instance->state->dev, "core: Invalid service handle 0x%x\n", handle);
309 	return service;
310 }
311 
312 struct vchiq_service *
__next_service_by_instance(struct vchiq_state * state,struct vchiq_instance * instance,int * pidx)313 __next_service_by_instance(struct vchiq_state *state,
314 			   struct vchiq_instance *instance,
315 			   int *pidx)
316 {
317 	struct vchiq_service *service = NULL;
318 	int idx = *pidx;
319 
320 	while (idx < state->unused_service) {
321 		struct vchiq_service *srv;
322 
323 		srv = rcu_dereference(state->services[idx]);
324 		idx++;
325 		if (srv && srv->srvstate != VCHIQ_SRVSTATE_FREE &&
326 		    srv->instance == instance) {
327 			service = srv;
328 			break;
329 		}
330 	}
331 
332 	*pidx = idx;
333 	return service;
334 }
335 
336 struct vchiq_service *
next_service_by_instance(struct vchiq_state * state,struct vchiq_instance * instance,int * pidx)337 next_service_by_instance(struct vchiq_state *state,
338 			 struct vchiq_instance *instance,
339 			 int *pidx)
340 {
341 	struct vchiq_service *service;
342 
343 	rcu_read_lock();
344 	while (1) {
345 		service = __next_service_by_instance(state, instance, pidx);
346 		if (!service)
347 			break;
348 		if (kref_get_unless_zero(&service->ref_count)) {
349 			service = rcu_pointer_handoff(service);
350 			break;
351 		}
352 	}
353 	rcu_read_unlock();
354 	return service;
355 }
356 
357 void
vchiq_service_get(struct vchiq_service * service)358 vchiq_service_get(struct vchiq_service *service)
359 {
360 	if (!service) {
361 		WARN(1, "%s service is NULL\n", __func__);
362 		return;
363 	}
364 	kref_get(&service->ref_count);
365 }
366 
service_release(struct kref * kref)367 static void service_release(struct kref *kref)
368 {
369 	struct vchiq_service *service =
370 		container_of(kref, struct vchiq_service, ref_count);
371 	struct vchiq_state *state = service->state;
372 
373 	WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
374 	rcu_assign_pointer(state->services[service->localport], NULL);
375 	if (service->userdata_term)
376 		service->userdata_term(service->base.userdata);
377 	kfree_rcu(service, rcu);
378 }
379 
380 void
vchiq_service_put(struct vchiq_service * service)381 vchiq_service_put(struct vchiq_service *service)
382 {
383 	if (!service) {
384 		WARN(1, "%s: service is NULL\n", __func__);
385 		return;
386 	}
387 	kref_put(&service->ref_count, service_release);
388 }
389 
390 int
vchiq_get_client_id(struct vchiq_instance * instance,unsigned int handle)391 vchiq_get_client_id(struct vchiq_instance *instance, unsigned int handle)
392 {
393 	struct vchiq_service *service;
394 	int id;
395 
396 	rcu_read_lock();
397 	service = handle_to_service(instance, handle);
398 	id = service ? service->client_id : 0;
399 	rcu_read_unlock();
400 	return id;
401 }
402 
403 void *
vchiq_get_service_userdata(struct vchiq_instance * instance,unsigned int handle)404 vchiq_get_service_userdata(struct vchiq_instance *instance, unsigned int handle)
405 {
406 	void *userdata;
407 	struct vchiq_service *service;
408 
409 	rcu_read_lock();
410 	service = handle_to_service(instance, handle);
411 	userdata = service ? service->base.userdata : NULL;
412 	rcu_read_unlock();
413 	return userdata;
414 }
415 EXPORT_SYMBOL(vchiq_get_service_userdata);
416 
417 static void
mark_service_closing_internal(struct vchiq_service * service,int sh_thread)418 mark_service_closing_internal(struct vchiq_service *service, int sh_thread)
419 {
420 	struct vchiq_state *state = service->state;
421 	struct vchiq_service_quota *quota;
422 
423 	service->closing = 1;
424 
425 	/* Synchronise with other threads. */
426 	mutex_lock(&state->recycle_mutex);
427 	mutex_unlock(&state->recycle_mutex);
428 	if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
429 		/*
430 		 * If we're pausing then the slot_mutex is held until resume
431 		 * by the slot handler.  Therefore don't try to acquire this
432 		 * mutex if we're the slot handler and in the pause sent state.
433 		 * We don't need to in this case anyway.
434 		 */
435 		mutex_lock(&state->slot_mutex);
436 		mutex_unlock(&state->slot_mutex);
437 	}
438 
439 	/* Unblock any sending thread. */
440 	quota = &state->service_quotas[service->localport];
441 	complete(&quota->quota_event);
442 }
443 
444 static void
mark_service_closing(struct vchiq_service * service)445 mark_service_closing(struct vchiq_service *service)
446 {
447 	mark_service_closing_internal(service, 0);
448 }
449 
450 static inline int
make_service_callback(struct vchiq_service * service,enum vchiq_reason reason,struct vchiq_header * header,void * bulk_userdata)451 make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
452 		      struct vchiq_header *header, void *bulk_userdata)
453 {
454 	int status;
455 
456 	dev_dbg(service->state->dev, "core: %d: callback:%d (%s, %pK, %pK)\n",
457 		service->state->id, service->localport, reason_names[reason],
458 		header, bulk_userdata);
459 	status = service->base.callback(service->instance, reason, header, service->handle,
460 					bulk_userdata);
461 	if (status && (status != -EAGAIN)) {
462 		dev_warn(service->state->dev,
463 			 "core: %d: ignoring ERROR from callback to service %x\n",
464 			 service->state->id, service->handle);
465 		status = 0;
466 	}
467 
468 	if (reason != VCHIQ_MESSAGE_AVAILABLE)
469 		vchiq_release_message(service->instance, service->handle, header);
470 
471 	return status;
472 }
473 
474 inline void
vchiq_set_conn_state(struct vchiq_state * state,enum vchiq_connstate newstate)475 vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
476 {
477 	enum vchiq_connstate oldstate = state->conn_state;
478 
479 	dev_dbg(state->dev, "core: %d: %s->%s\n",
480 		state->id, conn_state_names[oldstate], conn_state_names[newstate]);
481 	state->conn_state = newstate;
482 	vchiq_platform_conn_state_changed(state, oldstate, newstate);
483 }
484 
485 /* This initialises a single remote_event, and the associated wait_queue. */
486 static inline void
remote_event_create(wait_queue_head_t * wq,struct remote_event * event)487 remote_event_create(wait_queue_head_t *wq, struct remote_event *event)
488 {
489 	event->armed = 0;
490 	/*
491 	 * Don't clear the 'fired' flag because it may already have been set
492 	 * by the other side.
493 	 */
494 	init_waitqueue_head(wq);
495 }
496 
497 /*
498  * All the event waiting routines in VCHIQ used a custom semaphore
499  * implementation that filtered most signals. This achieved a behaviour similar
500  * to the "killable" family of functions. While cleaning up this code all the
501  * routines where switched to the "interruptible" family of functions, as the
502  * former was deemed unjustified and the use "killable" set all VCHIQ's
503  * threads in D state.
504  *
505  * Returns: 0 on success, a negative error code on failure
506  */
507 static inline int
remote_event_wait(wait_queue_head_t * wq,struct remote_event * event)508 remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
509 {
510 	int ret = 0;
511 
512 	if (!event->fired) {
513 		event->armed = 1;
514 		dsb(sy);
515 		ret = wait_event_interruptible(*wq, event->fired);
516 		if (ret) {
517 			event->armed = 0;
518 			return ret;
519 		}
520 		event->armed = 0;
521 		/* Ensure that the peer sees that we are not waiting (armed == 0). */
522 		wmb();
523 	}
524 
525 	event->fired = 0;
526 	return ret;
527 }
528 
529 /*
530  * Acknowledge that the event has been signalled, and wake any waiters. Usually
531  * called as a result of the doorbell being rung.
532  */
533 static inline void
remote_event_signal_local(wait_queue_head_t * wq,struct remote_event * event)534 remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
535 {
536 	event->fired = 1;
537 	event->armed = 0;
538 	wake_up_all(wq);
539 }
540 
541 /* Check if a single event has been signalled, waking the waiters if it has. */
542 static inline void
remote_event_poll(wait_queue_head_t * wq,struct remote_event * event)543 remote_event_poll(wait_queue_head_t *wq, struct remote_event *event)
544 {
545 	if (event->fired && event->armed)
546 		remote_event_signal_local(wq, event);
547 }
548 
549 /*
550  * VCHIQ used a small, fixed number of remote events. It is simplest to
551  * enumerate them here for polling.
552  */
553 void
remote_event_pollall(struct vchiq_state * state)554 remote_event_pollall(struct vchiq_state *state)
555 {
556 	remote_event_poll(&state->sync_trigger_event, &state->local->sync_trigger);
557 	remote_event_poll(&state->sync_release_event, &state->local->sync_release);
558 	remote_event_poll(&state->trigger_event, &state->local->trigger);
559 	remote_event_poll(&state->recycle_event, &state->local->recycle);
560 }
561 
562 /*
563  * Round up message sizes so that any space at the end of a slot is always big
564  * enough for a header. This relies on header size being a power of two, which
565  * has been verified earlier by a static assertion.
566  */
567 
568 static inline size_t
calc_stride(size_t size)569 calc_stride(size_t size)
570 {
571 	/* Allow room for the header */
572 	size += sizeof(struct vchiq_header);
573 
574 	/* Round up */
575 	return (size + sizeof(struct vchiq_header) - 1) &
576 		~(sizeof(struct vchiq_header) - 1);
577 }
578 
579 /* Called by the slot handler thread */
580 static struct vchiq_service *
get_listening_service(struct vchiq_state * state,int fourcc)581 get_listening_service(struct vchiq_state *state, int fourcc)
582 {
583 	int i;
584 
585 	WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
586 
587 	rcu_read_lock();
588 	for (i = 0; i < state->unused_service; i++) {
589 		struct vchiq_service *service;
590 
591 		service = rcu_dereference(state->services[i]);
592 		if (service &&
593 		    service->public_fourcc == fourcc &&
594 		    (service->srvstate == VCHIQ_SRVSTATE_LISTENING ||
595 		     (service->srvstate == VCHIQ_SRVSTATE_OPEN &&
596 		      service->remoteport == VCHIQ_PORT_FREE)) &&
597 		    kref_get_unless_zero(&service->ref_count)) {
598 			service = rcu_pointer_handoff(service);
599 			rcu_read_unlock();
600 			return service;
601 		}
602 	}
603 	rcu_read_unlock();
604 	return NULL;
605 }
606 
607 /* Called by the slot handler thread */
608 static struct vchiq_service *
get_connected_service(struct vchiq_state * state,unsigned int port)609 get_connected_service(struct vchiq_state *state, unsigned int port)
610 {
611 	int i;
612 
613 	rcu_read_lock();
614 	for (i = 0; i < state->unused_service; i++) {
615 		struct vchiq_service *service =
616 			rcu_dereference(state->services[i]);
617 
618 		if (service && service->srvstate == VCHIQ_SRVSTATE_OPEN &&
619 		    service->remoteport == port &&
620 		    kref_get_unless_zero(&service->ref_count)) {
621 			service = rcu_pointer_handoff(service);
622 			rcu_read_unlock();
623 			return service;
624 		}
625 	}
626 	rcu_read_unlock();
627 	return NULL;
628 }
629 
630 inline void
request_poll(struct vchiq_state * state,struct vchiq_service * service,int poll_type)631 request_poll(struct vchiq_state *state, struct vchiq_service *service,
632 	     int poll_type)
633 {
634 	u32 value;
635 	int index;
636 
637 	if (!service)
638 		goto skip_service;
639 
640 	do {
641 		value = atomic_read(&service->poll_flags);
642 	} while (atomic_cmpxchg(&service->poll_flags, value,
643 		 value | BIT(poll_type)) != value);
644 
645 	index = BITSET_WORD(service->localport);
646 	do {
647 		value = atomic_read(&state->poll_services[index]);
648 	} while (atomic_cmpxchg(&state->poll_services[index],
649 		 value, value | BIT(service->localport & 0x1f)) != value);
650 
651 skip_service:
652 	state->poll_needed = 1;
653 	/* Ensure the slot handler thread sees the poll_needed flag. */
654 	wmb();
655 
656 	/* ... and ensure the slot handler runs. */
657 	remote_event_signal_local(&state->trigger_event, &state->local->trigger);
658 }
659 
660 /*
661  * Called from queue_message, by the slot handler and application threads,
662  * with slot_mutex held
663  */
664 static struct vchiq_header *
reserve_space(struct vchiq_state * state,size_t space,int is_blocking)665 reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
666 {
667 	struct vchiq_shared_state *local = state->local;
668 	int tx_pos = state->local_tx_pos;
669 	int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
670 
671 	if (space > slot_space) {
672 		struct vchiq_header *header;
673 		/* Fill the remaining space with padding */
674 		WARN_ON(!state->tx_data);
675 		header = (struct vchiq_header *)
676 			(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
677 		header->msgid = VCHIQ_MSGID_PADDING;
678 		header->size = slot_space - sizeof(struct vchiq_header);
679 
680 		tx_pos += slot_space;
681 	}
682 
683 	/* If necessary, get the next slot. */
684 	if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
685 		int slot_index;
686 
687 		/* If there is no free slot... */
688 
689 		if (!try_wait_for_completion(&state->slot_available_event)) {
690 			/* ...wait for one. */
691 
692 			VCHIQ_STATS_INC(state, slot_stalls);
693 
694 			/* But first, flush through the last slot. */
695 			state->local_tx_pos = tx_pos;
696 			local->tx_pos = tx_pos;
697 			remote_event_signal(state, &state->remote->trigger);
698 
699 			if (!is_blocking ||
700 			    (wait_for_completion_interruptible(&state->slot_available_event)))
701 				return NULL; /* No space available */
702 		}
703 
704 		if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) {
705 			complete(&state->slot_available_event);
706 			dev_warn(state->dev, "%s: invalid tx_pos: %d\n",
707 				 __func__, tx_pos);
708 			return NULL;
709 		}
710 
711 		slot_index = local->slot_queue[SLOT_QUEUE_INDEX_FROM_POS_MASKED(tx_pos)];
712 		state->tx_data =
713 			(char *)SLOT_DATA_FROM_INDEX(state, slot_index);
714 	}
715 
716 	state->local_tx_pos = tx_pos + space;
717 
718 	return (struct vchiq_header *)(state->tx_data +
719 						(tx_pos & VCHIQ_SLOT_MASK));
720 }
721 
722 static void
process_free_data_message(struct vchiq_state * state,u32 * service_found,struct vchiq_header * header)723 process_free_data_message(struct vchiq_state *state, u32 *service_found,
724 			  struct vchiq_header *header)
725 {
726 	int msgid = header->msgid;
727 	int port = VCHIQ_MSG_SRCPORT(msgid);
728 	struct vchiq_service_quota *quota = &state->service_quotas[port];
729 	int count;
730 
731 	spin_lock(&state->quota_spinlock);
732 	count = quota->message_use_count;
733 	if (count > 0)
734 		quota->message_use_count = count - 1;
735 	spin_unlock(&state->quota_spinlock);
736 
737 	if (count == quota->message_quota) {
738 		/*
739 		 * Signal the service that it
740 		 * has dropped below its quota
741 		 */
742 		complete(&quota->quota_event);
743 	} else if (count == 0) {
744 		dev_err(state->dev,
745 			"core: service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)\n",
746 			port, quota->message_use_count, header, msgid,
747 			header->msgid, header->size);
748 		WARN(1, "invalid message use count\n");
749 	}
750 	if (!BITSET_IS_SET(service_found, port)) {
751 		/* Set the found bit for this service */
752 		BITSET_SET(service_found, port);
753 
754 		spin_lock(&state->quota_spinlock);
755 		count = quota->slot_use_count;
756 		if (count > 0)
757 			quota->slot_use_count = count - 1;
758 		spin_unlock(&state->quota_spinlock);
759 
760 		if (count > 0) {
761 			/*
762 			 * Signal the service in case
763 			 * it has dropped below its quota
764 			 */
765 			complete(&quota->quota_event);
766 			dev_dbg(state->dev, "core: %d: pfq:%d %x@%pK - slot_use->%d\n",
767 				state->id, port, header->size, header, count - 1);
768 		} else {
769 			dev_err(state->dev,
770 				"core: service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)\n",
771 				port, count, header, msgid, header->msgid, header->size);
772 			WARN(1, "bad slot use count\n");
773 		}
774 	}
775 }
776 
777 /* Called by the recycle thread. */
778 static void
process_free_queue(struct vchiq_state * state,u32 * service_found,size_t length)779 process_free_queue(struct vchiq_state *state, u32 *service_found,
780 		   size_t length)
781 {
782 	struct vchiq_shared_state *local = state->local;
783 	int slot_queue_available;
784 
785 	/*
786 	 * Find slots which have been freed by the other side, and return them
787 	 * to the available queue.
788 	 */
789 	slot_queue_available = state->slot_queue_available;
790 
791 	/*
792 	 * Use a memory barrier to ensure that any state that may have been
793 	 * modified by another thread is not masked by stale prefetched
794 	 * values.
795 	 */
796 	mb();
797 
798 	while (slot_queue_available != local->slot_queue_recycle) {
799 		unsigned int pos;
800 		int slot_index = local->slot_queue[slot_queue_available &
801 			VCHIQ_SLOT_QUEUE_MASK];
802 		char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
803 		int data_found = 0;
804 
805 		slot_queue_available++;
806 		/*
807 		 * Beware of the address dependency - data is calculated
808 		 * using an index written by the other side.
809 		 */
810 		rmb();
811 
812 		dev_dbg(state->dev, "core: %d: pfq %d=%pK %x %x\n",
813 			state->id, slot_index, data, local->slot_queue_recycle,
814 			slot_queue_available);
815 
816 		/* Initialise the bitmask for services which have used this slot */
817 		memset(service_found, 0, length);
818 
819 		pos = 0;
820 
821 		while (pos < VCHIQ_SLOT_SIZE) {
822 			struct vchiq_header *header =
823 				(struct vchiq_header *)(data + pos);
824 			int msgid = header->msgid;
825 
826 			if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
827 				process_free_data_message(state, service_found,
828 							  header);
829 				data_found = 1;
830 			}
831 
832 			pos += calc_stride(header->size);
833 			if (pos > VCHIQ_SLOT_SIZE) {
834 				dev_err(state->dev,
835 					"core: pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x\n",
836 					pos, header, msgid, header->msgid, header->size);
837 				WARN(1, "invalid slot position\n");
838 			}
839 		}
840 
841 		if (data_found) {
842 			int count;
843 
844 			spin_lock(&state->quota_spinlock);
845 			count = state->data_use_count;
846 			if (count > 0)
847 				state->data_use_count = count - 1;
848 			spin_unlock(&state->quota_spinlock);
849 			if (count == state->data_quota)
850 				complete(&state->data_quota_event);
851 		}
852 
853 		/*
854 		 * Don't allow the slot to be reused until we are no
855 		 * longer interested in it.
856 		 */
857 		mb();
858 
859 		state->slot_queue_available = slot_queue_available;
860 		complete(&state->slot_available_event);
861 	}
862 }
863 
864 static ssize_t
memcpy_copy_callback(void * context,void * dest,size_t offset,size_t maxsize)865 memcpy_copy_callback(void *context, void *dest, size_t offset, size_t maxsize)
866 {
867 	memcpy(dest + offset, context + offset, maxsize);
868 	return maxsize;
869 }
870 
871 static ssize_t
copy_message_data(ssize_t (* copy_callback)(void * context,void * dest,size_t offset,size_t maxsize),void * context,void * dest,size_t size)872 copy_message_data(ssize_t (*copy_callback)(void *context, void *dest, size_t offset,
873 					   size_t maxsize),
874 	void *context,
875 	void *dest,
876 	size_t size)
877 {
878 	size_t pos = 0;
879 
880 	while (pos < size) {
881 		ssize_t callback_result;
882 		size_t max_bytes = size - pos;
883 
884 		callback_result = copy_callback(context, dest + pos, pos,
885 						max_bytes);
886 
887 		if (callback_result < 0)
888 			return callback_result;
889 
890 		if (!callback_result)
891 			return -EIO;
892 
893 		if (callback_result > max_bytes)
894 			return -EIO;
895 
896 		pos += callback_result;
897 	}
898 
899 	return size;
900 }
901 
902 /* Called by the slot handler and application threads */
903 static int
queue_message(struct vchiq_state * state,struct vchiq_service * service,int msgid,ssize_t (* copy_callback)(void * context,void * dest,size_t offset,size_t maxsize),void * context,size_t size,int flags)904 queue_message(struct vchiq_state *state, struct vchiq_service *service,
905 	      int msgid,
906 	      ssize_t (*copy_callback)(void *context, void *dest,
907 				       size_t offset, size_t maxsize),
908 	      void *context, size_t size, int flags)
909 {
910 	struct vchiq_shared_state *local;
911 	struct vchiq_service_quota *quota = NULL;
912 	struct vchiq_header *header;
913 	int type = VCHIQ_MSG_TYPE(msgid);
914 
915 	size_t stride;
916 
917 	local = state->local;
918 
919 	stride = calc_stride(size);
920 
921 	WARN_ON(stride > VCHIQ_SLOT_SIZE);
922 
923 	if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
924 	    mutex_lock_killable(&state->slot_mutex))
925 		return -EAGAIN;
926 
927 	if (type == VCHIQ_MSG_DATA) {
928 		int tx_end_index;
929 
930 		if (!service) {
931 			WARN(1, "%s: service is NULL\n", __func__);
932 			mutex_unlock(&state->slot_mutex);
933 			return -EINVAL;
934 		}
935 
936 		WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
937 				 QMFLAGS_NO_MUTEX_UNLOCK));
938 
939 		if (service->closing) {
940 			/* The service has been closed */
941 			mutex_unlock(&state->slot_mutex);
942 			return -EHOSTDOWN;
943 		}
944 
945 		quota = &state->service_quotas[service->localport];
946 
947 		spin_lock(&state->quota_spinlock);
948 
949 		/*
950 		 * Ensure this service doesn't use more than its quota of
951 		 * messages or slots
952 		 */
953 		tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
954 
955 		/*
956 		 * Ensure data messages don't use more than their quota of
957 		 * slots
958 		 */
959 		while ((tx_end_index != state->previous_data_index) &&
960 		       (state->data_use_count == state->data_quota)) {
961 			VCHIQ_STATS_INC(state, data_stalls);
962 			spin_unlock(&state->quota_spinlock);
963 			mutex_unlock(&state->slot_mutex);
964 
965 			if (wait_for_completion_interruptible(&state->data_quota_event))
966 				return -EAGAIN;
967 
968 			mutex_lock(&state->slot_mutex);
969 			spin_lock(&state->quota_spinlock);
970 			tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
971 			if ((tx_end_index == state->previous_data_index) ||
972 			    (state->data_use_count < state->data_quota)) {
973 				/* Pass the signal on to other waiters */
974 				complete(&state->data_quota_event);
975 				break;
976 			}
977 		}
978 
979 		while ((quota->message_use_count == quota->message_quota) ||
980 		       ((tx_end_index != quota->previous_tx_index) &&
981 			(quota->slot_use_count == quota->slot_quota))) {
982 			spin_unlock(&state->quota_spinlock);
983 			dev_dbg(state->dev,
984 				"core: %d: qm:%d %s,%zx - quota stall (msg %d, slot %d)\n",
985 				state->id, service->localport, msg_type_str(type), size,
986 				quota->message_use_count, quota->slot_use_count);
987 			VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
988 			mutex_unlock(&state->slot_mutex);
989 			if (wait_for_completion_interruptible(&quota->quota_event))
990 				return -EAGAIN;
991 			if (service->closing)
992 				return -EHOSTDOWN;
993 			if (mutex_lock_killable(&state->slot_mutex))
994 				return -EAGAIN;
995 			if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
996 				/* The service has been closed */
997 				mutex_unlock(&state->slot_mutex);
998 				return -EHOSTDOWN;
999 			}
1000 			spin_lock(&state->quota_spinlock);
1001 			tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
1002 		}
1003 
1004 		spin_unlock(&state->quota_spinlock);
1005 	}
1006 
1007 	header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING);
1008 
1009 	if (!header) {
1010 		if (service)
1011 			VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
1012 		/*
1013 		 * In the event of a failure, return the mutex to the
1014 		 * state it was in
1015 		 */
1016 		if (!(flags & QMFLAGS_NO_MUTEX_LOCK))
1017 			mutex_unlock(&state->slot_mutex);
1018 		return -EAGAIN;
1019 	}
1020 
1021 	if (type == VCHIQ_MSG_DATA) {
1022 		ssize_t callback_result;
1023 		int tx_end_index;
1024 		int slot_use_count;
1025 
1026 		dev_dbg(state->dev, "core: %d: qm %s@%pK,%zx (%d->%d)\n",
1027 			state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
1028 			VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
1029 
1030 		WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
1031 				 QMFLAGS_NO_MUTEX_UNLOCK));
1032 
1033 		callback_result =
1034 			copy_message_data(copy_callback, context,
1035 					  header->data, size);
1036 
1037 		if (callback_result < 0) {
1038 			mutex_unlock(&state->slot_mutex);
1039 			VCHIQ_SERVICE_STATS_INC(service, error_count);
1040 			return -EINVAL;
1041 		}
1042 
1043 		vchiq_log_dump_mem(state->dev, "Sent", 0,
1044 				   header->data,
1045 				   min_t(size_t, 16, callback_result));
1046 
1047 		spin_lock(&state->quota_spinlock);
1048 		quota->message_use_count++;
1049 
1050 		tx_end_index =
1051 			SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
1052 
1053 		/*
1054 		 * If this transmission can't fit in the last slot used by any
1055 		 * service, the data_use_count must be increased.
1056 		 */
1057 		if (tx_end_index != state->previous_data_index) {
1058 			state->previous_data_index = tx_end_index;
1059 			state->data_use_count++;
1060 		}
1061 
1062 		/*
1063 		 * If this isn't the same slot last used by this service,
1064 		 * the service's slot_use_count must be increased.
1065 		 */
1066 		if (tx_end_index != quota->previous_tx_index) {
1067 			quota->previous_tx_index = tx_end_index;
1068 			slot_use_count = ++quota->slot_use_count;
1069 		} else {
1070 			slot_use_count = 0;
1071 		}
1072 
1073 		spin_unlock(&state->quota_spinlock);
1074 
1075 		if (slot_use_count)
1076 			dev_dbg(state->dev, "core: %d: qm:%d %s,%zx - slot_use->%d (hdr %p)\n",
1077 				state->id, service->localport, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1078 				size, slot_use_count, header);
1079 
1080 		VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1081 		VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1082 	} else {
1083 		dev_dbg(state->dev, "core: %d: qm %s@%pK,%zx (%d->%d)\n",
1084 			state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
1085 			VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
1086 		if (size != 0) {
1087 			/*
1088 			 * It is assumed for now that this code path
1089 			 * only happens from calls inside this file.
1090 			 *
1091 			 * External callers are through the vchiq_queue_message
1092 			 * path which always sets the type to be VCHIQ_MSG_DATA
1093 			 *
1094 			 * At first glance this appears to be correct but
1095 			 * more review is needed.
1096 			 */
1097 			copy_message_data(copy_callback, context,
1098 					  header->data, size);
1099 		}
1100 		VCHIQ_STATS_INC(state, ctrl_tx_count);
1101 	}
1102 
1103 	header->msgid = msgid;
1104 	header->size = size;
1105 
1106 	{
1107 		int svc_fourcc;
1108 
1109 		svc_fourcc = service
1110 			? service->base.fourcc
1111 			: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1112 
1113 		dev_dbg(state->dev, "core_msg: Sent Msg %s(%u) to %p4cc s:%u d:%d len:%zu\n",
1114 			msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
1115 			&svc_fourcc, VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid), size);
1116 	}
1117 
1118 	/* Make sure the new header is visible to the peer. */
1119 	wmb();
1120 
1121 	/* Make the new tx_pos visible to the peer. */
1122 	local->tx_pos = state->local_tx_pos;
1123 	wmb();
1124 
1125 	if (service && (type == VCHIQ_MSG_CLOSE))
1126 		set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
1127 
1128 	if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK))
1129 		mutex_unlock(&state->slot_mutex);
1130 
1131 	remote_event_signal(state, &state->remote->trigger);
1132 
1133 	return 0;
1134 }
1135 
1136 /* Called by the slot handler and application threads */
1137 static int
queue_message_sync(struct vchiq_state * state,struct vchiq_service * service,int msgid,ssize_t (* copy_callback)(void * context,void * dest,size_t offset,size_t maxsize),void * context,int size)1138 queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
1139 		   int msgid,
1140 		   ssize_t (*copy_callback)(void *context, void *dest,
1141 					    size_t offset, size_t maxsize),
1142 		   void *context, int size)
1143 {
1144 	struct vchiq_shared_state *local;
1145 	struct vchiq_header *header;
1146 	ssize_t callback_result;
1147 	int svc_fourcc;
1148 	int ret;
1149 
1150 	local = state->local;
1151 
1152 	if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME &&
1153 	    mutex_lock_killable(&state->sync_mutex))
1154 		return -EAGAIN;
1155 
1156 	ret = remote_event_wait(&state->sync_release_event, &local->sync_release);
1157 	if (ret)
1158 		return ret;
1159 
1160 	/* Ensure that reads don't overtake the remote_event_wait. */
1161 	rmb();
1162 
1163 	header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
1164 		local->slot_sync);
1165 
1166 	{
1167 		int oldmsgid = header->msgid;
1168 
1169 		if (oldmsgid != VCHIQ_MSGID_PADDING)
1170 			dev_err(state->dev, "core: %d: qms - msgid %x, not PADDING\n",
1171 				state->id, oldmsgid);
1172 	}
1173 
1174 	dev_dbg(state->dev, "sync: %d: qms %s@%pK,%x (%d->%d)\n",
1175 		state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
1176 		VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
1177 
1178 	callback_result =
1179 		copy_message_data(copy_callback, context,
1180 				  header->data, size);
1181 
1182 	if (callback_result < 0) {
1183 		mutex_unlock(&state->slot_mutex);
1184 		VCHIQ_SERVICE_STATS_INC(service, error_count);
1185 		return -EINVAL;
1186 	}
1187 
1188 	if (service) {
1189 		vchiq_log_dump_mem(state->dev, "Sent", 0,
1190 				   header->data,
1191 				   min_t(size_t, 16, callback_result));
1192 
1193 		VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1194 		VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1195 	} else {
1196 		VCHIQ_STATS_INC(state, ctrl_tx_count);
1197 	}
1198 
1199 	header->size = size;
1200 	header->msgid = msgid;
1201 
1202 	svc_fourcc = service ? service->base.fourcc
1203 			     : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1204 
1205 	dev_dbg(state->dev,
1206 		"sync: Sent Sync Msg %s(%u) to %p4cc s:%u d:%d len:%d\n",
1207 		msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
1208 		&svc_fourcc, VCHIQ_MSG_SRCPORT(msgid),
1209 		VCHIQ_MSG_DSTPORT(msgid), size);
1210 
1211 	remote_event_signal(state, &state->remote->sync_trigger);
1212 
1213 	if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
1214 		mutex_unlock(&state->sync_mutex);
1215 
1216 	return 0;
1217 }
1218 
1219 static inline void
claim_slot(struct vchiq_slot_info * slot)1220 claim_slot(struct vchiq_slot_info *slot)
1221 {
1222 	slot->use_count++;
1223 }
1224 
1225 static void
release_slot(struct vchiq_state * state,struct vchiq_slot_info * slot_info,struct vchiq_header * header,struct vchiq_service * service)1226 release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
1227 	     struct vchiq_header *header, struct vchiq_service *service)
1228 {
1229 	mutex_lock(&state->recycle_mutex);
1230 
1231 	if (header) {
1232 		int msgid = header->msgid;
1233 
1234 		if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) || (service && service->closing)) {
1235 			mutex_unlock(&state->recycle_mutex);
1236 			return;
1237 		}
1238 
1239 		/* Rewrite the message header to prevent a double release */
1240 		header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
1241 	}
1242 
1243 	slot_info->release_count++;
1244 
1245 	if (slot_info->release_count == slot_info->use_count) {
1246 		int slot_queue_recycle;
1247 		/* Add to the freed queue */
1248 
1249 		/*
1250 		 * A read barrier is necessary here to prevent speculative
1251 		 * fetches of remote->slot_queue_recycle from overtaking the
1252 		 * mutex.
1253 		 */
1254 		rmb();
1255 
1256 		slot_queue_recycle = state->remote->slot_queue_recycle;
1257 		state->remote->slot_queue[slot_queue_recycle &
1258 			VCHIQ_SLOT_QUEUE_MASK] =
1259 			SLOT_INDEX_FROM_INFO(state, slot_info);
1260 		state->remote->slot_queue_recycle = slot_queue_recycle + 1;
1261 		dev_dbg(state->dev, "core: %d: %d - recycle->%x\n",
1262 			state->id, SLOT_INDEX_FROM_INFO(state, slot_info),
1263 			state->remote->slot_queue_recycle);
1264 
1265 		/*
1266 		 * A write barrier is necessary, but remote_event_signal
1267 		 * contains one.
1268 		 */
1269 		remote_event_signal(state, &state->remote->recycle);
1270 	}
1271 
1272 	mutex_unlock(&state->recycle_mutex);
1273 }
1274 
1275 static inline enum vchiq_reason
get_bulk_reason(struct vchiq_bulk * bulk)1276 get_bulk_reason(struct vchiq_bulk *bulk)
1277 {
1278 	if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1279 		if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
1280 			return VCHIQ_BULK_TRANSMIT_ABORTED;
1281 
1282 		return VCHIQ_BULK_TRANSMIT_DONE;
1283 	}
1284 
1285 	if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
1286 		return VCHIQ_BULK_RECEIVE_ABORTED;
1287 
1288 	return VCHIQ_BULK_RECEIVE_DONE;
1289 }
1290 
1291 /* Called by the slot handler - don't hold the bulk mutex */
1292 static int
notify_bulks(struct vchiq_service * service,struct vchiq_bulk_queue * queue,int retry_poll)1293 notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
1294 	     int retry_poll)
1295 {
1296 	int status = 0;
1297 
1298 	dev_dbg(service->state->dev,
1299 		"core: %d: nb:%d %cx - p=%x rn=%x r=%x\n",
1300 		service->state->id, service->localport,
1301 		(queue == &service->bulk_tx) ? 't' : 'r',
1302 		queue->process, queue->remote_notify, queue->remove);
1303 
1304 	queue->remote_notify = queue->process;
1305 
1306 	while (queue->remove != queue->remote_notify) {
1307 		struct vchiq_bulk *bulk =
1308 			&queue->bulks[BULK_INDEX(queue->remove)];
1309 
1310 		/*
1311 		 * Only generate callbacks for non-dummy bulk
1312 		 * requests, and non-terminated services
1313 		 */
1314 		if (bulk->data && service->instance) {
1315 			if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
1316 				if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1317 					VCHIQ_SERVICE_STATS_INC(service, bulk_tx_count);
1318 					VCHIQ_SERVICE_STATS_ADD(service, bulk_tx_bytes,
1319 								bulk->actual);
1320 				} else {
1321 					VCHIQ_SERVICE_STATS_INC(service, bulk_rx_count);
1322 					VCHIQ_SERVICE_STATS_ADD(service, bulk_rx_bytes,
1323 								bulk->actual);
1324 				}
1325 			} else {
1326 				VCHIQ_SERVICE_STATS_INC(service, bulk_aborted_count);
1327 			}
1328 			if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
1329 				struct bulk_waiter *waiter;
1330 
1331 				spin_lock(&service->state->bulk_waiter_spinlock);
1332 				waiter = bulk->userdata;
1333 				if (waiter) {
1334 					waiter->actual = bulk->actual;
1335 					complete(&waiter->event);
1336 				}
1337 				spin_unlock(&service->state->bulk_waiter_spinlock);
1338 			} else if (bulk->mode == VCHIQ_BULK_MODE_CALLBACK) {
1339 				enum vchiq_reason reason =
1340 						get_bulk_reason(bulk);
1341 				status = make_service_callback(service, reason,	NULL,
1342 							       bulk->userdata);
1343 				if (status == -EAGAIN)
1344 					break;
1345 			}
1346 		}
1347 
1348 		queue->remove++;
1349 		complete(&service->bulk_remove_event);
1350 	}
1351 	if (!retry_poll)
1352 		status = 0;
1353 
1354 	if (status == -EAGAIN)
1355 		request_poll(service->state, service, (queue == &service->bulk_tx) ?
1356 			     VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
1357 
1358 	return status;
1359 }
1360 
1361 static void
poll_services_of_group(struct vchiq_state * state,int group)1362 poll_services_of_group(struct vchiq_state *state, int group)
1363 {
1364 	u32 flags = atomic_xchg(&state->poll_services[group], 0);
1365 	int i;
1366 
1367 	for (i = 0; flags; i++) {
1368 		struct vchiq_service *service;
1369 		u32 service_flags;
1370 
1371 		if ((flags & BIT(i)) == 0)
1372 			continue;
1373 
1374 		service = find_service_by_port(state, (group << 5) + i);
1375 		flags &= ~BIT(i);
1376 
1377 		if (!service)
1378 			continue;
1379 
1380 		service_flags = atomic_xchg(&service->poll_flags, 0);
1381 		if (service_flags & BIT(VCHIQ_POLL_REMOVE)) {
1382 			dev_dbg(state->dev, "core: %d: ps - remove %d<->%d\n",
1383 				state->id, service->localport, service->remoteport);
1384 
1385 			/*
1386 			 * Make it look like a client, because
1387 			 * it must be removed and not left in
1388 			 * the LISTENING state.
1389 			 */
1390 			service->public_fourcc = VCHIQ_FOURCC_INVALID;
1391 
1392 			if (vchiq_close_service_internal(service, NO_CLOSE_RECVD))
1393 				request_poll(state, service, VCHIQ_POLL_REMOVE);
1394 		} else if (service_flags & BIT(VCHIQ_POLL_TERMINATE)) {
1395 			dev_dbg(state->dev, "core: %d: ps - terminate %d<->%d\n",
1396 				state->id, service->localport, service->remoteport);
1397 			if (vchiq_close_service_internal(service, NO_CLOSE_RECVD))
1398 				request_poll(state, service, VCHIQ_POLL_TERMINATE);
1399 		}
1400 		if (service_flags & BIT(VCHIQ_POLL_TXNOTIFY))
1401 			notify_bulks(service, &service->bulk_tx, RETRY_POLL);
1402 		if (service_flags & BIT(VCHIQ_POLL_RXNOTIFY))
1403 			notify_bulks(service, &service->bulk_rx, RETRY_POLL);
1404 		vchiq_service_put(service);
1405 	}
1406 }
1407 
1408 /* Called by the slot handler thread */
1409 static void
poll_services(struct vchiq_state * state)1410 poll_services(struct vchiq_state *state)
1411 {
1412 	int group;
1413 
1414 	for (group = 0; group < BITSET_SIZE(state->unused_service); group++)
1415 		poll_services_of_group(state, group);
1416 }
1417 
1418 /* Called with the bulk_mutex held */
1419 static void
abort_outstanding_bulks(struct vchiq_service * service,struct vchiq_bulk_queue * queue)1420 abort_outstanding_bulks(struct vchiq_service *service,
1421 			struct vchiq_bulk_queue *queue)
1422 {
1423 	int is_tx = (queue == &service->bulk_tx);
1424 
1425 	dev_dbg(service->state->dev,
1426 		"core: %d: aob:%d %cx - li=%x ri=%x p=%x\n",
1427 		service->state->id, service->localport,
1428 		is_tx ? 't' : 'r', queue->local_insert,
1429 		queue->remote_insert, queue->process);
1430 
1431 	WARN_ON((int)(queue->local_insert - queue->process) < 0);
1432 	WARN_ON((int)(queue->remote_insert - queue->process) < 0);
1433 
1434 	while ((queue->process != queue->local_insert) ||
1435 	       (queue->process != queue->remote_insert)) {
1436 		struct vchiq_bulk *bulk = &queue->bulks[BULK_INDEX(queue->process)];
1437 
1438 		if (queue->process == queue->remote_insert) {
1439 			/* fabricate a matching dummy bulk */
1440 			bulk->remote_data = NULL;
1441 			bulk->remote_size = 0;
1442 			queue->remote_insert++;
1443 		}
1444 
1445 		if (queue->process != queue->local_insert) {
1446 			vchiq_complete_bulk(service->instance, bulk);
1447 
1448 			dev_dbg(service->state->dev,
1449 				"core_msg: %s %p4cc d:%d ABORTED - tx len:%d, rx len:%d\n",
1450 				is_tx ? "Send Bulk to" : "Recv Bulk from",
1451 				&service->base.fourcc,
1452 				service->remoteport, bulk->size, bulk->remote_size);
1453 		} else {
1454 			/* fabricate a matching dummy bulk */
1455 			bulk->data = 0;
1456 			bulk->size = 0;
1457 			bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
1458 			bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
1459 				VCHIQ_BULK_RECEIVE;
1460 			queue->local_insert++;
1461 		}
1462 
1463 		queue->process++;
1464 	}
1465 }
1466 
1467 static int
parse_open(struct vchiq_state * state,struct vchiq_header * header)1468 parse_open(struct vchiq_state *state, struct vchiq_header *header)
1469 {
1470 	const struct vchiq_open_payload *payload;
1471 	struct vchiq_service *service = NULL;
1472 	int msgid, size;
1473 	unsigned int localport, remoteport, fourcc;
1474 	short version, version_min;
1475 
1476 	msgid = header->msgid;
1477 	size = header->size;
1478 	localport = VCHIQ_MSG_DSTPORT(msgid);
1479 	remoteport = VCHIQ_MSG_SRCPORT(msgid);
1480 	if (size < sizeof(struct vchiq_open_payload))
1481 		goto fail_open;
1482 
1483 	payload = (struct vchiq_open_payload *)header->data;
1484 	fourcc = payload->fourcc;
1485 	dev_dbg(state->dev, "core: %d: prs OPEN@%pK (%d->'%p4cc')\n",
1486 		state->id, header, localport, &fourcc);
1487 
1488 	service = get_listening_service(state, fourcc);
1489 	if (!service)
1490 		goto fail_open;
1491 
1492 	/* A matching service exists */
1493 	version = payload->version;
1494 	version_min = payload->version_min;
1495 
1496 	if ((service->version < version_min) || (version < service->version_min)) {
1497 		/* Version mismatch */
1498 		dev_err(state->dev, "%d: service %d (%p4cc) version mismatch - local (%d, min %d) vs. remote (%d, min %d)",
1499 			state->id, service->localport, &fourcc,
1500 			service->version, service->version_min, version, version_min);
1501 		vchiq_service_put(service);
1502 		service = NULL;
1503 		goto fail_open;
1504 	}
1505 	service->peer_version = version;
1506 
1507 	if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
1508 		struct vchiq_openack_payload ack_payload = {
1509 			service->version
1510 		};
1511 		int openack_id = MAKE_OPENACK(service->localport, remoteport);
1512 
1513 		if (state->version_common <
1514 		    VCHIQ_VERSION_SYNCHRONOUS_MODE)
1515 			service->sync = 0;
1516 
1517 		/* Acknowledge the OPEN */
1518 		if (service->sync) {
1519 			if (queue_message_sync(state, NULL, openack_id, memcpy_copy_callback,
1520 					       &ack_payload, sizeof(ack_payload)) == -EAGAIN)
1521 				goto bail_not_ready;
1522 
1523 			/* The service is now open */
1524 			set_service_state(service, VCHIQ_SRVSTATE_OPENSYNC);
1525 		} else {
1526 			if (queue_message(state, NULL, openack_id, memcpy_copy_callback,
1527 					  &ack_payload, sizeof(ack_payload), 0) == -EAGAIN)
1528 				goto bail_not_ready;
1529 
1530 			/* The service is now open */
1531 			set_service_state(service, VCHIQ_SRVSTATE_OPEN);
1532 		}
1533 	}
1534 
1535 	/* Success - the message has been dealt with */
1536 	vchiq_service_put(service);
1537 	return 1;
1538 
1539 fail_open:
1540 	/* No available service, or an invalid request - send a CLOSE */
1541 	if (queue_message(state, NULL, MAKE_CLOSE(0, VCHIQ_MSG_SRCPORT(msgid)),
1542 			  NULL, NULL, 0, 0) == -EAGAIN)
1543 		goto bail_not_ready;
1544 
1545 	return 1;
1546 
1547 bail_not_ready:
1548 	if (service)
1549 		vchiq_service_put(service);
1550 
1551 	return 0;
1552 }
1553 
1554 /**
1555  * parse_message() - parses a single message from the rx slot
1556  * @state:  vchiq state struct
1557  * @header: message header
1558  *
1559  * Context: Process context
1560  *
1561  * Return:
1562  * * >= 0     - size of the parsed message payload (without header)
1563  * * -EINVAL  - fatal error occurred, bail out is required
1564  */
1565 static int
parse_message(struct vchiq_state * state,struct vchiq_header * header)1566 parse_message(struct vchiq_state *state, struct vchiq_header *header)
1567 {
1568 	struct vchiq_service *service = NULL;
1569 	unsigned int localport, remoteport;
1570 	int msgid, size, type, ret = -EINVAL;
1571 	int svc_fourcc;
1572 
1573 	DEBUG_INITIALISE(state->local);
1574 
1575 	DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
1576 	msgid = header->msgid;
1577 	DEBUG_VALUE(PARSE_MSGID, msgid);
1578 	size = header->size;
1579 	type = VCHIQ_MSG_TYPE(msgid);
1580 	localport = VCHIQ_MSG_DSTPORT(msgid);
1581 	remoteport = VCHIQ_MSG_SRCPORT(msgid);
1582 
1583 	if (type != VCHIQ_MSG_DATA)
1584 		VCHIQ_STATS_INC(state, ctrl_rx_count);
1585 
1586 	switch (type) {
1587 	case VCHIQ_MSG_OPENACK:
1588 	case VCHIQ_MSG_CLOSE:
1589 	case VCHIQ_MSG_DATA:
1590 	case VCHIQ_MSG_BULK_RX:
1591 	case VCHIQ_MSG_BULK_TX:
1592 	case VCHIQ_MSG_BULK_RX_DONE:
1593 	case VCHIQ_MSG_BULK_TX_DONE:
1594 		service = find_service_by_port(state, localport);
1595 		if ((!service ||
1596 		     ((service->remoteport != remoteport) &&
1597 		      (service->remoteport != VCHIQ_PORT_FREE))) &&
1598 		    (localport == 0) &&
1599 		    (type == VCHIQ_MSG_CLOSE)) {
1600 			/*
1601 			 * This could be a CLOSE from a client which
1602 			 * hadn't yet received the OPENACK - look for
1603 			 * the connected service
1604 			 */
1605 			if (service)
1606 				vchiq_service_put(service);
1607 			service = get_connected_service(state, remoteport);
1608 			if (service)
1609 				dev_warn(state->dev,
1610 					 "core: %d: prs %s@%pK (%d->%d) - found connected service %d\n",
1611 					 state->id, msg_type_str(type), header,
1612 					 remoteport, localport, service->localport);
1613 		}
1614 
1615 		if (!service) {
1616 			dev_err(state->dev,
1617 				"core: %d: prs %s@%pK (%d->%d) - invalid/closed service %d\n",
1618 				state->id, msg_type_str(type), header, remoteport,
1619 				localport, localport);
1620 			goto skip_message;
1621 		}
1622 		break;
1623 	default:
1624 		break;
1625 	}
1626 
1627 	svc_fourcc = service ? service->base.fourcc
1628 			     : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1629 
1630 	dev_dbg(state->dev, "core_msg: Rcvd Msg %s(%u) from %p4cc s:%d d:%d len:%d\n",
1631 		msg_type_str(type), type, &svc_fourcc, remoteport, localport, size);
1632 	if (size > 0)
1633 		vchiq_log_dump_mem(state->dev, "Rcvd", 0, header->data, min(16, size));
1634 
1635 	if (((unsigned long)header & VCHIQ_SLOT_MASK) +
1636 	    calc_stride(size) > VCHIQ_SLOT_SIZE) {
1637 		dev_err(state->dev, "core: header %pK (msgid %x) - size %x too big for slot\n",
1638 			header, (unsigned int)msgid, (unsigned int)size);
1639 		WARN(1, "oversized for slot\n");
1640 	}
1641 
1642 	switch (type) {
1643 	case VCHIQ_MSG_OPEN:
1644 		WARN_ON(VCHIQ_MSG_DSTPORT(msgid));
1645 		if (!parse_open(state, header))
1646 			goto bail_not_ready;
1647 		break;
1648 	case VCHIQ_MSG_OPENACK:
1649 		if (size >= sizeof(struct vchiq_openack_payload)) {
1650 			const struct vchiq_openack_payload *payload =
1651 				(struct vchiq_openack_payload *)
1652 				header->data;
1653 			service->peer_version = payload->version;
1654 		}
1655 		dev_dbg(state->dev,
1656 			"core: %d: prs OPENACK@%pK,%x (%d->%d) v:%d\n",
1657 			state->id, header, size, remoteport, localport,
1658 			service->peer_version);
1659 		if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
1660 			service->remoteport = remoteport;
1661 			set_service_state(service, VCHIQ_SRVSTATE_OPEN);
1662 			complete(&service->remove_event);
1663 		} else {
1664 			dev_err(state->dev, "core: OPENACK received in state %s\n",
1665 				srvstate_names[service->srvstate]);
1666 		}
1667 		break;
1668 	case VCHIQ_MSG_CLOSE:
1669 		WARN_ON(size); /* There should be no data */
1670 
1671 		dev_dbg(state->dev, "core: %d: prs CLOSE@%pK (%d->%d)\n",
1672 			state->id, header, remoteport, localport);
1673 
1674 		mark_service_closing_internal(service, 1);
1675 
1676 		if (vchiq_close_service_internal(service, CLOSE_RECVD) == -EAGAIN)
1677 			goto bail_not_ready;
1678 
1679 		dev_dbg(state->dev, "core: Close Service %p4cc s:%u d:%d\n",
1680 			&service->base.fourcc, service->localport, service->remoteport);
1681 		break;
1682 	case VCHIQ_MSG_DATA:
1683 		dev_dbg(state->dev, "core: %d: prs DATA@%pK,%x (%d->%d)\n",
1684 			state->id, header, size, remoteport, localport);
1685 
1686 		if ((service->remoteport == remoteport) &&
1687 		    (service->srvstate == VCHIQ_SRVSTATE_OPEN)) {
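			/*
			 * Mark the message as claimed so its slot is not
			 * recycled until the recipient releases it via
			 * vchiq_release_message().
			 */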
1688 			header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
1689 			claim_slot(state->rx_info);
1690 			DEBUG_TRACE(PARSE_LINE);
1691 			if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header,
1692 						  NULL) == -EAGAIN) {
1693 				DEBUG_TRACE(PARSE_LINE);
1694 				goto bail_not_ready;
1695 			}
1696 			VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
1697 			VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes, size);
1698 		} else {
1699 			VCHIQ_STATS_INC(state, error_count);
1700 		}
1701 		break;
1702 	case VCHIQ_MSG_CONNECT:
1703 		dev_dbg(state->dev, "core: %d: prs CONNECT@%pK\n",
1704 			state->id, header);
1705 		state->version_common =	((struct vchiq_slot_zero *)
1706 					 state->slot_data)->version;
1707 		complete(&state->connect);
1708 		break;
1709 	case VCHIQ_MSG_BULK_RX:
1710 	case VCHIQ_MSG_BULK_TX:
1711 		/*
1712 		 * We should never receive a bulk request from the
1713 		 * other side since we're not set up to perform as the
1714 		 * master.
1715 		 */
1716 		WARN_ON(1);
1717 		break;
1718 	case VCHIQ_MSG_BULK_RX_DONE:
1719 	case VCHIQ_MSG_BULK_TX_DONE:
1720 		if ((service->remoteport == remoteport) &&
1721 		    (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
1722 			struct vchiq_bulk_queue *queue;
1723 			struct vchiq_bulk *bulk;
1724 
1725 			queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
1726 				&service->bulk_rx : &service->bulk_tx;
1727 
1728 			DEBUG_TRACE(PARSE_LINE);
1729 			if (mutex_lock_killable(&service->bulk_mutex)) {
1730 				DEBUG_TRACE(PARSE_LINE);
1731 				goto bail_not_ready;
1732 			}
1733 			if ((int)(queue->remote_insert -
1734 				queue->local_insert) >= 0) {
1735 				dev_err(state->dev,
1736 					"core: %d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)\n",
1737 					state->id, msg_type_str(type), header, remoteport,
1738 					localport, queue->remote_insert, queue->local_insert);
1739 				mutex_unlock(&service->bulk_mutex);
1740 				break;
1741 			}
1742 			if (queue->process != queue->remote_insert) {
1743 				dev_err(state->dev, "%s: p %x != ri %x\n",
1744 					__func__, queue->process,
1745 					queue->remote_insert);
1746 				mutex_unlock(&service->bulk_mutex);
1747 				goto bail_not_ready;
1748 			}
1749 
1750 			bulk = &queue->bulks[BULK_INDEX(queue->remote_insert)];
1751 			bulk->actual = *(int *)header->data;
1752 			queue->remote_insert++;
1753 
1754 			dev_dbg(state->dev, "core: %d: prs %s@%pK (%d->%d) %x@%pad\n",
1755 				state->id, msg_type_str(type), header, remoteport,
1756 				localport, bulk->actual, &bulk->data);
1757 
1758 			dev_dbg(state->dev, "core: %d: prs:%d %cx li=%x ri=%x p=%x\n",
1759 				state->id, localport,
1760 				(type == VCHIQ_MSG_BULK_RX_DONE) ? 'r' : 't',
1761 				queue->local_insert, queue->remote_insert, queue->process);
1762 
1763 			DEBUG_TRACE(PARSE_LINE);
1764 			WARN_ON(queue->process == queue->local_insert);
1765 			vchiq_complete_bulk(service->instance, bulk);
1766 			queue->process++;
1767 			mutex_unlock(&service->bulk_mutex);
1768 			DEBUG_TRACE(PARSE_LINE);
1769 			notify_bulks(service, queue, RETRY_POLL);
1770 			DEBUG_TRACE(PARSE_LINE);
1771 		}
1772 		break;
1773 	case VCHIQ_MSG_PADDING:
1774 		dev_dbg(state->dev, "core: %d: prs PADDING@%pK,%x\n",
1775 			state->id, header, size);
1776 		break;
1777 	case VCHIQ_MSG_PAUSE:
1778 		/* If initiated, signal the application thread */
1779 		dev_dbg(state->dev, "core: %d: prs PAUSE@%pK,%x\n",
1780 			state->id, header, size);
1781 		if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
1782 			dev_err(state->dev, "core: %d: PAUSE received in state PAUSED\n",
1783 				state->id);
1784 			break;
1785 		}
1786 		if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
1787 			/* Send a PAUSE in response */
1788 			if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0,
1789 					  QMFLAGS_NO_MUTEX_UNLOCK) == -EAGAIN)
1790 				goto bail_not_ready;
1791 		}
1792 		/* At this point slot_mutex is held */
1793 		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
1794 		break;
1795 	case VCHIQ_MSG_RESUME:
1796 		dev_dbg(state->dev, "core: %d: prs RESUME@%pK,%x\n",
1797 			state->id, header, size);
1798 		/* Release the slot mutex */
1799 		mutex_unlock(&state->slot_mutex);
1800 		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
1801 		break;
1802 
1803 	case VCHIQ_MSG_REMOTE_USE:
1804 		vchiq_on_remote_use(state);
1805 		break;
1806 	case VCHIQ_MSG_REMOTE_RELEASE:
1807 		vchiq_on_remote_release(state);
1808 		break;
1809 	case VCHIQ_MSG_REMOTE_USE_ACTIVE:
1810 		break;
1811 
1812 	default:
1813 		dev_err(state->dev, "core: %d: prs invalid msgid %x@%pK,%x\n",
1814 			state->id, msgid, header, size);
1815 		WARN(1, "invalid message\n");
1816 		break;
1817 	}
1818 
1819 skip_message:
1820 	ret = size;
1821 
1822 bail_not_ready:
1823 	if (service)
1824 		vchiq_service_put(service);
1825 
1826 	return ret;
1827 }
1828 
1829 /* Called by the slot handler thread */
1830 static void
1831 parse_rx_slots(struct vchiq_state *state)
1832 {
1833 	struct vchiq_shared_state *remote = state->remote;
1834 	int tx_pos;
1835 
1836 	DEBUG_INITIALISE(state->local);
1837 
1838 	tx_pos = remote->tx_pos;
1839 
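	/*
	 * Parse every message the peer has written since the last pass;
	 * rx_pos trails the peer's tx_pos.
	 */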
1840 	while (state->rx_pos != tx_pos) {
1841 		struct vchiq_header *header;
1842 		int size;
1843 
1844 		DEBUG_TRACE(PARSE_LINE);
1845 		if (!state->rx_data) {
1846 			int rx_index;
1847 
1848 			WARN_ON(state->rx_pos & VCHIQ_SLOT_MASK);
1849 			rx_index = remote->slot_queue[
1850 				SLOT_QUEUE_INDEX_FROM_POS_MASKED(state->rx_pos)];
1851 			state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
1852 				rx_index);
1853 			state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
1854 
1855 			/*
1856 			 * Initialise use_count to one, and increment
1857 			 * release_count at the end of the slot to avoid
1858 			 * releasing the slot prematurely.
1859 			 */
1860 			state->rx_info->use_count = 1;
1861 			state->rx_info->release_count = 0;
1862 		}
1863 
1864 		header = (struct vchiq_header *)(state->rx_data +
1865 			(state->rx_pos & VCHIQ_SLOT_MASK));
1866 		size = parse_message(state, header);
1867 		if (size < 0)
1868 			return;
1869 
1870 		state->rx_pos += calc_stride(size);
1871 
1872 		DEBUG_TRACE(PARSE_LINE);
1873 		/*
1874 		 * Perform some housekeeping when the end of the slot is
1875 		 * reached.
1876 		 */
1877 		if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
1878 			/* Remove the extra reference count. */
1879 			release_slot(state, state->rx_info, NULL, NULL);
1880 			state->rx_data = NULL;
1881 		}
1882 	}
1883 }
1884 
1885 /**
1886  * handle_poll() - handle service polling and other rare conditions
1887  * @state:  vchiq state struct
1888  *
1889  * Context: Process context
1890  *
1891  * Return:
1892  * * 0        - poll handled successfully
1893  * * -EAGAIN  - retry later
1894  */
1895 static int
1896 handle_poll(struct vchiq_state *state)
1897 {
1898 	switch (state->conn_state) {
1899 	case VCHIQ_CONNSTATE_CONNECTED:
1900 		/* Poll the services as requested */
1901 		poll_services(state);
1902 		break;
1903 
1904 	case VCHIQ_CONNSTATE_PAUSING:
1905 		if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0,
1906 				  QMFLAGS_NO_MUTEX_UNLOCK) != -EAGAIN) {
1907 			vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSE_SENT);
1908 		} else {
1909 			/* Retry later */
1910 			return -EAGAIN;
1911 		}
1912 		break;
1913 
1914 	case VCHIQ_CONNSTATE_RESUMING:
1915 		if (queue_message(state, NULL, MAKE_RESUME, NULL, NULL, 0,
1916 				  QMFLAGS_NO_MUTEX_LOCK) != -EAGAIN) {
1917 			vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
1918 		} else {
1919 			/*
1920 			 * This should really be impossible,
1921 			 * since the PAUSE should have flushed
1922 			 * through outstanding messages.
1923 			 */
1924 			dev_err(state->dev, "core: Failed to send RESUME message\n");
1925 		}
1926 		break;
1927 	default:
1928 		break;
1929 	}
1930 
1931 	return 0;
1932 }
1933 
1934 /* Called by the slot handler thread */
1935 static int
1936 slot_handler_func(void *v)
1937 {
1938 	struct vchiq_state *state = v;
1939 	struct vchiq_shared_state *local = state->local;
1940 	int ret;
1941 
1942 	DEBUG_INITIALISE(local);
1943 
1944 	while (!kthread_should_stop()) {
1945 		DEBUG_COUNT(SLOT_HANDLER_COUNT);
1946 		DEBUG_TRACE(SLOT_HANDLER_LINE);
1947 		ret = remote_event_wait(&state->trigger_event, &local->trigger);
1948 		if (ret)
1949 			return ret;
1950 
1951 		/* Ensure that reads don't overtake the remote_event_wait. */
1952 		rmb();
1953 
1954 		DEBUG_TRACE(SLOT_HANDLER_LINE);
1955 		if (state->poll_needed) {
1956 			state->poll_needed = 0;
1957 
1958 			/*
1959 			 * Handle service polling and other rare conditions here
1960 			 * out of the mainline code
1961 			 */
1962 			if (handle_poll(state) == -EAGAIN)
1963 				state->poll_needed = 1;
1964 		}
1965 
1966 		DEBUG_TRACE(SLOT_HANDLER_LINE);
1967 		parse_rx_slots(state);
1968 	}
1969 	return 0;
1970 }
1971 
1972 /* Called by the recycle thread */
1973 static int
1974 recycle_func(void *v)
1975 {
1976 	struct vchiq_state *state = v;
1977 	struct vchiq_shared_state *local = state->local;
1978 	u32 *found;
1979 	size_t length;
1980 	int ret;
1981 
1982 	length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES);
1983 
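	/*
	 * 'found' is scratch space for process_free_queue(): a bitmap with
	 * one bit per possible service, recording which services it touches
	 * while freeing slots.
	 */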
1984 	found = kmalloc_array(BITSET_SIZE(VCHIQ_MAX_SERVICES), sizeof(*found),
1985 			      GFP_KERNEL);
1986 	if (!found)
1987 		return -ENOMEM;
1988 
1989 	while (!kthread_should_stop()) {
1990 		ret = remote_event_wait(&state->recycle_event, &local->recycle);
1991 		if (ret)
1992 			return ret;
1993 
1994 		process_free_queue(state, found, length);
1995 	}
1996 	return 0;
1997 }
1998 
1999 /* Called by the sync thread */
2000 static int
2001 sync_func(void *v)
2002 {
2003 	struct vchiq_state *state = v;
2004 	struct vchiq_shared_state *local = state->local;
2005 	struct vchiq_header *header =
2006 		(struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2007 			state->remote->slot_sync);
2008 	int svc_fourcc;
2009 	int ret;
2010 
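	/*
	 * Synchronous messages all arrive in the peer's single sync slot,
	 * so the header pointer computed above never changes.
	 */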
2011 	while (!kthread_should_stop()) {
2012 		struct vchiq_service *service;
2013 		int msgid, size;
2014 		int type;
2015 		unsigned int localport, remoteport;
2016 
2017 		ret = remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);
2018 		if (ret)
2019 			return ret;
2020 
2021 		/* Ensure that reads don't overtake the remote_event_wait. */
2022 		rmb();
2023 
2024 		msgid = header->msgid;
2025 		size = header->size;
2026 		type = VCHIQ_MSG_TYPE(msgid);
2027 		localport = VCHIQ_MSG_DSTPORT(msgid);
2028 		remoteport = VCHIQ_MSG_SRCPORT(msgid);
2029 
2030 		service = find_service_by_port(state, localport);
2031 
2032 		if (!service) {
2033 			dev_err(state->dev,
2034 				"sync: %d: sf %s@%pK (%d->%d) - invalid/closed service %d\n",
2035 				state->id, msg_type_str(type), header, remoteport,
2036 				localport, localport);
2037 			release_message_sync(state, header);
2038 			continue;
2039 		}
2040 
2041 		svc_fourcc = service->base.fourcc;
2042 
2043 		dev_dbg(state->dev, "sync: Rcvd Msg %s from %p4cc s:%d d:%d len:%d\n",
2044 			msg_type_str(type), &svc_fourcc, remoteport, localport, size);
2045 		if (size > 0)
2046 			vchiq_log_dump_mem(state->dev, "Rcvd", 0, header->data, min(16, size));
2047 
2048 		switch (type) {
2049 		case VCHIQ_MSG_OPENACK:
2050 			if (size >= sizeof(struct vchiq_openack_payload)) {
2051 				const struct vchiq_openack_payload *payload =
2052 					(struct vchiq_openack_payload *)
2053 					header->data;
2054 				service->peer_version = payload->version;
2055 			}
2056 			dev_err(state->dev, "sync: %d: sf OPENACK@%pK,%x (%d->%d) v:%d\n",
2057 				state->id, header, size, remoteport, localport,
2058 				service->peer_version);
2059 			if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
2060 				service->remoteport = remoteport;
2061 				set_service_state(service, VCHIQ_SRVSTATE_OPENSYNC);
2062 				service->sync = 1;
2063 				complete(&service->remove_event);
2064 			}
2065 			release_message_sync(state, header);
2066 			break;
2067 
2068 		case VCHIQ_MSG_DATA:
2069 			dev_dbg(state->dev, "sync: %d: sf DATA@%pK,%x (%d->%d)\n",
2070 				state->id, header, size, remoteport, localport);
2071 
2072 			if ((service->remoteport == remoteport) &&
2073 			    (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)) {
2074 				if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header,
2075 							  NULL) == -EAGAIN)
2076 					dev_err(state->dev,
2077 						"sync: error: synchronous callback to service %d returns -EAGAIN\n",
2078 						localport);
2079 			}
2080 			break;
2081 
2082 		default:
2083 			dev_err(state->dev, "sync: error: %d: sf unexpected msgid %x@%pK,%x\n",
2084 				state->id, msgid, header, size);
2085 			release_message_sync(state, header);
2086 			break;
2087 		}
2088 
2089 		vchiq_service_put(service);
2090 	}
2091 
2092 	return 0;
2093 }
2094 
2095 inline const char *
2096 get_conn_state_name(enum vchiq_connstate conn_state)
2097 {
2098 	return conn_state_names[conn_state];
2099 }
2100 
2101 struct vchiq_slot_zero *
2102 vchiq_init_slots(struct device *dev, void *mem_base, int mem_size)
2103 {
2104 	int mem_align =
2105 		(int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
2106 	struct vchiq_slot_zero *slot_zero =
2107 		(struct vchiq_slot_zero *)(mem_base + mem_align);
2108 	int num_slots = (mem_size - mem_align) / VCHIQ_SLOT_SIZE;
2109 	int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
2110 
2111 	check_sizes();
2112 
2113 	/* Ensure there is enough memory to run an absolutely minimum system */
2114 	num_slots -= first_data_slot;
2115 
2116 	if (num_slots < 4) {
2117 		dev_err(dev, "core: %s: Insufficient memory %x bytes\n",
2118 			__func__, mem_size);
2119 		return NULL;
2120 	}
2121 
2122 	memset(slot_zero, 0, sizeof(struct vchiq_slot_zero));
2123 
2124 	slot_zero->magic = VCHIQ_MAGIC;
2125 	slot_zero->version = VCHIQ_VERSION;
2126 	slot_zero->version_min = VCHIQ_VERSION_MIN;
2127 	slot_zero->slot_zero_size = sizeof(struct vchiq_slot_zero);
2128 	slot_zero->slot_size = VCHIQ_SLOT_SIZE;
2129 	slot_zero->max_slots = VCHIQ_MAX_SLOTS;
2130 	slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
2131 
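	/*
	 * Each side gets a dedicated sync slot followed by half of the
	 * remaining data slots.
	 */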
2132 	slot_zero->master.slot_sync = first_data_slot;
2133 	slot_zero->master.slot_first = first_data_slot + 1;
2134 	slot_zero->master.slot_last = first_data_slot + (num_slots / 2) - 1;
2135 	slot_zero->slave.slot_sync = first_data_slot + (num_slots / 2);
2136 	slot_zero->slave.slot_first = first_data_slot + (num_slots / 2) + 1;
2137 	slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
2138 
2139 	return slot_zero;
2140 }
2141 
2142 int
2143 vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero, struct device *dev)
2144 {
2145 	struct vchiq_shared_state *local;
2146 	struct vchiq_shared_state *remote;
2147 	char threadname[16];
2148 	int i, ret;
2149 
2150 	local = &slot_zero->slave;
2151 	remote = &slot_zero->master;
2152 
2153 	if (local->initialised) {
2154 		if (remote->initialised)
2155 			dev_err(dev, "local state has already been initialised\n");
2156 		else
2157 			dev_err(dev, "master/slave mismatch - two slaves\n");
2158 
2159 		return -EINVAL;
2160 	}
2161 
2162 	memset(state, 0, sizeof(struct vchiq_state));
2163 
2164 	state->dev = dev;
2165 
2166 	/*
2167 	 * initialize shared state pointers
2168 	 */
2169 
2170 	state->local = local;
2171 	state->remote = remote;
2172 	state->slot_data = (struct vchiq_slot *)slot_zero;
2173 
2174 	/*
2175 	 * initialize events and mutexes
2176 	 */
2177 
2178 	init_completion(&state->connect);
2179 	mutex_init(&state->mutex);
2180 	mutex_init(&state->slot_mutex);
2181 	mutex_init(&state->recycle_mutex);
2182 	mutex_init(&state->sync_mutex);
2183 
2184 	spin_lock_init(&state->msg_queue_spinlock);
2185 	spin_lock_init(&state->bulk_waiter_spinlock);
2186 	spin_lock_init(&state->quota_spinlock);
2187 
2188 	init_completion(&state->slot_available_event);
2189 	init_completion(&state->data_quota_event);
2190 
2191 	state->slot_queue_available = 0;
2192 
2193 	for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
2194 		struct vchiq_service_quota *quota = &state->service_quotas[i];
2195 
2196 		init_completion(&quota->quota_event);
2197 	}
2198 
2199 	for (i = local->slot_first; i <= local->slot_last; i++) {
2200 		local->slot_queue[state->slot_queue_available] = i;
2201 		state->slot_queue_available++;
2202 		complete(&state->slot_available_event);
2203 	}
2204 
2205 	state->default_slot_quota = state->slot_queue_available / 2;
2206 	state->default_message_quota =
2207 		min_t(unsigned short, state->default_slot_quota * 256, ~0);
2208 
2209 	state->previous_data_index = -1;
2210 	state->data_use_count = 0;
2211 	state->data_quota = state->slot_queue_available - 1;
2212 
2213 	remote_event_create(&state->trigger_event, &local->trigger);
2214 	local->tx_pos = 0;
2215 	remote_event_create(&state->recycle_event, &local->recycle);
2216 	local->slot_queue_recycle = state->slot_queue_available;
2217 	remote_event_create(&state->sync_trigger_event, &local->sync_trigger);
2218 	remote_event_create(&state->sync_release_event, &local->sync_release);
2219 
2220 	/* At start-of-day, the slot is empty and available */
2221 	((struct vchiq_header *)
2222 		SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid =
2223 							VCHIQ_MSGID_PADDING;
2224 	remote_event_signal_local(&state->sync_release_event, &local->sync_release);
2225 
2226 	local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
2227 
2228 	ret = vchiq_platform_init_state(state);
2229 	if (ret)
2230 		return ret;
2231 
2232 	/*
2233 	 * bring up slot handler thread
2234 	 */
2235 	snprintf(threadname, sizeof(threadname), "vchiq-slot/%d", state->id);
2236 	state->slot_handler_thread = kthread_create(&slot_handler_func, (void *)state, threadname);
2237 
2238 	if (IS_ERR(state->slot_handler_thread)) {
2239 		dev_err(state->dev, "couldn't create thread %s\n", threadname);
2240 		return PTR_ERR(state->slot_handler_thread);
2241 	}
2242 	set_user_nice(state->slot_handler_thread, -19);
2243 
2244 	snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id);
2245 	state->recycle_thread = kthread_create(&recycle_func, (void *)state, threadname);
2246 	if (IS_ERR(state->recycle_thread)) {
2247 		dev_err(state->dev, "couldn't create thread %s\n", threadname);
2248 		ret = PTR_ERR(state->recycle_thread);
2249 		goto fail_free_handler_thread;
2250 	}
2251 	set_user_nice(state->recycle_thread, -19);
2252 
2253 	snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id);
2254 	state->sync_thread = kthread_create(&sync_func, (void *)state, threadname);
2255 	if (IS_ERR(state->sync_thread)) {
2256 		dev_err(state->dev, "couldn't create thread %s\n", threadname);
2257 		ret = PTR_ERR(state->sync_thread);
2258 		goto fail_free_recycle_thread;
2259 	}
2260 	set_user_nice(state->sync_thread, -20);
2261 
2262 	wake_up_process(state->slot_handler_thread);
2263 	wake_up_process(state->recycle_thread);
2264 	wake_up_process(state->sync_thread);
2265 
2266 	/* Indicate readiness to the other side */
2267 	local->initialised = 1;
2268 
2269 	return 0;
2270 
2271 fail_free_recycle_thread:
2272 	kthread_stop(state->recycle_thread);
2273 fail_free_handler_thread:
2274 	kthread_stop(state->slot_handler_thread);
2275 
2276 	return ret;
2277 }
2278 
2279 void vchiq_msg_queue_push(struct vchiq_instance *instance, unsigned int handle,
2280 			  struct vchiq_header *header)
2281 {
2282 	struct vchiq_service *service = find_service_by_handle(instance, handle);
2283 	int pos;
2284 
2285 	if (!service)
2286 		return;
2287 
2288 	while (service->msg_queue_write == service->msg_queue_read +
2289 		VCHIQ_MAX_SLOTS) {
2290 		if (wait_for_completion_interruptible(&service->msg_queue_pop))
2291 			flush_signals(current);
2292 	}
2293 
2294 	pos = service->msg_queue_write & (VCHIQ_MAX_SLOTS - 1);
2295 	service->msg_queue_write++;
2296 	service->msg_queue[pos] = header;
2297 
2298 	complete(&service->msg_queue_push);
2299 }
2300 EXPORT_SYMBOL(vchiq_msg_queue_push);
2301 
2302 struct vchiq_header *vchiq_msg_hold(struct vchiq_instance *instance, unsigned int handle)
2303 {
2304 	struct vchiq_service *service = find_service_by_handle(instance, handle);
2305 	struct vchiq_header *header;
2306 	int pos;
2307 
2308 	if (!service)
2309 		return NULL;
2310 
2311 	if (service->msg_queue_write == service->msg_queue_read)
2312 		return NULL;
2313 
2314 	while (service->msg_queue_write == service->msg_queue_read) {
2315 		if (wait_for_completion_interruptible(&service->msg_queue_push))
2316 			flush_signals(current);
2317 	}
2318 
2319 	pos = service->msg_queue_read & (VCHIQ_MAX_SLOTS - 1);
2320 	service->msg_queue_read++;
2321 	header = service->msg_queue[pos];
2322 
2323 	complete(&service->msg_queue_pop);
2324 
2325 	return header;
2326 }
2327 EXPORT_SYMBOL(vchiq_msg_hold);
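
/*
 * Typical consumer pattern (illustrative sketch only; process_payload() is a
 * hypothetical helper): a service callback queues incoming headers with
 * vchiq_msg_queue_push(), and a worker drains them:
 *
 *	struct vchiq_header *header;
 *
 *	while ((header = vchiq_msg_hold(instance, handle)) != NULL) {
 *		process_payload(header->data, header->size);
 *		vchiq_release_message(instance, handle, header);
 *	}
 */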
2328 
2329 static int vchiq_validate_params(struct vchiq_state *state,
2330 				 const struct vchiq_service_params_kernel *params)
2331 {
2332 	if (!params->callback || !params->fourcc) {
2333 		dev_err(state->dev, "Can't add service, invalid params\n");
2334 		return -EINVAL;
2335 	}
2336 
2337 	return 0;
2338 }
2339 
2340 /* Called from application thread when a client or server service is created. */
2341 struct vchiq_service *
2342 vchiq_add_service_internal(struct vchiq_state *state,
2343 			   const struct vchiq_service_params_kernel *params,
2344 			   int srvstate, struct vchiq_instance *instance,
2345 			   void (*userdata_term)(void *userdata))
2346 {
2347 	struct vchiq_service *service;
2348 	struct vchiq_service __rcu **pservice = NULL;
2349 	struct vchiq_service_quota *quota;
2350 	int ret;
2351 	int i;
2352 
2353 	ret = vchiq_validate_params(state, params);
2354 	if (ret)
2355 		return NULL;
2356 
2357 	service = kzalloc(sizeof(*service), GFP_KERNEL);
2358 	if (!service)
2359 		return service;
2360 
2361 	service->base.fourcc   = params->fourcc;
2362 	service->base.callback = params->callback;
2363 	service->base.userdata = params->userdata;
2364 	service->handle        = VCHIQ_SERVICE_HANDLE_INVALID;
2365 	kref_init(&service->ref_count);
2366 	service->srvstate      = VCHIQ_SRVSTATE_FREE;
2367 	service->userdata_term = userdata_term;
2368 	service->localport     = VCHIQ_PORT_FREE;
2369 	service->remoteport    = VCHIQ_PORT_FREE;
2370 
2371 	service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
2372 		VCHIQ_FOURCC_INVALID : params->fourcc;
2373 	service->auto_close    = 1;
2374 	atomic_set(&service->poll_flags, 0);
2375 	service->version       = params->version;
2376 	service->version_min   = params->version_min;
2377 	service->state         = state;
2378 	service->instance      = instance;
2379 	init_completion(&service->remove_event);
2380 	init_completion(&service->bulk_remove_event);
2381 	init_completion(&service->msg_queue_pop);
2382 	init_completion(&service->msg_queue_push);
2383 	mutex_init(&service->bulk_mutex);
2384 
2385 	/*
2386 	 * Although it is perfectly possible to use a spinlock
2387 	 * to protect the creation of services, it is overkill as it
2388 	 * disables interrupts while the array is searched.
2389 	 * The only danger is of another thread trying to create a
2390 	 * service - service deletion is safe.
2391 	 * Therefore it is preferable to use state->mutex which,
2392 	 * although slower to claim, doesn't block interrupts while
2393 	 * it is held.
2394 	 */
2395 
2396 	mutex_lock(&state->mutex);
2397 
2398 	/* Prepare to use a previously unused service */
2399 	if (state->unused_service < VCHIQ_MAX_SERVICES)
2400 		pservice = &state->services[state->unused_service];
2401 
2402 	if (srvstate == VCHIQ_SRVSTATE_OPENING) {
2403 		for (i = 0; i < state->unused_service; i++) {
2404 			if (!rcu_access_pointer(state->services[i])) {
2405 				pservice = &state->services[i];
2406 				break;
2407 			}
2408 		}
2409 	} else {
2410 		rcu_read_lock();
2411 		for (i = (state->unused_service - 1); i >= 0; i--) {
2412 			struct vchiq_service *srv;
2413 
2414 			srv = rcu_dereference(state->services[i]);
2415 			if (!srv) {
2416 				pservice = &state->services[i];
2417 			} else if ((srv->public_fourcc == params->fourcc) &&
2418 				   ((srv->instance != instance) ||
2419 				   (srv->base.callback != params->callback))) {
2420 				/*
2421 				 * There is another server using this
2422 				 * fourcc which doesn't match.
2423 				 */
2424 				pservice = NULL;
2425 				break;
2426 			}
2427 		}
2428 		rcu_read_unlock();
2429 	}
2430 
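	/*
	 * If a free entry was found, publish the service and give it a
	 * handle that combines an ever-increasing sequence number with the
	 * state and port indices, so stale handles to a reused port can be
	 * detected.
	 */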
2431 	if (pservice) {
2432 		service->localport = (pservice - state->services);
2433 		if (!handle_seq)
2434 			handle_seq = VCHIQ_MAX_STATES *
2435 				 VCHIQ_MAX_SERVICES;
2436 		service->handle = handle_seq |
2437 			(state->id * VCHIQ_MAX_SERVICES) |
2438 			service->localport;
2439 		handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
2440 		rcu_assign_pointer(*pservice, service);
2441 		if (pservice == &state->services[state->unused_service])
2442 			state->unused_service++;
2443 	}
2444 
2445 	mutex_unlock(&state->mutex);
2446 
2447 	if (!pservice) {
2448 		kfree(service);
2449 		return NULL;
2450 	}
2451 
2452 	quota = &state->service_quotas[service->localport];
2453 	quota->slot_quota = state->default_slot_quota;
2454 	quota->message_quota = state->default_message_quota;
2455 	if (quota->slot_use_count == 0)
2456 		quota->previous_tx_index =
2457 			SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
2458 			- 1;
2459 
2460 	/* Bring this service online */
2461 	set_service_state(service, srvstate);
2462 
2463 	dev_dbg(state->dev, "core_msg: %s Service %p4cc SrcPort:%d\n",
2464 		(srvstate == VCHIQ_SRVSTATE_OPENING) ? "Open" : "Add",
2465 		&params->fourcc, service->localport);
2466 
2467 	/* Don't unlock the service - leave it with a ref_count of 1. */
2468 
2469 	return service;
2470 }
2471 
2472 int
2473 vchiq_open_service_internal(struct vchiq_service *service, int client_id)
2474 {
2475 	struct vchiq_open_payload payload = {
2476 		service->base.fourcc,
2477 		client_id,
2478 		service->version,
2479 		service->version_min
2480 	};
2481 	int status = 0;
2482 
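	/*
	 * Send an OPEN request carrying the fourcc, client id and supported
	 * version range, then wait for the peer to accept (OPENACK) or
	 * reject it; either outcome completes remove_event.
	 */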
2483 	service->client_id = client_id;
2484 	vchiq_use_service_internal(service);
2485 	status = queue_message(service->state,
2486 			       NULL, MAKE_OPEN(service->localport),
2487 			       memcpy_copy_callback,
2488 			       &payload,
2489 			       sizeof(payload),
2490 			       QMFLAGS_IS_BLOCKING);
2491 
2492 	if (status)
2493 		return status;
2494 
2495 	/* Wait for the ACK/NAK */
2496 	if (wait_for_completion_interruptible(&service->remove_event)) {
2497 		status = -EAGAIN;
2498 		vchiq_release_service_internal(service);
2499 	} else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
2500 		   (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
2501 		if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
2502 			dev_err(service->state->dev,
2503 				"core: %d: osi - srvstate = %s (ref %u)\n",
2504 				service->state->id, srvstate_names[service->srvstate],
2505 				kref_read(&service->ref_count));
2506 		status = -EINVAL;
2507 		VCHIQ_SERVICE_STATS_INC(service, error_count);
2508 		vchiq_release_service_internal(service);
2509 	}
2510 
2511 	return status;
2512 }
2513 
2514 static void
2515 release_service_messages(struct vchiq_service *service)
2516 {
2517 	struct vchiq_state *state = service->state;
2518 	int slot_last = state->remote->slot_last;
2519 	int i;
2520 
2521 	/* Release any claimed messages aimed at this service */
2522 
2523 	if (service->sync) {
2524 		struct vchiq_header *header =
2525 			(struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2526 						state->remote->slot_sync);
2527 		if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport)
2528 			release_message_sync(state, header);
2529 
2530 		return;
2531 	}
2532 
2533 	for (i = state->remote->slot_first; i <= slot_last; i++) {
2534 		struct vchiq_slot_info *slot_info =
2535 			SLOT_INFO_FROM_INDEX(state, i);
2536 		unsigned int pos, end;
2537 		char *data;
2538 
2539 		if (slot_info->release_count == slot_info->use_count)
2540 			continue;
2541 
2542 		data = (char *)SLOT_DATA_FROM_INDEX(state, i);
2543 		end = VCHIQ_SLOT_SIZE;
2544 		if (data == state->rx_data)
2545 			/*
2546 			 * This buffer is still being read from - stop
2547 			 * at the current read position
2548 			 */
2549 			end = state->rx_pos & VCHIQ_SLOT_MASK;
2550 
2551 		pos = 0;
2552 
2553 		while (pos < end) {
2554 			struct vchiq_header *header =
2555 				(struct vchiq_header *)(data + pos);
2556 			int msgid = header->msgid;
2557 			int port = VCHIQ_MSG_DSTPORT(msgid);
2558 
2559 			if ((port == service->localport) && (msgid & VCHIQ_MSGID_CLAIMED)) {
2560 				dev_dbg(state->dev, "core:  fsi - hdr %pK\n", header);
2561 				release_slot(state, slot_info, header, NULL);
2562 			}
2563 			pos += calc_stride(header->size);
2564 			if (pos > VCHIQ_SLOT_SIZE) {
2565 				dev_err(state->dev,
2566 					"core: fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x\n",
2567 					pos, header, msgid, header->msgid, header->size);
2568 				WARN(1, "invalid slot position\n");
2569 			}
2570 		}
2571 	}
2572 }
2573 
2574 static int
2575 do_abort_bulks(struct vchiq_service *service)
2576 {
2577 	int status;
2578 
2579 	/* Abort any outstanding bulk transfers */
2580 	if (mutex_lock_killable(&service->bulk_mutex))
2581 		return 0;
2582 	abort_outstanding_bulks(service, &service->bulk_tx);
2583 	abort_outstanding_bulks(service, &service->bulk_rx);
2584 	mutex_unlock(&service->bulk_mutex);
2585 
2586 	status = notify_bulks(service, &service->bulk_tx, NO_RETRY_POLL);
2587 	if (status)
2588 		return 0;
2589 
2590 	status = notify_bulks(service, &service->bulk_rx, NO_RETRY_POLL);
2591 	return !status;
2592 }
2593 
2594 static int
2595 close_service_complete(struct vchiq_service *service, int failstate)
2596 {
2597 	int status;
2598 	int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2599 	int newstate;
2600 
2601 	switch (service->srvstate) {
2602 	case VCHIQ_SRVSTATE_OPEN:
2603 	case VCHIQ_SRVSTATE_CLOSESENT:
2604 	case VCHIQ_SRVSTATE_CLOSERECVD:
2605 		if (is_server) {
2606 			if (service->auto_close) {
2607 				service->client_id = 0;
2608 				service->remoteport = VCHIQ_PORT_FREE;
2609 				newstate = VCHIQ_SRVSTATE_LISTENING;
2610 			} else {
2611 				newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
2612 			}
2613 		} else {
2614 			newstate = VCHIQ_SRVSTATE_CLOSED;
2615 		}
2616 		set_service_state(service, newstate);
2617 		break;
2618 	case VCHIQ_SRVSTATE_LISTENING:
2619 		break;
2620 	default:
2621 		dev_err(service->state->dev, "core: (%x) called in state %s\n",
2622 			service->handle, srvstate_names[service->srvstate]);
2623 		WARN(1, "%s in unexpected state\n", __func__);
2624 		return -EINVAL;
2625 	}
2626 
2627 	status = make_service_callback(service, VCHIQ_SERVICE_CLOSED, NULL, NULL);
2628 
2629 	if (status != -EAGAIN) {
2630 		int uc = service->service_use_count;
2631 		int i;
2632 		/* Complete the close process */
2633 		for (i = 0; i < uc; i++)
2634 			/*
2635 			 * cater for cases where close is forced and the
2636 			 * client may not close all its handles
2637 			 */
2638 			vchiq_release_service_internal(service);
2639 
2640 		service->client_id = 0;
2641 		service->remoteport = VCHIQ_PORT_FREE;
2642 
2643 		if (service->srvstate == VCHIQ_SRVSTATE_CLOSED) {
2644 			vchiq_free_service_internal(service);
2645 		} else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
2646 			if (is_server)
2647 				service->closing = 0;
2648 
2649 			complete(&service->remove_event);
2650 		}
2651 	} else {
2652 		set_service_state(service, failstate);
2653 	}
2654 
2655 	return status;
2656 }
2657 
2658 /*
2659  * Prepares a bulk transfer to be queued. The function is interruptible and is
2660  * intended to be called from user threads. It may return -EAGAIN to indicate
2661  * that a signal has been received and the call should be retried after being
2662  * returned to user context.
2663  */
2664 static int
2665 vchiq_bulk_xfer_queue_msg_interruptible(struct vchiq_service *service,
2666 					void *offset, void __user *uoffset,
2667 					int size, void *userdata,
2668 					enum vchiq_bulk_mode mode,
2669 					enum vchiq_bulk_dir dir)
2670 {
2671 	struct vchiq_bulk_queue *queue;
2672 	struct bulk_waiter *bulk_waiter = NULL;
2673 	struct vchiq_bulk *bulk;
2674 	struct vchiq_state *state = service->state;
2675 	const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
2676 	const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
2677 		VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
2678 	int status = -EINVAL;
2679 	int payload[2];
2680 
2681 	if (mode == VCHIQ_BULK_MODE_BLOCKING) {
2682 		bulk_waiter = userdata;
2683 		init_completion(&bulk_waiter->event);
2684 		bulk_waiter->actual = 0;
2685 		bulk_waiter->bulk = NULL;
2686 	}
2687 
2688 	queue = (dir == VCHIQ_BULK_TRANSMIT) ?
2689 		&service->bulk_tx : &service->bulk_rx;
2690 
2691 	if (mutex_lock_killable(&service->bulk_mutex))
2692 		return -EAGAIN;
2693 
2694 	if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
2695 		VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
2696 		do {
2697 			mutex_unlock(&service->bulk_mutex);
2698 			if (wait_for_completion_interruptible(&service->bulk_remove_event))
2699 				return -EAGAIN;
2700 			if (mutex_lock_killable(&service->bulk_mutex))
2701 				return -EAGAIN;
2702 		} while (queue->local_insert == queue->remove +
2703 				VCHIQ_NUM_SERVICE_BULKS);
2704 	}
2705 
2706 	bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
2707 
2708 	bulk->mode = mode;
2709 	bulk->dir = dir;
2710 	bulk->userdata = userdata;
2711 	bulk->size = size;
2712 	bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
2713 
2714 	if (vchiq_prepare_bulk_data(service->instance, bulk, offset, uoffset, size, dir))
2715 		goto unlock_error_exit;
2716 
2717 	/*
2718 	 * Ensure that the bulk data record is visible to the peer
2719 	 * before proceeding.
2720 	 */
2721 	wmb();
2722 
2723 	dev_dbg(state->dev, "core: %d: bt (%d->%d) %cx %x@%pad %pK\n",
2724 		state->id, service->localport, service->remoteport,
2725 		dir_char, size, &bulk->data, userdata);
2726 
2727 	/*
2728 	 * The slot mutex must be held when the service is being closed, so
2729 	 * claim it here to ensure that isn't happening
2730 	 */
2731 	if (mutex_lock_killable(&state->slot_mutex)) {
2732 		status = -EAGAIN;
2733 		goto cancel_bulk_error_exit;
2734 	}
2735 
2736 	if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
2737 		goto unlock_both_error_exit;
2738 
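	/*
	 * The bulk message body tells the peer where the data lives and how
	 * big it is; only the low 32 bits of the bus address are carried.
	 */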
2739 	payload[0] = lower_32_bits(bulk->data);
2740 	payload[1] = bulk->size;
2741 	status = queue_message(state,
2742 			       NULL,
2743 			       VCHIQ_MAKE_MSG(dir_msgtype,
2744 					      service->localport,
2745 					      service->remoteport),
2746 			       memcpy_copy_callback,
2747 			       &payload,
2748 			       sizeof(payload),
2749 			       QMFLAGS_IS_BLOCKING |
2750 			       QMFLAGS_NO_MUTEX_LOCK |
2751 			       QMFLAGS_NO_MUTEX_UNLOCK);
2752 	if (status)
2753 		goto unlock_both_error_exit;
2754 
2755 	queue->local_insert++;
2756 
2757 	mutex_unlock(&state->slot_mutex);
2758 	mutex_unlock(&service->bulk_mutex);
2759 
2760 	dev_dbg(state->dev, "core: %d: bt:%d %cx li=%x ri=%x p=%x\n",
2761 		state->id, service->localport, dir_char, queue->local_insert,
2762 		queue->remote_insert, queue->process);
2763 
2764 	if (bulk_waiter) {
2765 		bulk_waiter->bulk = bulk;
2766 		if (wait_for_completion_interruptible(&bulk_waiter->event))
2767 			status = -EAGAIN;
2768 		else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
2769 			status = -EINVAL;
2770 	}
2771 
2772 	return status;
2773 
2774 unlock_both_error_exit:
2775 	mutex_unlock(&state->slot_mutex);
2776 cancel_bulk_error_exit:
2777 	vchiq_complete_bulk(service->instance, bulk);
2778 unlock_error_exit:
2779 	mutex_unlock(&service->bulk_mutex);
2780 
2781 	return status;
2782 }
2783 
2784 /* Called by the slot handler */
2785 int
2786 vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
2787 {
2788 	struct vchiq_state *state = service->state;
2789 	int status = 0;
2790 	int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2791 	int close_id = MAKE_CLOSE(service->localport,
2792 				  VCHIQ_MSG_DSTPORT(service->remoteport));
2793 
2794 	dev_dbg(state->dev, "core: %d: csi:%d,%d (%s)\n",
2795 		service->state->id, service->localport, close_recvd,
2796 		srvstate_names[service->srvstate]);
2797 
2798 	switch (service->srvstate) {
2799 	case VCHIQ_SRVSTATE_CLOSED:
2800 	case VCHIQ_SRVSTATE_HIDDEN:
2801 	case VCHIQ_SRVSTATE_LISTENING:
2802 	case VCHIQ_SRVSTATE_CLOSEWAIT:
2803 		if (close_recvd) {
2804 			dev_err(state->dev, "core: (1) called in state %s\n",
2805 				srvstate_names[service->srvstate]);
2806 		} else if (is_server) {
2807 			if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
2808 				status = -EINVAL;
2809 			} else {
2810 				service->client_id = 0;
2811 				service->remoteport = VCHIQ_PORT_FREE;
2812 				if (service->srvstate == VCHIQ_SRVSTATE_CLOSEWAIT)
2813 					set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
2814 			}
2815 			complete(&service->remove_event);
2816 		} else {
2817 			vchiq_free_service_internal(service);
2818 		}
2819 		break;
2820 	case VCHIQ_SRVSTATE_OPENING:
2821 		if (close_recvd) {
2822 			/* The open was rejected - tell the user */
2823 			set_service_state(service, VCHIQ_SRVSTATE_CLOSEWAIT);
2824 			complete(&service->remove_event);
2825 		} else {
2826 			/* Shutdown mid-open - let the other side know */
2827 			status = queue_message(state, service, close_id, NULL, NULL, 0, 0);
2828 		}
2829 		break;
2830 
2831 	case VCHIQ_SRVSTATE_OPENSYNC:
2832 		mutex_lock(&state->sync_mutex);
2833 		fallthrough;
2834 	case VCHIQ_SRVSTATE_OPEN:
2835 		if (close_recvd) {
2836 			if (!do_abort_bulks(service))
2837 				status = -EAGAIN;
2838 		}
2839 
2840 		release_service_messages(service);
2841 
2842 		if (!status)
2843 			status = queue_message(state, service, close_id, NULL,
2844 					       NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);
2845 
2846 		if (status) {
2847 			if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)
2848 				mutex_unlock(&state->sync_mutex);
2849 			break;
2850 		}
2851 
2852 		if (!close_recvd) {
2853 			/* Change the state while the mutex is still held */
2854 			set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
2855 			mutex_unlock(&state->slot_mutex);
2856 			if (service->sync)
2857 				mutex_unlock(&state->sync_mutex);
2858 			break;
2859 		}
2860 
2861 		/* Change the state while the mutex is still held */
2862 		set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
2863 		mutex_unlock(&state->slot_mutex);
2864 		if (service->sync)
2865 			mutex_unlock(&state->sync_mutex);
2866 
2867 		status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
2868 		break;
2869 
2870 	case VCHIQ_SRVSTATE_CLOSESENT:
2871 		if (!close_recvd)
2872 			/* This happens when a process is killed mid-close */
2873 			break;
2874 
2875 		if (!do_abort_bulks(service)) {
2876 			status = -EAGAIN;
2877 			break;
2878 		}
2879 
2880 		if (!status)
2881 			status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
2882 		break;
2883 
2884 	case VCHIQ_SRVSTATE_CLOSERECVD:
2885 		if (!close_recvd && is_server)
2886 			/* Force into LISTENING mode */
2887 			set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
2888 		status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
2889 		break;
2890 
2891 	default:
2892 		dev_err(state->dev, "core: (%d) called in state %s\n",
2893 			close_recvd, srvstate_names[service->srvstate]);
2894 		break;
2895 	}
2896 
2897 	return status;
2898 }
2899 
2900 /* Called from the application process upon process death */
2901 void
2902 vchiq_terminate_service_internal(struct vchiq_service *service)
2903 {
2904 	struct vchiq_state *state = service->state;
2905 
2906 	dev_dbg(state->dev, "core: %d: tsi - (%d<->%d)\n",
2907 		state->id, service->localport, service->remoteport);
2908 
2909 	mark_service_closing(service);
2910 
2911 	/* Mark the service for removal by the slot handler */
2912 	request_poll(state, service, VCHIQ_POLL_REMOVE);
2913 }
2914 
2915 /* Called from the slot handler */
2916 void
2917 vchiq_free_service_internal(struct vchiq_service *service)
2918 {
2919 	struct vchiq_state *state = service->state;
2920 
2921 	dev_dbg(state->dev, "core: %d: fsi - (%d)\n", state->id, service->localport);
2922 
2923 	switch (service->srvstate) {
2924 	case VCHIQ_SRVSTATE_OPENING:
2925 	case VCHIQ_SRVSTATE_CLOSED:
2926 	case VCHIQ_SRVSTATE_HIDDEN:
2927 	case VCHIQ_SRVSTATE_LISTENING:
2928 	case VCHIQ_SRVSTATE_CLOSEWAIT:
2929 		break;
2930 	default:
2931 		dev_err(state->dev, "core: %d: fsi - (%d) in state %s\n",
2932 			state->id, service->localport, srvstate_names[service->srvstate]);
2933 		return;
2934 	}
2935 
2936 	set_service_state(service, VCHIQ_SRVSTATE_FREE);
2937 
2938 	complete(&service->remove_event);
2939 
2940 	/* Release the initial lock */
2941 	vchiq_service_put(service);
2942 }
2943 
2944 int
2945 vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2946 {
2947 	struct vchiq_service *service;
2948 	int i;
2949 
2950 	/* Find all services registered to this client and enable them. */
2951 	i = 0;
2952 	while ((service = next_service_by_instance(state, instance, &i)) != NULL) {
2953 		if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
2954 			set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
2955 		vchiq_service_put(service);
2956 	}
2957 
2958 	if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
2959 		if (queue_message(state, NULL, MAKE_CONNECT, NULL, NULL, 0,
2960 				  QMFLAGS_IS_BLOCKING) == -EAGAIN)
2961 			return -EAGAIN;
2962 
2963 		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
2964 	}
2965 
2966 	if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
2967 		if (wait_for_completion_interruptible(&state->connect))
2968 			return -EAGAIN;
2969 
2970 		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
2971 		complete(&state->connect);
2972 	}
2973 
2974 	return 0;
2975 }
2976 
2977 void
2978 vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2979 {
2980 	struct vchiq_service *service;
2981 	int i;
2982 
2983 	/* Find all services registered to this client and remove them. */
2984 	i = 0;
2985 	while ((service = next_service_by_instance(state, instance, &i)) != NULL) {
2986 		(void)vchiq_remove_service(instance, service->handle);
2987 		vchiq_service_put(service);
2988 	}
2989 }
2990 
2991 int
2992 vchiq_close_service(struct vchiq_instance *instance, unsigned int handle)
2993 {
2994 	/* Unregister the service */
2995 	struct vchiq_service *service = find_service_by_handle(instance, handle);
2996 	int status = 0;
2997 
2998 	if (!service)
2999 		return -EINVAL;
3000 
3001 	dev_dbg(service->state->dev, "core: %d: close_service:%d\n",
3002 		service->state->id, service->localport);
3003 
3004 	if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
3005 	    (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
3006 	    (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
3007 		vchiq_service_put(service);
3008 		return -EINVAL;
3009 	}
3010 
3011 	mark_service_closing(service);
3012 
3013 	if (current == service->state->slot_handler_thread) {
3014 		status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
3015 		WARN_ON(status == -EAGAIN);
3016 	} else {
3017 		/* Mark the service for termination by the slot handler */
3018 		request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
3019 	}
3020 
3021 	while (1) {
3022 		if (wait_for_completion_interruptible(&service->remove_event)) {
3023 			status = -EAGAIN;
3024 			break;
3025 		}
3026 
3027 		if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
3028 		    (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
3029 		    (service->srvstate == VCHIQ_SRVSTATE_OPEN))
3030 			break;
3031 
3032 		dev_warn(service->state->dev,
3033 			 "core: %d: close_service:%d - waiting in state %s\n",
3034 			 service->state->id, service->localport,
3035 			 srvstate_names[service->srvstate]);
3036 	}
3037 
3038 	if (!status &&
3039 	    (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
3040 	    (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
3041 		status = -EINVAL;
3042 
3043 	vchiq_service_put(service);
3044 
3045 	return status;
3046 }
3047 EXPORT_SYMBOL(vchiq_close_service);
3048 
3049 int
3050 vchiq_remove_service(struct vchiq_instance *instance, unsigned int handle)
3051 {
3052 	/* Unregister the service */
3053 	struct vchiq_service *service = find_service_by_handle(instance, handle);
3054 	int status = 0;
3055 
3056 	if (!service)
3057 		return -EINVAL;
3058 
3059 	dev_dbg(service->state->dev, "core: %d: remove_service:%d\n",
3060 		service->state->id, service->localport);
3061 
3062 	if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
3063 		vchiq_service_put(service);
3064 		return -EINVAL;
3065 	}
3066 
3067 	mark_service_closing(service);
3068 
3069 	if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3070 	    (current == service->state->slot_handler_thread)) {
3071 		/*
3072 		 * Make it look like a client, because it must be removed and
3073 		 * not left in the LISTENING state.
3074 		 */
3075 		service->public_fourcc = VCHIQ_FOURCC_INVALID;
3076 
3077 		status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
3078 		WARN_ON(status == -EAGAIN);
3079 	} else {
3080 		/* Mark the service for removal by the slot handler */
3081 		request_poll(service->state, service, VCHIQ_POLL_REMOVE);
3082 	}
3083 	while (1) {
3084 		if (wait_for_completion_interruptible(&service->remove_event)) {
3085 			status = -EAGAIN;
3086 			break;
3087 		}
3088 
3089 		if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
3090 		    (service->srvstate == VCHIQ_SRVSTATE_OPEN))
3091 			break;
3092 
3093 		dev_warn(service->state->dev,
3094 			 "core: %d: remove_service:%d - waiting in state %s\n",
3095 			 service->state->id, service->localport,
3096 			 srvstate_names[service->srvstate]);
3097 	}
3098 
3099 	if (!status && (service->srvstate != VCHIQ_SRVSTATE_FREE))
3100 		status = -EINVAL;
3101 
3102 	vchiq_service_put(service);
3103 
3104 	return status;
3105 }
3106 
3107 int
3108 vchiq_bulk_xfer_blocking_interruptible(struct vchiq_instance *instance, unsigned int handle,
3109 				       void *offset, void __user *uoffset, int size,
3110 				       void __user *userdata, enum vchiq_bulk_dir dir)
3111 {
3112 	struct vchiq_service *service = find_service_by_handle(instance, handle);
3113 	enum vchiq_bulk_mode mode = VCHIQ_BULK_MODE_BLOCKING;
3114 	int status = -EINVAL;
3115 
3116 	if (!service)
3117 		return -EINVAL;
3118 
3119 	if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3120 		goto error_exit;
3121 
3122 	if (!offset && !uoffset)
3123 		goto error_exit;
3124 
3125 	if (vchiq_check_service(service))
3126 		goto error_exit;
3127
3129 	status = vchiq_bulk_xfer_queue_msg_interruptible(service, offset, uoffset, size,
3130 							 userdata, mode, dir);
3131 
3132 error_exit:
3133 	vchiq_service_put(service);
3134 
3135 	return status;
3136 }
3137 
3138 int
3139 vchiq_bulk_xfer_callback_interruptible(struct vchiq_instance *instance, unsigned int handle,
3140 				       void *offset, void __user *uoffset, int size,
3141 				       enum vchiq_bulk_mode mode, void *userdata,
3142 				       enum vchiq_bulk_dir dir)
3143 {
3144 	struct vchiq_service *service = find_service_by_handle(instance, handle);
3145 	int status = -EINVAL;
3146 
3147 	if (!service)
3148 		return -EINVAL;
3149 
3150 	if (mode != VCHIQ_BULK_MODE_CALLBACK &&
3151 	    mode != VCHIQ_BULK_MODE_NOCALLBACK)
3152 		goto error_exit;
3153 
3154 	if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3155 		goto error_exit;
3156 
3157 	if (!offset && !uoffset)
3158 		goto error_exit;
3159 
3160 	if (vchiq_check_service(service))
3161 		goto error_exit;
3162 
3163 	status = vchiq_bulk_xfer_queue_msg_interruptible(service, offset, uoffset,
3164 							 size, userdata, mode, dir);
3165 
3166 error_exit:
3167 	vchiq_service_put(service);
3168 
3169 	return status;
3170 }
3171 
3172 /*
3173  * This function is called by the VCHIQ ioctl interface and is interruptible.
3174  * It may return -EAGAIN to indicate that a signal has been received
3175  * and the call should be retried after being returned to user context.
3176  */
3177 int
3178 vchiq_bulk_xfer_waiting_interruptible(struct vchiq_instance *instance,
3179 				      unsigned int handle, struct bulk_waiter *userdata)
3180 {
3181 	struct vchiq_service *service = find_service_by_handle(instance, handle);
3182 	struct bulk_waiter *bulk_waiter;
3183 	int status = -EINVAL;
3184 
3185 	if (!service)
3186 		return -EINVAL;
3187 
3188 	if (!userdata)
3189 		goto error_exit;
3190 
3191 	if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3192 		goto error_exit;
3193 
3194 	if (vchiq_check_service(service))
3195 		goto error_exit;
3196 
3197 	bulk_waiter = userdata;
3198 
3199 	vchiq_service_put(service);
3200 
3201 	status = 0;
3202 
3203 	if (wait_for_completion_interruptible(&bulk_waiter->event))
3204 		return -EAGAIN;
3205 	else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
3206 		return -EINVAL;
3207 
3208 	return status;
3209 
3210 error_exit:
3211 	vchiq_service_put(service);
3212 
3213 	return status;
3214 }
3215 
3216 int
3217 vchiq_queue_message(struct vchiq_instance *instance, unsigned int handle,
3218 		    ssize_t (*copy_callback)(void *context, void *dest,
3219 					     size_t offset, size_t maxsize),
3220 		    void *context,
3221 		    size_t size)
3222 {
3223 	struct vchiq_service *service = find_service_by_handle(instance, handle);
3224 	int status = -EINVAL;
3225 	int data_id;
3226 
3227 	if (!service)
3228 		goto error_exit;
3229 
3230 	if (vchiq_check_service(service))
3231 		goto error_exit;
3232 
3233 	if (!size) {
3234 		VCHIQ_SERVICE_STATS_INC(service, error_count);
3235 		goto error_exit;
3236 	}
3237 
3238 	if (size > VCHIQ_MAX_MSG_SIZE) {
3239 		VCHIQ_SERVICE_STATS_INC(service, error_count);
3240 		goto error_exit;
3241 	}
3242 
3243 	data_id = MAKE_DATA(service->localport, service->remoteport);
3244 
3245 	switch (service->srvstate) {
3246 	case VCHIQ_SRVSTATE_OPEN:
3247 		status = queue_message(service->state, service, data_id,
3248 				       copy_callback, context, size,
3249 				       QMFLAGS_IS_BLOCKING);
3250 		break;
3251 	case VCHIQ_SRVSTATE_OPENSYNC:
3252 		status = queue_message_sync(service->state, service, data_id,
3253 					    copy_callback, context, size);
3254 		break;
3255 	default:
3256 		status = -EINVAL;
3257 		break;
3258 	}
3259 
3260 error_exit:
3261 	if (service)
3262 		vchiq_service_put(service);
3263 
3264 	return status;
3265 }
3266 
3267 int vchiq_queue_kernel_message(struct vchiq_instance *instance, unsigned int handle, void *data,
3268 			       unsigned int size)
3269 {
3270 	int status;
3271 
3272 	while (1) {
3273 		status = vchiq_queue_message(instance, handle, memcpy_copy_callback,
3274 					     data, size);
3275 
3276 		/*
3277 		 * vchiq_queue_message() may return -EAGAIN, so we need to
3278 		 * implement a retry mechanism since this function is supposed
3279 		 * to block until queued
3280 		 */
3281 		if (status != -EAGAIN)
3282 			break;
3283 
3284 		msleep(1);
3285 	}
3286 
3287 	return status;
3288 }
3289 EXPORT_SYMBOL(vchiq_queue_kernel_message);
3290 
3291 void
3292 vchiq_release_message(struct vchiq_instance *instance, unsigned int handle,
3293 		      struct vchiq_header *header)
3294 {
3295 	struct vchiq_service *service = find_service_by_handle(instance, handle);
3296 	struct vchiq_shared_state *remote;
3297 	struct vchiq_state *state;
3298 	int slot_index;
3299 
3300 	if (!service)
3301 		return;
3302 
3303 	state = service->state;
3304 	remote = state->remote;
3305 
3306 	slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
3307 
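	/*
	 * Ordinary messages live in the peer's data slot range and release
	 * their claim on that slot; synchronous messages use the dedicated
	 * sync slot and are released by signalling the peer.
	 */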
3308 	if ((slot_index >= remote->slot_first) &&
3309 	    (slot_index <= remote->slot_last)) {
3310 		int msgid = header->msgid;
3311 
3312 		if (msgid & VCHIQ_MSGID_CLAIMED) {
3313 			struct vchiq_slot_info *slot_info =
3314 				SLOT_INFO_FROM_INDEX(state, slot_index);
3315 
3316 			release_slot(state, slot_info, header, service);
3317 		}
3318 	} else if (slot_index == remote->slot_sync) {
3319 		release_message_sync(state, header);
3320 	}
3321 
3322 	vchiq_service_put(service);
3323 }
3324 EXPORT_SYMBOL(vchiq_release_message);
3325 
3326 static void
3327 release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
3328 {
3329 	header->msgid = VCHIQ_MSGID_PADDING;
3330 	remote_event_signal(state, &state->remote->sync_release);
3331 }
3332 
3333 int
3334 vchiq_get_peer_version(struct vchiq_instance *instance, unsigned int handle, short *peer_version)
3335 {
3336 	int status = -EINVAL;
3337 	struct vchiq_service *service = find_service_by_handle(instance, handle);
3338 
3339 	if (!service)
3340 		goto exit;
3341 
3342 	if (vchiq_check_service(service))
3343 		goto exit;
3344 
3345 	if (!peer_version)
3346 		goto exit;
3347 
3348 	*peer_version = service->peer_version;
3349 	status = 0;
3350 
3351 exit:
3352 	if (service)
3353 		vchiq_service_put(service);
3354 	return status;
3355 }
3356 EXPORT_SYMBOL(vchiq_get_peer_version);
3357 
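/*
 * Fill in the caller-supplied vchiq_config with the compile-time limits
 * (message size, bulk threshold, outstanding bulks, service count) and
 * the supported version range.
 */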
3358 void vchiq_get_config(struct vchiq_config *config)
3359 {
3360 	config->max_msg_size           = VCHIQ_MAX_MSG_SIZE;
3361 	config->bulk_threshold         = VCHIQ_MAX_MSG_SIZE;
3362 	config->max_outstanding_bulks  = VCHIQ_NUM_SERVICE_BULKS;
3363 	config->max_services           = VCHIQ_MAX_SERVICES;
3364 	config->version                = VCHIQ_VERSION;
3365 	config->version_min            = VCHIQ_VERSION_MIN;
3366 }
3367 
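/*
 * Adjust a per-service option.  AUTOCLOSE, SYNCHRONOUS and TRACE simply
 * store the value (SYNCHRONOUS only while the service is still HIDDEN or
 * LISTENING).  For SLOT_QUOTA and MESSAGE_QUOTA a value of 0 restores the
 * state-wide default, and the new quota is accepted only if it is not
 * below the current use count and is below the unsigned short limit; when
 * a change may have taken the service back under both limits, quota_event
 * is completed to wake a blocked sender.  An illustrative call, where
 * 'instance' and 'handle' are placeholders:
 *
 *	vchiq_set_service_option(instance, handle,
 *				 VCHIQ_SERVICE_OPTION_TRACE, 1);
 */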
3368 int
3369 vchiq_set_service_option(struct vchiq_instance *instance, unsigned int handle,
3370 			 enum vchiq_service_option option, int value)
3371 {
3372 	struct vchiq_service *service = find_service_by_handle(instance, handle);
3373 	struct vchiq_service_quota *quota;
3374 	int ret = -EINVAL;
3375 
3376 	if (!service)
3377 		return -EINVAL;
3378 
3379 	switch (option) {
3380 	case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
3381 		service->auto_close = value;
3382 		ret = 0;
3383 		break;
3384 
3385 	case VCHIQ_SERVICE_OPTION_SLOT_QUOTA:
3386 		quota = &service->state->service_quotas[service->localport];
3387 		if (value == 0)
3388 			value = service->state->default_slot_quota;
3389 		if ((value >= quota->slot_use_count) &&
3390 		    (value < (unsigned short)~0)) {
3391 			quota->slot_quota = value;
3392 			if ((value >= quota->slot_use_count) &&
3393 			    (quota->message_quota >= quota->message_use_count))
3394 				/*
3395 				 * Signal the service that it may have
3396 				 * dropped below its quota
3397 				 */
3398 				complete(&quota->quota_event);
3399 			ret = 0;
3400 		}
3401 		break;
3402 
3403 	case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA:
3404 		quota = &service->state->service_quotas[service->localport];
3405 		if (value == 0)
3406 			value = service->state->default_message_quota;
3407 		if ((value >= quota->message_use_count) &&
3408 		    (value < (unsigned short)~0)) {
3409 			quota->message_quota = value;
3410 			if ((value >= quota->message_use_count) &&
3411 			    (quota->slot_quota >= quota->slot_use_count))
3412 				/*
3413 				 * Signal the service that it may have
3414 				 * dropped below its quota
3415 				 */
3416 				complete(&quota->quota_event);
3417 			ret = 0;
3418 		}
3419 		break;
3420 
3421 	case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
3422 		if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3423 		    (service->srvstate == VCHIQ_SRVSTATE_LISTENING)) {
3424 			service->sync = value;
3425 			ret = 0;
3426 		}
3427 		break;
3428 
3429 	case VCHIQ_SERVICE_OPTION_TRACE:
3430 		service->trace = value;
3431 		ret = 0;
3432 		break;
3433 
3434 	default:
3435 		break;
3436 	}
3437 	vchiq_service_put(service);
3438 
3439 	return ret;
3440 }
3441 
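/*
 * debugfs helper: print one side of the shared-memory state (slot range,
 * transmit position and recycle count), followed by any slots with
 * outstanding claims (use_count != release_count) and the debug counters
 * published in the shared debug[] array.
 */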
3442 static void
3443 vchiq_dump_shared_state(struct seq_file *f, struct vchiq_state *state,
3444 			struct vchiq_shared_state *shared, const char *label)
3445 {
3446 	static const char *const debug_names[] = {
3447 		"<entries>",
3448 		"SLOT_HANDLER_COUNT",
3449 		"SLOT_HANDLER_LINE",
3450 		"PARSE_LINE",
3451 		"PARSE_HEADER",
3452 		"PARSE_MSGID",
3453 		"AWAIT_COMPLETION_LINE",
3454 		"DEQUEUE_MESSAGE_LINE",
3455 		"SERVICE_CALLBACK_LINE",
3456 		"MSG_QUEUE_FULL_COUNT",
3457 		"COMPLETION_QUEUE_FULL_COUNT"
3458 	};
3459 	int i;
3460 
3461 	seq_printf(f, "  %s: slots %d-%d tx_pos=0x%x recycle=0x%x\n",
3462 		   label, shared->slot_first, shared->slot_last,
3463 		   shared->tx_pos, shared->slot_queue_recycle);
3464 
3465 	seq_puts(f, "    Slots claimed:\n");
3466 
3467 	for (i = shared->slot_first; i <= shared->slot_last; i++) {
3468 		struct vchiq_slot_info slot_info =
3469 						*SLOT_INFO_FROM_INDEX(state, i);
3470 		if (slot_info.use_count != slot_info.release_count) {
3471 			seq_printf(f, "      %d: %d/%d\n", i, slot_info.use_count,
3472 				   slot_info.release_count);
3473 		}
3474 	}
3475 
3476 	for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
3477 		seq_printf(f, "    DEBUG: %s = %d(0x%x)\n",
3478 			   debug_names[i], shared->debug[i], shared->debug[i]);
3479 	}
3480 }
3481 
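/*
 * debugfs helper: print a summary line for one service (local port, state,
 * reference count) and, for non-free services, the remote port, message and
 * slot quota usage, pending bulk transfers and, when VCHIQ_ENABLE_STATS is
 * set, the accumulated control and bulk traffic counters.
 */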
3482 static void
3483 vchiq_dump_service_state(struct seq_file *f, struct vchiq_service *service)
3484 {
3485 	unsigned int ref_count;
3486 
3487 	/* Don't include the reference just taken */
3488 	ref_count = kref_read(&service->ref_count) - 1;
3489 	seq_printf(f, "Service %u: %s (ref %u)", service->localport,
3490 		   srvstate_names[service->srvstate], ref_count);
3491 
3492 	if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
3493 		char remoteport[30];
3494 		struct vchiq_service_quota *quota =
3495 			&service->state->service_quotas[service->localport];
3496 		int fourcc = service->base.fourcc;
3497 		int tx_pending, rx_pending, tx_size = 0, rx_size = 0;
3498 
3499 		if (service->remoteport != VCHIQ_PORT_FREE) {
3500 			int len2 = scnprintf(remoteport, sizeof(remoteport),
3501 				"%u", service->remoteport);
3502 
3503 			if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
3504 				scnprintf(remoteport + len2, sizeof(remoteport) - len2,
3505 					  " (client 0x%x)", service->client_id);
3506 		} else {
3507 			strscpy(remoteport, "n/a", sizeof(remoteport));
3508 		}
3509 
3510 		seq_printf(f, " '%p4cc' remote %s (msg use %d/%d, slot use %d/%d)\n",
3511 			   &fourcc, remoteport,
3512 			   quota->message_use_count, quota->message_quota,
3513 			   quota->slot_use_count, quota->slot_quota);
3514 
3515 		tx_pending = service->bulk_tx.local_insert -
3516 			service->bulk_tx.remote_insert;
3517 		if (tx_pending) {
3518 			unsigned int i = BULK_INDEX(service->bulk_tx.remove);
3519 
3520 			tx_size = service->bulk_tx.bulks[i].size;
3521 		}
3522 
3523 		rx_pending = service->bulk_rx.local_insert -
3524 			service->bulk_rx.remote_insert;
3525 		if (rx_pending) {
3526 			unsigned int i = BULK_INDEX(service->bulk_rx.remove);
3527 
3528 			rx_size = service->bulk_rx.bulks[i].size;
3529 		}
3530 
3531 		seq_printf(f, "  Bulk: tx_pending=%d (size %d), rx_pending=%d (size %d)\n",
3532 			   tx_pending, tx_size, rx_pending, rx_size);
3533 
3534 		if (VCHIQ_ENABLE_STATS) {
3535 			seq_printf(f, "  Ctrl: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu\n",
3536 				   service->stats.ctrl_tx_count,
3537 				   service->stats.ctrl_tx_bytes,
3538 				   service->stats.ctrl_rx_count,
3539 				   service->stats.ctrl_rx_bytes);
3540 
3541 			seq_printf(f, "  Bulk: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu\n",
3542 				   service->stats.bulk_tx_count,
3543 				   service->stats.bulk_tx_bytes,
3544 				   service->stats.bulk_rx_count,
3545 				   service->stats.bulk_rx_bytes);
3546 
3547 			seq_printf(f, "  %d quota stalls, %d slot stalls, %d bulk stalls, %d aborted, %d errors\n",
3548 				   service->stats.quota_stalls,
3549 				   service->stats.slot_stalls,
3550 				   service->stats.bulk_stalls,
3551 				   service->stats.bulk_aborted_count,
3552 				   service->stats.error_count);
3553 		}
3554 	}
3555 
3556 	vchiq_dump_platform_service_state(f, service);
3557 }
3558 
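/*
 * Top-level debugfs dump: connection state, transmit/receive positions,
 * version information, slot availability, both shared-state halves,
 * platform-specific details and finally every service slot up to
 * state->unused_service.
 */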
3559 void vchiq_dump_state(struct seq_file *f, struct vchiq_state *state)
3560 {
3561 	int i;
3562 
3563 	seq_printf(f, "State %d: %s\n", state->id,
3564 		   conn_state_names[state->conn_state]);
3565 
3566 	seq_printf(f, "  tx_pos=0x%x(@%pK), rx_pos=0x%x(@%pK)\n",
3567 		   state->local->tx_pos,
3568 		   state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
3569 		   state->rx_pos,
3570 		   state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
3571 
3572 	seq_printf(f, "  Version: %d (min %d)\n", VCHIQ_VERSION,
3573 		   VCHIQ_VERSION_MIN);
3574 
3575 	if (VCHIQ_ENABLE_STATS) {
3576 		seq_printf(f, "  Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, error_count=%d\n",
3577 			   state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
3578 			   state->stats.error_count);
3579 	}
3580 
3581 	seq_printf(f, "  Slots: %d available (%d data), %d recyclable, %d stalls (%d data)\n",
3582 		   ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
3583 		   state->local_tx_pos) / VCHIQ_SLOT_SIZE,
3584 		   state->data_quota - state->data_use_count,
3585 		   state->local->slot_queue_recycle - state->slot_queue_available,
3586 		   state->stats.slot_stalls, state->stats.data_stalls);
3587 
3588 	vchiq_dump_platform_state(f);
3589 
3590 	vchiq_dump_shared_state(f, state, state->local, "Local");
3591 
3592 	vchiq_dump_shared_state(f, state, state->remote, "Remote");
3593 
3594 	vchiq_dump_platform_instances(state, f);
3595 
3596 	for (i = 0; i < state->unused_service; i++) {
3597 		struct vchiq_service *service = find_service_by_port(state, i);
3598 
3599 		if (service) {
3600 			vchiq_dump_service_state(f, service);
3601 			vchiq_service_put(service);
3602 		}
3603 	}
3604 }
3605 
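/*
 * Send the parameterless REMOTE_USE / REMOTE_USE_ACTIVE control messages to
 * the peer.  These appear to be used by the platform use-counting code to
 * tell the VideoCore that the ARM side is using, or actively using, the
 * link; both fail with -ENOTCONN while the state is still disconnected.
 */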
3606 int vchiq_send_remote_use(struct vchiq_state *state)
3607 {
3608 	if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
3609 		return -ENOTCONN;
3610 
3611 	return queue_message(state, NULL, MAKE_REMOTE_USE, NULL, NULL, 0, 0);
3612 }
3613 
3614 int vchiq_send_remote_use_active(struct vchiq_state *state)
3615 {
3616 	if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
3617 		return -ENOTCONN;
3618 
3619 	return queue_message(state, NULL, MAKE_REMOTE_USE_ACTIVE,
3620 			     NULL, NULL, 0, 0);
3621 }
3622 
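/*
 * Emit a hex dump of a memory region via dev_dbg(), 16 bytes per line:
 * each line carries the running address, the bytes in hex and their
 * printable-ASCII rendering (non-printable bytes shown as '.').  The
 * label, when non-empty, is included in the line prefix.
 */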
3623 void vchiq_log_dump_mem(struct device *dev, const char *label, u32 addr,
3624 			const void *void_mem, size_t num_bytes)
3625 {
3626 	const u8 *mem = void_mem;
3627 	size_t offset;
3628 	char line_buf[100];
3629 	char *s;
3630 
3631 	while (num_bytes > 0) {
3632 		s = line_buf;
3633 
3634 		for (offset = 0; offset < 16; offset++) {
3635 			if (offset < num_bytes)
3636 				s += scnprintf(s, 4, "%02x ", mem[offset]);
3637 			else
3638 				s += scnprintf(s, 4, "   ");
3639 		}
3640 
3641 		for (offset = 0; offset < 16; offset++) {
3642 			if (offset < num_bytes) {
3643 				u8 ch = mem[offset];
3644 
3645 				if ((ch < ' ') || (ch > '~'))
3646 					ch = '.';
3647 				*s++ = (char)ch;
3648 			}
3649 		}
3650 		*s++ = '\0';
3651 
3652 		if (label && (*label != '\0'))
3653 			dev_dbg(dev, "core: %s: %08x: %s\n", label, addr, line_buf);
3654 		else
3655 			dev_dbg(dev, "core: %08x: %s\n", addr, line_buf);
3656 
3657 		addr += 16;
3658 		mem += 16;
3659 		if (num_bytes > 16)
3660 			num_bytes -= 16;
3661 		else
3662 			num_bytes = 0;
3663 	}
3664 }
3665