1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
3 
4 #include <linux/types.h>
5 #include <linux/completion.h>
6 #include <linux/mutex.h>
7 #include <linux/bitops.h>
8 #include <linux/kthread.h>
9 #include <linux/wait.h>
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/kref.h>
13 #include <linux/rcupdate.h>
14 #include <linux/sched/signal.h>
15 
16 #include "vchiq_core.h"
17 
18 #define VCHIQ_SLOT_HANDLER_STACK 8192
19 
20 #define HANDLE_STATE_SHIFT 12
21 
22 #define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
23 #define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
24 #define SLOT_INDEX_FROM_DATA(state, data) \
25 	(((unsigned int)((char *)data - (char *)state->slot_data)) / \
26 	VCHIQ_SLOT_SIZE)
27 #define SLOT_INDEX_FROM_INFO(state, info) \
28 	((unsigned int)(info - state->slot_info))
29 #define SLOT_QUEUE_INDEX_FROM_POS(pos) \
30 	((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
31 #define SLOT_QUEUE_INDEX_FROM_POS_MASKED(pos) \
32 	(SLOT_QUEUE_INDEX_FROM_POS(pos) & VCHIQ_SLOT_QUEUE_MASK)
33 
#define BULK_INDEX(x) ((x) & (VCHIQ_NUM_SERVICE_BULKS - 1))
35 
36 #define SRVTRACE_LEVEL(srv) \
37 	(((srv) && (srv)->trace) ? VCHIQ_LOG_TRACE : vchiq_core_msg_log_level)
38 #define SRVTRACE_ENABLED(srv, lev) \
39 	(((srv) && (srv)->trace) || (vchiq_core_msg_log_level >= (lev)))
40 
41 struct vchiq_open_payload {
42 	int fourcc;
43 	int client_id;
44 	short version;
45 	short version_min;
46 };
47 
48 struct vchiq_openack_payload {
49 	short version;
50 };
51 
52 enum {
53 	QMFLAGS_IS_BLOCKING     = BIT(0),
54 	QMFLAGS_NO_MUTEX_LOCK   = BIT(1),
55 	QMFLAGS_NO_MUTEX_UNLOCK = BIT(2)
56 };
57 
58 /* we require this for consistency between endpoints */
59 vchiq_static_assert(sizeof(struct vchiq_header) == 8);
60 vchiq_static_assert(IS_POW2(sizeof(struct vchiq_header)));
61 vchiq_static_assert(IS_POW2(VCHIQ_NUM_CURRENT_BULKS));
62 vchiq_static_assert(IS_POW2(VCHIQ_NUM_SERVICE_BULKS));
63 vchiq_static_assert(IS_POW2(VCHIQ_MAX_SERVICES));
64 vchiq_static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
65 
66 /* Run time control of log level, based on KERN_XXX level. */
67 int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
68 int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
69 int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
70 
71 DEFINE_SPINLOCK(bulk_waiter_spinlock);
72 static DEFINE_SPINLOCK(quota_spinlock);
73 
74 struct vchiq_state *vchiq_states[VCHIQ_MAX_STATES];
75 static unsigned int handle_seq;
76 
77 static const char *const srvstate_names[] = {
78 	"FREE",
79 	"HIDDEN",
80 	"LISTENING",
81 	"OPENING",
82 	"OPEN",
83 	"OPENSYNC",
84 	"CLOSESENT",
85 	"CLOSERECVD",
86 	"CLOSEWAIT",
87 	"CLOSED"
88 };
89 
90 static const char *const reason_names[] = {
91 	"SERVICE_OPENED",
92 	"SERVICE_CLOSED",
93 	"MESSAGE_AVAILABLE",
94 	"BULK_TRANSMIT_DONE",
95 	"BULK_RECEIVE_DONE",
96 	"BULK_TRANSMIT_ABORTED",
97 	"BULK_RECEIVE_ABORTED"
98 };
99 
100 static const char *const conn_state_names[] = {
101 	"DISCONNECTED",
102 	"CONNECTING",
103 	"CONNECTED",
104 	"PAUSING",
105 	"PAUSE_SENT",
106 	"PAUSED",
107 	"RESUMING",
108 	"PAUSE_TIMEOUT",
109 	"RESUME_TIMEOUT"
110 };
111 
112 static void
113 release_message_sync(struct vchiq_state *state, struct vchiq_header *header);
114 
static const char *msg_type_str(unsigned int msg_type)
116 {
117 	switch (msg_type) {
118 	case VCHIQ_MSG_PADDING:       return "PADDING";
119 	case VCHIQ_MSG_CONNECT:       return "CONNECT";
120 	case VCHIQ_MSG_OPEN:          return "OPEN";
121 	case VCHIQ_MSG_OPENACK:       return "OPENACK";
122 	case VCHIQ_MSG_CLOSE:         return "CLOSE";
123 	case VCHIQ_MSG_DATA:          return "DATA";
124 	case VCHIQ_MSG_BULK_RX:       return "BULK_RX";
125 	case VCHIQ_MSG_BULK_TX:       return "BULK_TX";
126 	case VCHIQ_MSG_BULK_RX_DONE:  return "BULK_RX_DONE";
127 	case VCHIQ_MSG_BULK_TX_DONE:  return "BULK_TX_DONE";
128 	case VCHIQ_MSG_PAUSE:         return "PAUSE";
129 	case VCHIQ_MSG_RESUME:        return "RESUME";
130 	case VCHIQ_MSG_REMOTE_USE:    return "REMOTE_USE";
131 	case VCHIQ_MSG_REMOTE_RELEASE:      return "REMOTE_RELEASE";
132 	case VCHIQ_MSG_REMOTE_USE_ACTIVE:   return "REMOTE_USE_ACTIVE";
133 	}
134 	return "???";
135 }
136 
137 static inline void
vchiq_set_service_state(struct vchiq_service *service, int newstate)
139 {
140 	vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
141 		service->state->id, service->localport,
142 		srvstate_names[service->srvstate],
143 		srvstate_names[newstate]);
144 	service->srvstate = newstate;
145 }
146 
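/*
 * Look up a service by handle under RCU and take a reference on it.
 * Returns NULL if the handle is stale or the service is being freed.
 */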
147 struct vchiq_service *
find_service_by_handle(unsigned int handle)
149 {
150 	struct vchiq_service *service;
151 
152 	rcu_read_lock();
153 	service = handle_to_service(handle);
154 	if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
155 	    service->handle == handle &&
156 	    kref_get_unless_zero(&service->ref_count)) {
157 		service = rcu_pointer_handoff(service);
158 		rcu_read_unlock();
159 		return service;
160 	}
161 	rcu_read_unlock();
162 	vchiq_log_info(vchiq_core_log_level,
163 		       "Invalid service handle 0x%x", handle);
164 	return NULL;
165 }
166 
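/*
 * Look up a service by its local port number, returning it with a
 * reference held, or NULL if the port is out of range or unused.
 */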
167 struct vchiq_service *
find_service_by_port(struct vchiq_state *state, int localport)
169 {
171 	if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
172 		struct vchiq_service *service;
173 
174 		rcu_read_lock();
175 		service = rcu_dereference(state->services[localport]);
176 		if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
177 		    kref_get_unless_zero(&service->ref_count)) {
178 			service = rcu_pointer_handoff(service);
179 			rcu_read_unlock();
180 			return service;
181 		}
182 		rcu_read_unlock();
183 	}
184 	vchiq_log_info(vchiq_core_log_level,
185 		       "Invalid port %d", localport);
186 	return NULL;
187 }
188 
189 struct vchiq_service *
find_service_for_instance(struct vchiq_instance *instance,
191 	unsigned int handle)
192 {
193 	struct vchiq_service *service;
194 
195 	rcu_read_lock();
196 	service = handle_to_service(handle);
197 	if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
198 	    service->handle == handle &&
199 	    service->instance == instance &&
200 	    kref_get_unless_zero(&service->ref_count)) {
201 		service = rcu_pointer_handoff(service);
202 		rcu_read_unlock();
203 		return service;
204 	}
205 	rcu_read_unlock();
206 	vchiq_log_info(vchiq_core_log_level,
207 		       "Invalid service handle 0x%x", handle);
208 	return NULL;
209 }
210 
211 struct vchiq_service *
find_closed_service_for_instance(struct vchiq_instance *instance,
213 	unsigned int handle)
214 {
215 	struct vchiq_service *service;
216 
217 	rcu_read_lock();
218 	service = handle_to_service(handle);
219 	if (service &&
220 	    (service->srvstate == VCHIQ_SRVSTATE_FREE ||
221 	     service->srvstate == VCHIQ_SRVSTATE_CLOSED) &&
222 	    service->handle == handle &&
223 	    service->instance == instance &&
224 	    kref_get_unless_zero(&service->ref_count)) {
225 		service = rcu_pointer_handoff(service);
226 		rcu_read_unlock();
227 		return service;
228 	}
229 	rcu_read_unlock();
230 	vchiq_log_info(vchiq_core_log_level,
231 		       "Invalid service handle 0x%x", handle);
	return NULL;
233 }
234 
235 struct vchiq_service *
__next_service_by_instance(struct vchiq_state *state,
237 			   struct vchiq_instance *instance,
238 			   int *pidx)
239 {
240 	struct vchiq_service *service = NULL;
241 	int idx = *pidx;
242 
243 	while (idx < state->unused_service) {
244 		struct vchiq_service *srv;
245 
246 		srv = rcu_dereference(state->services[idx++]);
247 		if (srv && srv->srvstate != VCHIQ_SRVSTATE_FREE &&
248 		    srv->instance == instance) {
249 			service = srv;
250 			break;
251 		}
252 	}
253 
254 	*pidx = idx;
255 	return service;
256 }
257 
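/*
 * Return the next service owned by 'instance', starting the search at *pidx,
 * with a reference held; NULL means the service table has been exhausted.
 */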
258 struct vchiq_service *
next_service_by_instance(struct vchiq_state *state,
260 			 struct vchiq_instance *instance,
261 			 int *pidx)
262 {
263 	struct vchiq_service *service;
264 
265 	rcu_read_lock();
266 	while (1) {
267 		service = __next_service_by_instance(state, instance, pidx);
268 		if (!service)
269 			break;
270 		if (kref_get_unless_zero(&service->ref_count)) {
271 			service = rcu_pointer_handoff(service);
272 			break;
273 		}
274 	}
275 	rcu_read_unlock();
276 	return service;
277 }
278 
279 void
lock_service(struct vchiq_service *service)
281 {
282 	if (!service) {
283 		WARN(1, "%s service is NULL\n", __func__);
284 		return;
285 	}
286 	kref_get(&service->ref_count);
287 }
288 
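/*
 * Final kref release: unpublish the service from the state's service table,
 * run any userdata destructor, then free the structure after an RCU grace
 * period.
 */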
static void service_release(struct kref *kref)
290 {
291 	struct vchiq_service *service =
292 		container_of(kref, struct vchiq_service, ref_count);
293 	struct vchiq_state *state = service->state;
294 
295 	WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
296 	rcu_assign_pointer(state->services[service->localport], NULL);
297 	if (service->userdata_term)
298 		service->userdata_term(service->base.userdata);
299 	kfree_rcu(service, rcu);
300 }
301 
302 void
unlock_service(struct vchiq_service *service)
304 {
305 	if (!service) {
306 		WARN(1, "%s: service is NULL\n", __func__);
307 		return;
308 	}
309 	kref_put(&service->ref_count, service_release);
310 }
311 
312 int
vchiq_get_client_id(unsigned int handle)
314 {
315 	struct vchiq_service *service;
316 	int id;
317 
318 	rcu_read_lock();
319 	service = handle_to_service(handle);
320 	id = service ? service->client_id : 0;
321 	rcu_read_unlock();
322 	return id;
323 }
324 
325 void *
vchiq_get_service_userdata(unsigned int handle)
327 {
328 	void *userdata;
329 	struct vchiq_service *service;
330 
331 	rcu_read_lock();
332 	service = handle_to_service(handle);
333 	userdata = service ? service->base.userdata : NULL;
334 	rcu_read_unlock();
335 	return userdata;
336 }
337 EXPORT_SYMBOL(vchiq_get_service_userdata);
338 
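/*
 * Mark a service as closing, synchronise with the recycle and slot handler
 * threads, and wake any sender blocked on the service's quota.
 */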
339 static void
mark_service_closing_internal(struct vchiq_service *service, int sh_thread)
341 {
342 	struct vchiq_state *state = service->state;
343 	struct vchiq_service_quota *quota;
344 
345 	service->closing = 1;
346 
347 	/* Synchronise with other threads. */
348 	mutex_lock(&state->recycle_mutex);
349 	mutex_unlock(&state->recycle_mutex);
350 	if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
351 		/*
352 		 * If we're pausing then the slot_mutex is held until resume
353 		 * by the slot handler.  Therefore don't try to acquire this
354 		 * mutex if we're the slot handler and in the pause sent state.
355 		 * We don't need to in this case anyway.
356 		 */
357 		mutex_lock(&state->slot_mutex);
358 		mutex_unlock(&state->slot_mutex);
359 	}
360 
361 	/* Unblock any sending thread. */
362 	quota = &state->service_quotas[service->localport];
363 	complete(&quota->quota_event);
364 }
365 
366 static void
mark_service_closing(struct vchiq_service *service)
368 {
369 	mark_service_closing_internal(service, 0);
370 }
371 
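/*
 * Deliver a callback to the service owner. Messages are released here for
 * every reason except VCHIQ_MESSAGE_AVAILABLE, where the owner keeps the
 * claim until it calls vchiq_release_message().
 */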
372 static inline enum vchiq_status
make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
374 		      struct vchiq_header *header, void *bulk_userdata)
375 {
376 	enum vchiq_status status;
377 
378 	vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
379 		service->state->id, service->localport, reason_names[reason],
380 		header, bulk_userdata);
381 	status = service->base.callback(reason, header, service->handle,
382 		bulk_userdata);
383 	if (status == VCHIQ_ERROR) {
384 		vchiq_log_warning(vchiq_core_log_level,
385 			"%d: ignoring ERROR from callback to service %x",
386 			service->state->id, service->handle);
387 		status = VCHIQ_SUCCESS;
388 	}
389 
390 	if (reason != VCHIQ_MESSAGE_AVAILABLE)
391 		vchiq_release_message(service->handle, header);
392 
393 	return status;
394 }
395 
396 inline void
vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
398 {
399 	enum vchiq_connstate oldstate = state->conn_state;
400 
401 	vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
402 		conn_state_names[oldstate],
403 		conn_state_names[newstate]);
404 	state->conn_state = newstate;
405 	vchiq_platform_conn_state_changed(state, oldstate, newstate);
406 }
407 
408 static inline void
remote_event_create(wait_queue_head_t *wq, struct remote_event *event)
410 {
411 	event->armed = 0;
412 	/*
413 	 * Don't clear the 'fired' flag because it may already have been set
414 	 * by the other side.
415 	 */
416 	init_waitqueue_head(wq);
417 }
418 
419 /*
420  * All the event waiting routines in VCHIQ used a custom semaphore
421  * implementation that filtered most signals. This achieved a behaviour similar
 * to the "killable" family of functions. While cleaning up this code, all the
 * routines were switched to the "interruptible" family of functions, as the
 * custom implementation was deemed unjustified and using "killable" would
 * have put all of VCHIQ's threads into the D state.
426  */
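/*
 * Wait for the peer to fire this event. Returns 1 once the event has fired,
 * or 0 if the wait was interrupted by a signal.
 */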
427 static inline int
remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
429 {
430 	if (!event->fired) {
431 		event->armed = 1;
432 		dsb(sy);
433 		if (wait_event_interruptible(*wq, event->fired)) {
434 			event->armed = 0;
435 			return 0;
436 		}
437 		event->armed = 0;
438 		wmb();
439 	}
440 
441 	event->fired = 0;
442 	return 1;
443 }
444 
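/*
 * Fire an event for waiters on this (local) side only, e.g. to wake the
 * slot handler without signalling the peer.
 */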
445 static inline void
remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
447 {
448 	event->fired = 1;
449 	event->armed = 0;
450 	wake_up_all(wq);
451 }
452 
453 static inline void
remote_event_poll(wait_queue_head_t *wq, struct remote_event *event)
455 {
456 	if (event->fired && event->armed)
457 		remote_event_signal_local(wq, event);
458 }
459 
460 void
remote_event_pollall(struct vchiq_state *state)
462 {
463 	remote_event_poll(&state->sync_trigger_event, &state->local->sync_trigger);
464 	remote_event_poll(&state->sync_release_event, &state->local->sync_release);
465 	remote_event_poll(&state->trigger_event, &state->local->trigger);
466 	remote_event_poll(&state->recycle_event, &state->local->recycle);
467 }
468 
469 /*
470  * Round up message sizes so that any space at the end of a slot is always big
471  * enough for a header. This relies on header size being a power of two, which
472  * has been verified earlier by a static assertion.
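 * For example, with the 8-byte header asserted above, a 5-byte payload is
 * rounded up to a 16-byte stride (8-byte header + 5 bytes, rounded to a
 * multiple of 8).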
473  */
474 
475 static inline size_t
calc_stride(size_t size)
477 {
478 	/* Allow room for the header */
479 	size += sizeof(struct vchiq_header);
480 
481 	/* Round up */
482 	return (size + sizeof(struct vchiq_header) - 1) &
483 		~(sizeof(struct vchiq_header) - 1);
484 }
485 
486 /* Called by the slot handler thread */
487 static struct vchiq_service *
get_listening_service(struct vchiq_state *state, int fourcc)
489 {
490 	int i;
491 
492 	WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
493 
494 	rcu_read_lock();
495 	for (i = 0; i < state->unused_service; i++) {
496 		struct vchiq_service *service;
497 
498 		service = rcu_dereference(state->services[i]);
499 		if (service &&
500 		    service->public_fourcc == fourcc &&
501 		    (service->srvstate == VCHIQ_SRVSTATE_LISTENING ||
502 		     (service->srvstate == VCHIQ_SRVSTATE_OPEN &&
503 		      service->remoteport == VCHIQ_PORT_FREE)) &&
504 		    kref_get_unless_zero(&service->ref_count)) {
505 			service = rcu_pointer_handoff(service);
506 			rcu_read_unlock();
507 			return service;
508 		}
509 	}
510 	rcu_read_unlock();
511 	return NULL;
512 }
513 
514 /* Called by the slot handler thread */
515 static struct vchiq_service *
get_connected_service(struct vchiq_state *state, unsigned int port)
517 {
518 	int i;
519 
520 	rcu_read_lock();
521 	for (i = 0; i < state->unused_service; i++) {
522 		struct vchiq_service *service =
523 			rcu_dereference(state->services[i]);
524 
525 		if (service && service->srvstate == VCHIQ_SRVSTATE_OPEN &&
526 		    service->remoteport == port &&
527 		    kref_get_unless_zero(&service->ref_count)) {
528 			service = rcu_pointer_handoff(service);
529 			rcu_read_unlock();
530 			return service;
531 		}
532 	}
533 	rcu_read_unlock();
534 	return NULL;
535 }
536 
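/*
 * Flag a service for attention by the slot handler: record the poll type in
 * the service's poll_flags, set its bit in the state-wide poll bitmap and
 * wake the slot handler via the local trigger event.
 */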
537 inline void
request_poll(struct vchiq_state *state, struct vchiq_service *service,
539 	     int poll_type)
540 {
541 	u32 value;
542 	int index;
543 
544 	if (!service)
545 		goto skip_service;
546 
547 	do {
548 		value = atomic_read(&service->poll_flags);
549 	} while (atomic_cmpxchg(&service->poll_flags, value,
550 		 value | BIT(poll_type)) != value);
551 
552 	index = BITSET_WORD(service->localport);
553 	do {
554 		value = atomic_read(&state->poll_services[index]);
555 	} while (atomic_cmpxchg(&state->poll_services[index],
556 		 value, value | BIT(service->localport & 0x1f)) != value);
557 
558 skip_service:
559 	state->poll_needed = 1;
560 	wmb();
561 
562 	/* ... and ensure the slot handler runs. */
563 	remote_event_signal_local(&state->trigger_event, &state->local->trigger);
564 }
565 
566 /*
567  * Called from queue_message, by the slot handler and application threads,
568  * with slot_mutex held
569  */
570 static struct vchiq_header *
reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
572 {
573 	struct vchiq_shared_state *local = state->local;
574 	int tx_pos = state->local_tx_pos;
575 	int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
576 
577 	if (space > slot_space) {
578 		struct vchiq_header *header;
579 		/* Fill the remaining space with padding */
580 		WARN_ON(!state->tx_data);
581 		header = (struct vchiq_header *)
582 			(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
583 		header->msgid = VCHIQ_MSGID_PADDING;
584 		header->size = slot_space - sizeof(struct vchiq_header);
585 
586 		tx_pos += slot_space;
587 	}
588 
589 	/* If necessary, get the next slot. */
590 	if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
591 		int slot_index;
592 
593 		/* If there is no free slot... */
594 
595 		if (!try_wait_for_completion(&state->slot_available_event)) {
596 			/* ...wait for one. */
597 
598 			VCHIQ_STATS_INC(state, slot_stalls);
599 
600 			/* But first, flush through the last slot. */
601 			state->local_tx_pos = tx_pos;
602 			local->tx_pos = tx_pos;
603 			remote_event_signal(&state->remote->trigger);
604 
605 			if (!is_blocking ||
606 				(wait_for_completion_interruptible(
607 				&state->slot_available_event)))
608 				return NULL; /* No space available */
609 		}
610 
611 		if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) {
612 			complete(&state->slot_available_event);
613 			pr_warn("%s: invalid tx_pos: %d\n", __func__, tx_pos);
614 			return NULL;
615 		}
616 
617 		slot_index = local->slot_queue[
618 			SLOT_QUEUE_INDEX_FROM_POS_MASKED(tx_pos)];
619 		state->tx_data =
620 			(char *)SLOT_DATA_FROM_INDEX(state, slot_index);
621 	}
622 
623 	state->local_tx_pos = tx_pos + space;
624 
625 	return (struct vchiq_header *)(state->tx_data +
626 						(tx_pos & VCHIQ_SLOT_MASK));
627 }
628 
629 /* Called by the recycle thread. */
630 static void
process_free_queue(struct vchiq_state *state, BITSET_T *service_found,
632 		   size_t length)
633 {
634 	struct vchiq_shared_state *local = state->local;
635 	int slot_queue_available;
636 
637 	/*
638 	 * Find slots which have been freed by the other side, and return them
639 	 * to the available queue.
640 	 */
641 	slot_queue_available = state->slot_queue_available;
642 
643 	/*
644 	 * Use a memory barrier to ensure that any state that may have been
645 	 * modified by another thread is not masked by stale prefetched
646 	 * values.
647 	 */
648 	mb();
649 
650 	while (slot_queue_available != local->slot_queue_recycle) {
651 		unsigned int pos;
652 		int slot_index = local->slot_queue[slot_queue_available++ &
653 			VCHIQ_SLOT_QUEUE_MASK];
654 		char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
655 		int data_found = 0;
656 
657 		/*
658 		 * Beware of the address dependency - data is calculated
659 		 * using an index written by the other side.
660 		 */
661 		rmb();
662 
663 		vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%pK %x %x",
664 			state->id, slot_index, data,
665 			local->slot_queue_recycle, slot_queue_available);
666 
667 		/* Initialise the bitmask for services which have used this slot */
668 		memset(service_found, 0, length);
669 
670 		pos = 0;
671 
672 		while (pos < VCHIQ_SLOT_SIZE) {
673 			struct vchiq_header *header =
674 				(struct vchiq_header *)(data + pos);
675 			int msgid = header->msgid;
676 
677 			if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
678 				int port = VCHIQ_MSG_SRCPORT(msgid);
679 				struct vchiq_service_quota *quota =
680 					&state->service_quotas[port];
681 				int count;
682 
683 				spin_lock(&quota_spinlock);
684 				count = quota->message_use_count;
685 				if (count > 0)
686 					quota->message_use_count =
687 						count - 1;
688 				spin_unlock(&quota_spinlock);
689 
690 				if (count == quota->message_quota)
691 					/*
692 					 * Signal the service that it
693 					 * has dropped below its quota
694 					 */
695 					complete(&quota->quota_event);
696 				else if (count == 0) {
697 					vchiq_log_error(vchiq_core_log_level,
698 						"service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
699 						port,
700 						quota->message_use_count,
701 						header, msgid, header->msgid,
702 						header->size);
703 					WARN(1, "invalid message use count\n");
704 				}
705 				if (!BITSET_IS_SET(service_found, port)) {
706 					/* Set the found bit for this service */
707 					BITSET_SET(service_found, port);
708 
709 					spin_lock(&quota_spinlock);
710 					count = quota->slot_use_count;
711 					if (count > 0)
712 						quota->slot_use_count =
713 							count - 1;
714 					spin_unlock(&quota_spinlock);
715 
716 					if (count > 0) {
717 						/*
718 						 * Signal the service in case
719 						 * it has dropped below its quota
720 						 */
721 						complete(&quota->quota_event);
722 						vchiq_log_trace(
723 							vchiq_core_log_level,
724 							"%d: pfq:%d %x@%pK - slot_use->%d",
725 							state->id, port,
726 							header->size, header,
727 							count - 1);
728 					} else {
729 						vchiq_log_error(
730 							vchiq_core_log_level,
731 								"service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
732 							port, count, header,
733 							msgid, header->msgid,
734 							header->size);
735 						WARN(1, "bad slot use count\n");
736 					}
737 				}
738 
739 				data_found = 1;
740 			}
741 
742 			pos += calc_stride(header->size);
743 			if (pos > VCHIQ_SLOT_SIZE) {
744 				vchiq_log_error(vchiq_core_log_level,
745 					"pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
746 					pos, header, msgid, header->msgid,
747 					header->size);
748 				WARN(1, "invalid slot position\n");
749 			}
750 		}
751 
752 		if (data_found) {
753 			int count;
754 
755 			spin_lock(&quota_spinlock);
756 			count = state->data_use_count;
757 			if (count > 0)
758 				state->data_use_count =
759 					count - 1;
760 			spin_unlock(&quota_spinlock);
761 			if (count == state->data_quota)
762 				complete(&state->data_quota_event);
763 		}
764 
765 		/*
766 		 * Don't allow the slot to be reused until we are no
767 		 * longer interested in it.
768 		 */
769 		mb();
770 
771 		state->slot_queue_available = slot_queue_available;
772 		complete(&state->slot_available_event);
773 	}
774 }
775 
776 static ssize_t
memcpy_copy_callback(
778 	void *context, void *dest,
779 	size_t offset, size_t maxsize)
780 {
781 	memcpy(dest + offset, context + offset, maxsize);
782 	return maxsize;
783 }
784 
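/*
 * Copy 'size' bytes of message payload into 'dest' by repeatedly invoking
 * copy_callback. Returns 'size' on success, or a negative errno if the
 * callback fails or returns an inconsistent length.
 */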
785 static ssize_t
copy_message_data(
787 	ssize_t (*copy_callback)(void *context, void *dest,
788 				 size_t offset, size_t maxsize),
789 	void *context,
790 	void *dest,
791 	size_t size)
792 {
793 	size_t pos = 0;
794 
795 	while (pos < size) {
796 		ssize_t callback_result;
797 		size_t max_bytes = size - pos;
798 
799 		callback_result =
800 			copy_callback(context, dest + pos,
801 				      pos, max_bytes);
802 
803 		if (callback_result < 0)
804 			return callback_result;
805 
806 		if (!callback_result)
807 			return -EIO;
808 
809 		if (callback_result > max_bytes)
810 			return -EIO;
811 
812 		pos += callback_result;
813 	}
814 
815 	return size;
816 }
817 
818 /* Called by the slot handler and application threads */
819 static enum vchiq_status
queue_message(struct vchiq_state *state, struct vchiq_service *service,
821 	      int msgid,
822 	      ssize_t (*copy_callback)(void *context, void *dest,
823 				       size_t offset, size_t maxsize),
824 	      void *context, size_t size, int flags)
825 {
826 	struct vchiq_shared_state *local;
827 	struct vchiq_service_quota *quota = NULL;
828 	struct vchiq_header *header;
829 	int type = VCHIQ_MSG_TYPE(msgid);
830 
831 	size_t stride;
832 
833 	local = state->local;
834 
835 	stride = calc_stride(size);
836 
837 	WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
838 
839 	if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
840 	    mutex_lock_killable(&state->slot_mutex))
841 		return VCHIQ_RETRY;
842 
843 	if (type == VCHIQ_MSG_DATA) {
844 		int tx_end_index;
845 
846 		if (!service) {
847 			WARN(1, "%s: service is NULL\n", __func__);
848 			mutex_unlock(&state->slot_mutex);
849 			return VCHIQ_ERROR;
850 		}
851 
852 		WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
853 				 QMFLAGS_NO_MUTEX_UNLOCK));
854 
855 		if (service->closing) {
856 			/* The service has been closed */
857 			mutex_unlock(&state->slot_mutex);
858 			return VCHIQ_ERROR;
859 		}
860 
861 		quota = &state->service_quotas[service->localport];
862 
863 		spin_lock(&quota_spinlock);
864 
865 		/*
866 		 * Ensure this service doesn't use more than its quota of
867 		 * messages or slots
868 		 */
869 		tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
870 			state->local_tx_pos + stride - 1);
871 
872 		/*
873 		 * Ensure data messages don't use more than their quota of
874 		 * slots
875 		 */
876 		while ((tx_end_index != state->previous_data_index) &&
877 			(state->data_use_count == state->data_quota)) {
878 			VCHIQ_STATS_INC(state, data_stalls);
879 			spin_unlock(&quota_spinlock);
880 			mutex_unlock(&state->slot_mutex);
881 
882 			if (wait_for_completion_interruptible(
883 						&state->data_quota_event))
884 				return VCHIQ_RETRY;
885 
886 			mutex_lock(&state->slot_mutex);
887 			spin_lock(&quota_spinlock);
888 			tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
889 				state->local_tx_pos + stride - 1);
890 			if ((tx_end_index == state->previous_data_index) ||
891 				(state->data_use_count < state->data_quota)) {
892 				/* Pass the signal on to other waiters */
893 				complete(&state->data_quota_event);
894 				break;
895 			}
896 		}
897 
898 		while ((quota->message_use_count == quota->message_quota) ||
899 			((tx_end_index != quota->previous_tx_index) &&
900 			(quota->slot_use_count ==
901 				quota->slot_quota))) {
902 			spin_unlock(&quota_spinlock);
903 			vchiq_log_trace(vchiq_core_log_level,
904 				"%d: qm:%d %s,%zx - quota stall (msg %d, slot %d)",
905 				state->id, service->localport,
906 				msg_type_str(type), size,
907 				quota->message_use_count,
908 				quota->slot_use_count);
909 			VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
910 			mutex_unlock(&state->slot_mutex);
911 			if (wait_for_completion_interruptible(
912 						&quota->quota_event))
913 				return VCHIQ_RETRY;
914 			if (service->closing)
915 				return VCHIQ_ERROR;
916 			if (mutex_lock_killable(&state->slot_mutex))
917 				return VCHIQ_RETRY;
918 			if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
919 				/* The service has been closed */
920 				mutex_unlock(&state->slot_mutex);
921 				return VCHIQ_ERROR;
922 			}
923 			spin_lock(&quota_spinlock);
924 			tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
925 				state->local_tx_pos + stride - 1);
926 		}
927 
928 		spin_unlock(&quota_spinlock);
929 	}
930 
931 	header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING);
932 
933 	if (!header) {
934 		if (service)
935 			VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
936 		/*
937 		 * In the event of a failure, return the mutex to the
938 		 * state it was in
939 		 */
940 		if (!(flags & QMFLAGS_NO_MUTEX_LOCK))
941 			mutex_unlock(&state->slot_mutex);
942 		return VCHIQ_RETRY;
943 	}
944 
945 	if (type == VCHIQ_MSG_DATA) {
946 		ssize_t callback_result;
947 		int tx_end_index;
948 		int slot_use_count;
949 
950 		vchiq_log_info(vchiq_core_log_level,
951 			"%d: qm %s@%pK,%zx (%d->%d)",
952 			state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
953 			header, size, VCHIQ_MSG_SRCPORT(msgid),
954 			VCHIQ_MSG_DSTPORT(msgid));
955 
956 		WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
957 				 QMFLAGS_NO_MUTEX_UNLOCK));
958 
959 		callback_result =
960 			copy_message_data(copy_callback, context,
961 					  header->data, size);
962 
963 		if (callback_result < 0) {
964 			mutex_unlock(&state->slot_mutex);
965 			VCHIQ_SERVICE_STATS_INC(service,
966 						error_count);
967 			return VCHIQ_ERROR;
968 		}
969 
970 		if (SRVTRACE_ENABLED(service,
971 				     VCHIQ_LOG_INFO))
972 			vchiq_log_dump_mem("Sent", 0,
973 					   header->data,
974 					   min((size_t)16,
975 					       (size_t)callback_result));
976 
977 		spin_lock(&quota_spinlock);
978 		quota->message_use_count++;
979 
980 		tx_end_index =
981 			SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
982 
983 		/*
984 		 * If this transmission can't fit in the last slot used by any
985 		 * service, the data_use_count must be increased.
986 		 */
987 		if (tx_end_index != state->previous_data_index) {
988 			state->previous_data_index = tx_end_index;
989 			state->data_use_count++;
990 		}
991 
992 		/*
993 		 * If this isn't the same slot last used by this service,
994 		 * the service's slot_use_count must be increased.
995 		 */
996 		if (tx_end_index != quota->previous_tx_index) {
997 			quota->previous_tx_index = tx_end_index;
998 			slot_use_count = ++quota->slot_use_count;
999 		} else {
1000 			slot_use_count = 0;
1001 		}
1002 
1003 		spin_unlock(&quota_spinlock);
1004 
1005 		if (slot_use_count)
1006 			vchiq_log_trace(vchiq_core_log_level,
1007 				"%d: qm:%d %s,%zx - slot_use->%d (hdr %p)",
1008 				state->id, service->localport,
1009 				msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
1010 				slot_use_count, header);
1011 
1012 		VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1013 		VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1014 	} else {
1015 		vchiq_log_info(vchiq_core_log_level,
1016 			"%d: qm %s@%pK,%zx (%d->%d)", state->id,
1017 			msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1018 			header, size, VCHIQ_MSG_SRCPORT(msgid),
1019 			VCHIQ_MSG_DSTPORT(msgid));
1020 		if (size != 0) {
1021 			/*
1022 			 * It is assumed for now that this code path
1023 			 * only happens from calls inside this file.
1024 			 *
			 * External callers go through the vchiq_queue_message
			 * path, which always sets the type to VCHIQ_MSG_DATA.
1027 			 *
1028 			 * At first glance this appears to be correct but
1029 			 * more review is needed.
1030 			 */
1031 			copy_message_data(copy_callback, context,
1032 					  header->data, size);
1033 		}
1034 		VCHIQ_STATS_INC(state, ctrl_tx_count);
1035 	}
1036 
1037 	header->msgid = msgid;
1038 	header->size = size;
1039 
1040 	{
1041 		int svc_fourcc;
1042 
1043 		svc_fourcc = service
1044 			? service->base.fourcc
1045 			: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1046 
1047 		vchiq_log_info(SRVTRACE_LEVEL(service),
1048 			"Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu",
1049 			msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1050 			VCHIQ_MSG_TYPE(msgid),
1051 			VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1052 			VCHIQ_MSG_SRCPORT(msgid),
1053 			VCHIQ_MSG_DSTPORT(msgid),
1054 			size);
1055 	}
1056 
1057 	/* Make sure the new header is visible to the peer. */
1058 	wmb();
1059 
1060 	/* Make the new tx_pos visible to the peer. */
1061 	local->tx_pos = state->local_tx_pos;
1062 	wmb();
1063 
1064 	if (service && (type == VCHIQ_MSG_CLOSE))
1065 		vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
1066 
1067 	if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK))
1068 		mutex_unlock(&state->slot_mutex);
1069 
1070 	remote_event_signal(&state->remote->trigger);
1071 
1072 	return VCHIQ_SUCCESS;
1073 }
1074 
1075 /* Called by the slot handler and application threads */
1076 static enum vchiq_status
queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
1078 		   int msgid,
1079 		   ssize_t (*copy_callback)(void *context, void *dest,
1080 					    size_t offset, size_t maxsize),
1081 		   void *context, int size, int is_blocking)
1082 {
1083 	struct vchiq_shared_state *local;
1084 	struct vchiq_header *header;
1085 	ssize_t callback_result;
1086 
1087 	local = state->local;
1088 
1089 	if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME &&
1090 	    mutex_lock_killable(&state->sync_mutex))
1091 		return VCHIQ_RETRY;
1092 
1093 	remote_event_wait(&state->sync_release_event, &local->sync_release);
1094 
1095 	rmb();
1096 
1097 	header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
1098 		local->slot_sync);
1099 
1100 	{
1101 		int oldmsgid = header->msgid;
1102 
1103 		if (oldmsgid != VCHIQ_MSGID_PADDING)
1104 			vchiq_log_error(vchiq_core_log_level,
1105 				"%d: qms - msgid %x, not PADDING",
1106 				state->id, oldmsgid);
1107 	}
1108 
1109 	vchiq_log_info(vchiq_sync_log_level,
1110 		       "%d: qms %s@%pK,%x (%d->%d)", state->id,
1111 		       msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1112 		       header, size, VCHIQ_MSG_SRCPORT(msgid),
1113 		       VCHIQ_MSG_DSTPORT(msgid));
1114 
1115 	callback_result =
1116 		copy_message_data(copy_callback, context,
1117 				  header->data, size);
1118 
1119 	if (callback_result < 0) {
1120 		mutex_unlock(&state->slot_mutex);
1121 		VCHIQ_SERVICE_STATS_INC(service,
1122 					error_count);
1123 		return VCHIQ_ERROR;
1124 	}
1125 
1126 	if (service) {
1127 		if (SRVTRACE_ENABLED(service,
1128 				     VCHIQ_LOG_INFO))
1129 			vchiq_log_dump_mem("Sent", 0,
1130 					   header->data,
1131 					   min((size_t)16,
1132 					       (size_t)callback_result));
1133 
1134 		VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1135 		VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1136 	} else {
1137 		VCHIQ_STATS_INC(state, ctrl_tx_count);
1138 	}
1139 
1140 	header->size = size;
1141 	header->msgid = msgid;
1142 
1143 	if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
1144 		int svc_fourcc;
1145 
1146 		svc_fourcc = service
1147 			? service->base.fourcc
1148 			: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1149 
1150 		vchiq_log_trace(vchiq_sync_log_level,
1151 			"Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
1152 			msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1153 			VCHIQ_MSG_TYPE(msgid),
1154 			VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1155 			VCHIQ_MSG_SRCPORT(msgid),
1156 			VCHIQ_MSG_DSTPORT(msgid),
1157 			size);
1158 	}
1159 
1160 	remote_event_signal(&state->remote->sync_trigger);
1161 
1162 	if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
1163 		mutex_unlock(&state->sync_mutex);
1164 
1165 	return VCHIQ_SUCCESS;
1166 }
1167 
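/* Record another claim on a receive slot so it is not recycled early. */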
1168 static inline void
claim_slot(struct vchiq_slot_info *slot)
1170 {
1171 	slot->use_count++;
1172 }
1173 
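/*
 * Drop a claim on a receive slot. Once the release count catches up with
 * the use count, the slot is handed back to the remote side through its
 * recycle queue.
 */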
1174 static void
release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
1176 	     struct vchiq_header *header, struct vchiq_service *service)
1177 {
1178 	int release_count;
1179 
1180 	mutex_lock(&state->recycle_mutex);
1181 
1182 	if (header) {
1183 		int msgid = header->msgid;
1184 
1185 		if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
1186 			(service && service->closing)) {
1187 			mutex_unlock(&state->recycle_mutex);
1188 			return;
1189 		}
1190 
1191 		/* Rewrite the message header to prevent a double release */
1192 		header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
1193 	}
1194 
1195 	release_count = slot_info->release_count;
1196 	slot_info->release_count = ++release_count;
1197 
1198 	if (release_count == slot_info->use_count) {
1199 		int slot_queue_recycle;
1200 		/* Add to the freed queue */
1201 
1202 		/*
1203 		 * A read barrier is necessary here to prevent speculative
1204 		 * fetches of remote->slot_queue_recycle from overtaking the
1205 		 * mutex.
1206 		 */
1207 		rmb();
1208 
1209 		slot_queue_recycle = state->remote->slot_queue_recycle;
1210 		state->remote->slot_queue[slot_queue_recycle &
1211 			VCHIQ_SLOT_QUEUE_MASK] =
1212 			SLOT_INDEX_FROM_INFO(state, slot_info);
1213 		state->remote->slot_queue_recycle = slot_queue_recycle + 1;
1214 		vchiq_log_info(vchiq_core_log_level,
1215 			"%d: %s %d - recycle->%x", state->id, __func__,
1216 			SLOT_INDEX_FROM_INFO(state, slot_info),
1217 			state->remote->slot_queue_recycle);
1218 
1219 		/*
1220 		 * A write barrier is necessary, but remote_event_signal
1221 		 * contains one.
1222 		 */
1223 		remote_event_signal(&state->remote->recycle);
1224 	}
1225 
1226 	mutex_unlock(&state->recycle_mutex);
1227 }
1228 
1229 /* Called by the slot handler - don't hold the bulk mutex */
1230 static enum vchiq_status
notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
1232 	     int retry_poll)
1233 {
1234 	enum vchiq_status status = VCHIQ_SUCCESS;
1235 
1236 	vchiq_log_trace(vchiq_core_log_level,
1237 		"%d: nb:%d %cx - p=%x rn=%x r=%x",
1238 		service->state->id, service->localport,
1239 		(queue == &service->bulk_tx) ? 't' : 'r',
1240 		queue->process, queue->remote_notify, queue->remove);
1241 
1242 	queue->remote_notify = queue->process;
1243 
1244 	if (status == VCHIQ_SUCCESS) {
1245 		while (queue->remove != queue->remote_notify) {
1246 			struct vchiq_bulk *bulk =
1247 				&queue->bulks[BULK_INDEX(queue->remove)];
1248 
1249 			/*
1250 			 * Only generate callbacks for non-dummy bulk
1251 			 * requests, and non-terminated services
1252 			 */
1253 			if (bulk->data && service->instance) {
1254 				if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
1255 					if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1256 						VCHIQ_SERVICE_STATS_INC(service,
1257 							bulk_tx_count);
1258 						VCHIQ_SERVICE_STATS_ADD(service,
1259 							bulk_tx_bytes,
1260 							bulk->actual);
1261 					} else {
1262 						VCHIQ_SERVICE_STATS_INC(service,
1263 							bulk_rx_count);
1264 						VCHIQ_SERVICE_STATS_ADD(service,
1265 							bulk_rx_bytes,
1266 							bulk->actual);
1267 					}
1268 				} else {
1269 					VCHIQ_SERVICE_STATS_INC(service,
1270 						bulk_aborted_count);
1271 				}
1272 				if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
1273 					struct bulk_waiter *waiter;
1274 
1275 					spin_lock(&bulk_waiter_spinlock);
1276 					waiter = bulk->userdata;
1277 					if (waiter) {
1278 						waiter->actual = bulk->actual;
1279 						complete(&waiter->event);
1280 					}
1281 					spin_unlock(&bulk_waiter_spinlock);
1282 				} else if (bulk->mode ==
1283 					VCHIQ_BULK_MODE_CALLBACK) {
1284 					enum vchiq_reason reason = (bulk->dir ==
1285 						VCHIQ_BULK_TRANSMIT) ?
1286 						((bulk->actual ==
1287 						VCHIQ_BULK_ACTUAL_ABORTED) ?
1288 						VCHIQ_BULK_TRANSMIT_ABORTED :
1289 						VCHIQ_BULK_TRANSMIT_DONE) :
1290 						((bulk->actual ==
1291 						VCHIQ_BULK_ACTUAL_ABORTED) ?
1292 						VCHIQ_BULK_RECEIVE_ABORTED :
1293 						VCHIQ_BULK_RECEIVE_DONE);
1294 					status = make_service_callback(service,
1295 						reason,	NULL, bulk->userdata);
1296 					if (status == VCHIQ_RETRY)
1297 						break;
1298 				}
1299 			}
1300 
1301 			queue->remove++;
1302 			complete(&service->bulk_remove_event);
1303 		}
1304 		if (!retry_poll)
1305 			status = VCHIQ_SUCCESS;
1306 	}
1307 
1308 	if (status == VCHIQ_RETRY)
1309 		request_poll(service->state, service,
1310 			(queue == &service->bulk_tx) ?
1311 			VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
1312 
1313 	return status;
1314 }
1315 
1316 /* Called by the slot handler thread */
1317 static void
poll_services(struct vchiq_state *state)
1319 {
1320 	int group, i;
1321 
1322 	for (group = 0; group < BITSET_SIZE(state->unused_service); group++) {
1323 		u32 flags;
1324 
1325 		flags = atomic_xchg(&state->poll_services[group], 0);
1326 		for (i = 0; flags; i++) {
1327 			if (flags & BIT(i)) {
1328 				struct vchiq_service *service =
1329 					find_service_by_port(state,
1330 						(group<<5) + i);
1331 				u32 service_flags;
1332 
1333 				flags &= ~BIT(i);
1334 				if (!service)
1335 					continue;
1336 				service_flags =
1337 					atomic_xchg(&service->poll_flags, 0);
1338 				if (service_flags &
1339 					BIT(VCHIQ_POLL_REMOVE)) {
1340 					vchiq_log_info(vchiq_core_log_level,
1341 						"%d: ps - remove %d<->%d",
1342 						state->id, service->localport,
1343 						service->remoteport);
1344 
1345 					/*
1346 					 * Make it look like a client, because
1347 					 * it must be removed and not left in
1348 					 * the LISTENING state.
1349 					 */
1350 					service->public_fourcc =
1351 						VCHIQ_FOURCC_INVALID;
1352 
1353 					if (vchiq_close_service_internal(
1354 						service, 0/*!close_recvd*/) !=
1355 						VCHIQ_SUCCESS)
1356 						request_poll(state, service,
1357 							VCHIQ_POLL_REMOVE);
1358 				} else if (service_flags &
1359 					BIT(VCHIQ_POLL_TERMINATE)) {
1360 					vchiq_log_info(vchiq_core_log_level,
1361 						"%d: ps - terminate %d<->%d",
1362 						state->id, service->localport,
1363 						service->remoteport);
1364 					if (vchiq_close_service_internal(
1365 						service, 0/*!close_recvd*/) !=
1366 						VCHIQ_SUCCESS)
1367 						request_poll(state, service,
1368 							VCHIQ_POLL_TERMINATE);
1369 				}
1370 				if (service_flags & BIT(VCHIQ_POLL_TXNOTIFY))
1371 					notify_bulks(service,
1372 						&service->bulk_tx,
1373 						1/*retry_poll*/);
1374 				if (service_flags & BIT(VCHIQ_POLL_RXNOTIFY))
1375 					notify_bulks(service,
1376 						&service->bulk_rx,
1377 						1/*retry_poll*/);
1378 				unlock_service(service);
1379 			}
1380 		}
1381 	}
1382 }
1383 
1384 /* Called with the bulk_mutex held */
1385 static void
abort_outstanding_bulks(struct vchiq_service *service,
1387 			struct vchiq_bulk_queue *queue)
1388 {
1389 	int is_tx = (queue == &service->bulk_tx);
1390 
1391 	vchiq_log_trace(vchiq_core_log_level,
1392 		"%d: aob:%d %cx - li=%x ri=%x p=%x",
1393 		service->state->id, service->localport, is_tx ? 't' : 'r',
1394 		queue->local_insert, queue->remote_insert, queue->process);
1395 
1396 	WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
1397 	WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));
1398 
1399 	while ((queue->process != queue->local_insert) ||
1400 		(queue->process != queue->remote_insert)) {
1401 		struct vchiq_bulk *bulk =
1402 				&queue->bulks[BULK_INDEX(queue->process)];
1403 
1404 		if (queue->process == queue->remote_insert) {
1405 			/* fabricate a matching dummy bulk */
1406 			bulk->remote_data = NULL;
1407 			bulk->remote_size = 0;
1408 			queue->remote_insert++;
1409 		}
1410 
1411 		if (queue->process != queue->local_insert) {
1412 			vchiq_complete_bulk(bulk);
1413 
1414 			vchiq_log_info(SRVTRACE_LEVEL(service),
1415 				"%s %c%c%c%c d:%d ABORTED - tx len:%d, rx len:%d",
1416 				is_tx ? "Send Bulk to" : "Recv Bulk from",
1417 				VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1418 				service->remoteport,
1419 				bulk->size,
1420 				bulk->remote_size);
1421 		} else {
1422 			/* fabricate a matching dummy bulk */
1423 			bulk->data = 0;
1424 			bulk->size = 0;
1425 			bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
1426 			bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
1427 				VCHIQ_BULK_RECEIVE;
1428 			queue->local_insert++;
1429 		}
1430 
1431 		queue->process++;
1432 	}
1433 }
1434 
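/*
 * Handle an incoming OPEN request: match it against a listening service,
 * check version compatibility and acknowledge with OPENACK (or reject with
 * CLOSE). Returns 1 if the message was dealt with, 0 if it must be retried.
 */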
1435 static int
parse_open(struct vchiq_state *state, struct vchiq_header *header)
1437 {
1438 	struct vchiq_service *service = NULL;
1439 	int msgid, size;
1440 	unsigned int localport, remoteport;
1441 
1442 	msgid = header->msgid;
1443 	size = header->size;
1444 	localport = VCHIQ_MSG_DSTPORT(msgid);
1445 	remoteport = VCHIQ_MSG_SRCPORT(msgid);
1446 	if (size >= sizeof(struct vchiq_open_payload)) {
1447 		const struct vchiq_open_payload *payload =
1448 			(struct vchiq_open_payload *)header->data;
1449 		unsigned int fourcc;
1450 
1451 		fourcc = payload->fourcc;
1452 		vchiq_log_info(vchiq_core_log_level,
1453 			"%d: prs OPEN@%pK (%d->'%c%c%c%c')",
1454 			state->id, header, localport,
1455 			VCHIQ_FOURCC_AS_4CHARS(fourcc));
1456 
1457 		service = get_listening_service(state, fourcc);
1458 
1459 		if (service) {
1460 			/* A matching service exists */
1461 			short version = payload->version;
1462 			short version_min = payload->version_min;
1463 
1464 			if ((service->version < version_min) ||
1465 				(version < service->version_min)) {
1466 				/* Version mismatch */
1467 				vchiq_loud_error_header();
1468 				vchiq_loud_error("%d: service %d (%c%c%c%c) "
1469 					"version mismatch - local (%d, min %d)"
1470 					" vs. remote (%d, min %d)",
1471 					state->id, service->localport,
1472 					VCHIQ_FOURCC_AS_4CHARS(fourcc),
1473 					service->version, service->version_min,
1474 					version, version_min);
1475 				vchiq_loud_error_footer();
1476 				unlock_service(service);
1477 				service = NULL;
1478 				goto fail_open;
1479 			}
1480 			service->peer_version = version;
1481 
1482 			if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
1483 				struct vchiq_openack_payload ack_payload = {
1484 					service->version
1485 				};
1486 
1487 				if (state->version_common <
1488 				    VCHIQ_VERSION_SYNCHRONOUS_MODE)
1489 					service->sync = 0;
1490 
1491 				/* Acknowledge the OPEN */
1492 				if (service->sync) {
1493 					if (queue_message_sync(
1494 						state,
1495 						NULL,
1496 						VCHIQ_MAKE_MSG(
1497 							VCHIQ_MSG_OPENACK,
1498 							service->localport,
1499 							remoteport),
1500 						memcpy_copy_callback,
1501 						&ack_payload,
1502 						sizeof(ack_payload),
1503 						0) == VCHIQ_RETRY)
1504 						goto bail_not_ready;
1505 				} else {
1506 					if (queue_message(state,
1507 							NULL,
1508 							VCHIQ_MAKE_MSG(
1509 							VCHIQ_MSG_OPENACK,
1510 							service->localport,
1511 							remoteport),
1512 						memcpy_copy_callback,
1513 						&ack_payload,
1514 						sizeof(ack_payload),
1515 						0) == VCHIQ_RETRY)
1516 						goto bail_not_ready;
1517 				}
1518 
1519 				/* The service is now open */
1520 				vchiq_set_service_state(service,
1521 					service->sync ? VCHIQ_SRVSTATE_OPENSYNC
1522 					: VCHIQ_SRVSTATE_OPEN);
1523 			}
1524 
1525 			/* Success - the message has been dealt with */
1526 			unlock_service(service);
1527 			return 1;
1528 		}
1529 	}
1530 
1531 fail_open:
1532 	/* No available service, or an invalid request - send a CLOSE */
1533 	if (queue_message(state, NULL,
1534 		VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
1535 		NULL, NULL, 0, 0) == VCHIQ_RETRY)
1536 		goto bail_not_ready;
1537 
1538 	return 1;
1539 
1540 bail_not_ready:
1541 	if (service)
1542 		unlock_service(service);
1543 
1544 	return 0;
1545 }
1546 
1547 /* Called by the slot handler thread */
1548 static void
parse_rx_slots(struct vchiq_state *state)
1550 {
1551 	struct vchiq_shared_state *remote = state->remote;
1552 	struct vchiq_service *service = NULL;
1553 	int tx_pos;
1554 
1555 	DEBUG_INITIALISE(state->local)
1556 
1557 	tx_pos = remote->tx_pos;
1558 
1559 	while (state->rx_pos != tx_pos) {
1560 		struct vchiq_header *header;
1561 		int msgid, size;
1562 		int type;
1563 		unsigned int localport, remoteport;
1564 
1565 		DEBUG_TRACE(PARSE_LINE);
1566 		if (!state->rx_data) {
1567 			int rx_index;
1568 
1569 			WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
1570 			rx_index = remote->slot_queue[
1571 				SLOT_QUEUE_INDEX_FROM_POS_MASKED(state->rx_pos)];
1572 			state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
1573 				rx_index);
1574 			state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
1575 
1576 			/*
1577 			 * Initialise use_count to one, and increment
1578 			 * release_count at the end of the slot to avoid
1579 			 * releasing the slot prematurely.
1580 			 */
1581 			state->rx_info->use_count = 1;
1582 			state->rx_info->release_count = 0;
1583 		}
1584 
1585 		header = (struct vchiq_header *)(state->rx_data +
1586 			(state->rx_pos & VCHIQ_SLOT_MASK));
1587 		DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
1588 		msgid = header->msgid;
1589 		DEBUG_VALUE(PARSE_MSGID, msgid);
1590 		size = header->size;
1591 		type = VCHIQ_MSG_TYPE(msgid);
1592 		localport = VCHIQ_MSG_DSTPORT(msgid);
1593 		remoteport = VCHIQ_MSG_SRCPORT(msgid);
1594 
1595 		if (type != VCHIQ_MSG_DATA)
1596 			VCHIQ_STATS_INC(state, ctrl_rx_count);
1597 
1598 		switch (type) {
1599 		case VCHIQ_MSG_OPENACK:
1600 		case VCHIQ_MSG_CLOSE:
1601 		case VCHIQ_MSG_DATA:
1602 		case VCHIQ_MSG_BULK_RX:
1603 		case VCHIQ_MSG_BULK_TX:
1604 		case VCHIQ_MSG_BULK_RX_DONE:
1605 		case VCHIQ_MSG_BULK_TX_DONE:
1606 			service = find_service_by_port(state, localport);
1607 			if ((!service ||
1608 			     ((service->remoteport != remoteport) &&
1609 			      (service->remoteport != VCHIQ_PORT_FREE))) &&
1610 			    (localport == 0) &&
1611 			    (type == VCHIQ_MSG_CLOSE)) {
1612 				/*
1613 				 * This could be a CLOSE from a client which
1614 				 * hadn't yet received the OPENACK - look for
1615 				 * the connected service
1616 				 */
1617 				if (service)
1618 					unlock_service(service);
1619 				service = get_connected_service(state,
1620 					remoteport);
1621 				if (service)
1622 					vchiq_log_warning(vchiq_core_log_level,
1623 						"%d: prs %s@%pK (%d->%d) - found connected service %d",
1624 						state->id, msg_type_str(type),
1625 						header, remoteport, localport,
1626 						service->localport);
1627 			}
1628 
1629 			if (!service) {
1630 				vchiq_log_error(vchiq_core_log_level,
1631 					"%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
1632 					state->id, msg_type_str(type),
1633 					header, remoteport, localport,
1634 					localport);
1635 				goto skip_message;
1636 			}
1637 			break;
1638 		default:
1639 			break;
1640 		}
1641 
1642 		if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
1643 			int svc_fourcc;
1644 
1645 			svc_fourcc = service
1646 				? service->base.fourcc
1647 				: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1648 			vchiq_log_info(SRVTRACE_LEVEL(service),
1649 				"Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d len:%d",
1650 				msg_type_str(type), type,
1651 				VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1652 				remoteport, localport, size);
1653 			if (size > 0)
1654 				vchiq_log_dump_mem("Rcvd", 0, header->data,
1655 					min(16, size));
1656 		}
1657 
1658 		if (((unsigned long)header & VCHIQ_SLOT_MASK) +
1659 		    calc_stride(size) > VCHIQ_SLOT_SIZE) {
1660 			vchiq_log_error(vchiq_core_log_level,
1661 				"header %pK (msgid %x) - size %x too big for slot",
1662 				header, (unsigned int)msgid,
1663 				(unsigned int)size);
1664 			WARN(1, "oversized for slot\n");
1665 		}
1666 
1667 		switch (type) {
1668 		case VCHIQ_MSG_OPEN:
1669 			WARN_ON(!(VCHIQ_MSG_DSTPORT(msgid) == 0));
1670 			if (!parse_open(state, header))
1671 				goto bail_not_ready;
1672 			break;
1673 		case VCHIQ_MSG_OPENACK:
1674 			if (size >= sizeof(struct vchiq_openack_payload)) {
1675 				const struct vchiq_openack_payload *payload =
1676 					(struct vchiq_openack_payload *)
1677 					header->data;
1678 				service->peer_version = payload->version;
1679 			}
1680 			vchiq_log_info(vchiq_core_log_level,
1681 				"%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
1682 				state->id, header, size, remoteport, localport,
1683 				service->peer_version);
1684 			if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
1685 				service->remoteport = remoteport;
1686 				vchiq_set_service_state(service,
1687 					VCHIQ_SRVSTATE_OPEN);
1688 				complete(&service->remove_event);
1689 			} else
1690 				vchiq_log_error(vchiq_core_log_level,
1691 					"OPENACK received in state %s",
1692 					srvstate_names[service->srvstate]);
1693 			break;
1694 		case VCHIQ_MSG_CLOSE:
1695 			WARN_ON(size != 0); /* There should be no data */
1696 
1697 			vchiq_log_info(vchiq_core_log_level,
1698 				"%d: prs CLOSE@%pK (%d->%d)",
1699 				state->id, header, remoteport, localport);
1700 
1701 			mark_service_closing_internal(service, 1);
1702 
1703 			if (vchiq_close_service_internal(service,
1704 				1/*close_recvd*/) == VCHIQ_RETRY)
1705 				goto bail_not_ready;
1706 
1707 			vchiq_log_info(vchiq_core_log_level,
1708 				"Close Service %c%c%c%c s:%u d:%d",
1709 				VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1710 				service->localport,
1711 				service->remoteport);
1712 			break;
1713 		case VCHIQ_MSG_DATA:
1714 			vchiq_log_info(vchiq_core_log_level,
1715 				"%d: prs DATA@%pK,%x (%d->%d)",
1716 				state->id, header, size, remoteport, localport);
1717 
1718 			if ((service->remoteport == remoteport) &&
1719 			    (service->srvstate == VCHIQ_SRVSTATE_OPEN)) {
1720 				header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
1721 				claim_slot(state->rx_info);
1722 				DEBUG_TRACE(PARSE_LINE);
1723 				if (make_service_callback(service,
1724 					VCHIQ_MESSAGE_AVAILABLE, header,
1725 					NULL) == VCHIQ_RETRY) {
1726 					DEBUG_TRACE(PARSE_LINE);
1727 					goto bail_not_ready;
1728 				}
1729 				VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
1730 				VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
1731 					size);
1732 			} else {
1733 				VCHIQ_STATS_INC(state, error_count);
1734 			}
1735 			break;
1736 		case VCHIQ_MSG_CONNECT:
1737 			vchiq_log_info(vchiq_core_log_level,
1738 				"%d: prs CONNECT@%pK", state->id, header);
1739 			state->version_common =	((struct vchiq_slot_zero *)
1740 						 state->slot_data)->version;
1741 			complete(&state->connect);
1742 			break;
1743 		case VCHIQ_MSG_BULK_RX:
1744 		case VCHIQ_MSG_BULK_TX:
1745 			/*
1746 			 * We should never receive a bulk request from the
			 * other side since we're not set up to perform as the
1748 			 * master.
1749 			 */
1750 			WARN_ON(1);
1751 			break;
1752 		case VCHIQ_MSG_BULK_RX_DONE:
1753 		case VCHIQ_MSG_BULK_TX_DONE:
1754 			if ((service->remoteport == remoteport) &&
1755 			    (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
1756 				struct vchiq_bulk_queue *queue;
1757 				struct vchiq_bulk *bulk;
1758 
1759 				queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
1760 					&service->bulk_rx : &service->bulk_tx;
1761 
1762 				DEBUG_TRACE(PARSE_LINE);
1763 				if (mutex_lock_killable(&service->bulk_mutex)) {
1764 					DEBUG_TRACE(PARSE_LINE);
1765 					goto bail_not_ready;
1766 				}
1767 				if ((int)(queue->remote_insert -
1768 					queue->local_insert) >= 0) {
1769 					vchiq_log_error(vchiq_core_log_level,
1770 						"%d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)",
1771 						state->id, msg_type_str(type),
1772 						header, remoteport, localport,
1773 						queue->remote_insert,
1774 						queue->local_insert);
1775 					mutex_unlock(&service->bulk_mutex);
1776 					break;
1777 				}
1778 				if (queue->process != queue->remote_insert) {
1779 					pr_err("%s: p %x != ri %x\n",
1780 					       __func__,
1781 					       queue->process,
1782 					       queue->remote_insert);
1783 					mutex_unlock(&service->bulk_mutex);
1784 					goto bail_not_ready;
1785 				}
1786 
1787 				bulk = &queue->bulks[
1788 					BULK_INDEX(queue->remote_insert)];
1789 				bulk->actual = *(int *)header->data;
1790 				queue->remote_insert++;
1791 
1792 				vchiq_log_info(vchiq_core_log_level,
1793 					"%d: prs %s@%pK (%d->%d) %x@%pad",
1794 					state->id, msg_type_str(type),
1795 					header, remoteport, localport,
1796 					bulk->actual, &bulk->data);
1797 
1798 				vchiq_log_trace(vchiq_core_log_level,
1799 					"%d: prs:%d %cx li=%x ri=%x p=%x",
1800 					state->id, localport,
1801 					(type == VCHIQ_MSG_BULK_RX_DONE) ?
1802 						'r' : 't',
1803 					queue->local_insert,
1804 					queue->remote_insert, queue->process);
1805 
1806 				DEBUG_TRACE(PARSE_LINE);
1807 				WARN_ON(queue->process == queue->local_insert);
1808 				vchiq_complete_bulk(bulk);
1809 				queue->process++;
1810 				mutex_unlock(&service->bulk_mutex);
1811 				DEBUG_TRACE(PARSE_LINE);
1812 				notify_bulks(service, queue, 1/*retry_poll*/);
1813 				DEBUG_TRACE(PARSE_LINE);
1814 			}
1815 			break;
1816 		case VCHIQ_MSG_PADDING:
1817 			vchiq_log_trace(vchiq_core_log_level,
1818 				"%d: prs PADDING@%pK,%x",
1819 				state->id, header, size);
1820 			break;
1821 		case VCHIQ_MSG_PAUSE:
1822 			/* If initiated, signal the application thread */
1823 			vchiq_log_trace(vchiq_core_log_level,
1824 				"%d: prs PAUSE@%pK,%x",
1825 				state->id, header, size);
1826 			if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
1827 				vchiq_log_error(vchiq_core_log_level,
1828 					"%d: PAUSE received in state PAUSED",
1829 					state->id);
1830 				break;
1831 			}
1832 			if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
1833 				/* Send a PAUSE in response */
1834 				if (queue_message(state, NULL,
1835 					VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
1836 					NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK)
1837 				    == VCHIQ_RETRY)
1838 					goto bail_not_ready;
1839 			}
1840 			/* At this point slot_mutex is held */
1841 			vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
1842 			break;
1843 		case VCHIQ_MSG_RESUME:
1844 			vchiq_log_trace(vchiq_core_log_level,
1845 				"%d: prs RESUME@%pK,%x",
1846 				state->id, header, size);
1847 			/* Release the slot mutex */
1848 			mutex_unlock(&state->slot_mutex);
1849 			vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
1850 			break;
1851 
1852 		case VCHIQ_MSG_REMOTE_USE:
1853 			vchiq_on_remote_use(state);
1854 			break;
1855 		case VCHIQ_MSG_REMOTE_RELEASE:
1856 			vchiq_on_remote_release(state);
1857 			break;
1858 		case VCHIQ_MSG_REMOTE_USE_ACTIVE:
1859 			break;
1860 
1861 		default:
1862 			vchiq_log_error(vchiq_core_log_level,
1863 				"%d: prs invalid msgid %x@%pK,%x",
1864 				state->id, msgid, header, size);
1865 			WARN(1, "invalid message\n");
1866 			break;
1867 		}
1868 
1869 skip_message:
1870 		if (service) {
1871 			unlock_service(service);
1872 			service = NULL;
1873 		}
1874 
1875 		state->rx_pos += calc_stride(size);
1876 
1877 		DEBUG_TRACE(PARSE_LINE);
1878 		/*
1879 		 * Perform some housekeeping when the end of the slot is
1880 		 * reached.
1881 		 */
1882 		if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
1883 			/* Remove the extra reference count. */
1884 			release_slot(state, state->rx_info, NULL, NULL);
1885 			state->rx_data = NULL;
1886 		}
1887 	}
1888 
1889 bail_not_ready:
1890 	if (service)
1891 		unlock_service(service);
1892 }
1893 
1894 /* Called by the slot handler thread */
1895 static int
1896 slot_handler_func(void *v)
1897 {
1898 	struct vchiq_state *state = v;
1899 	struct vchiq_shared_state *local = state->local;
1900 
1901 	DEBUG_INITIALISE(local)
1902 
1903 	while (1) {
1904 		DEBUG_COUNT(SLOT_HANDLER_COUNT);
1905 		DEBUG_TRACE(SLOT_HANDLER_LINE);
1906 		remote_event_wait(&state->trigger_event, &local->trigger);
1907 
1908 		rmb();
1909 
1910 		DEBUG_TRACE(SLOT_HANDLER_LINE);
1911 		if (state->poll_needed) {
1913 			state->poll_needed = 0;
1914 
1915 			/*
1916 			 * Handle service polling and other rare conditions here
1917 			 * out of the mainline code
1918 			 */
1919 			switch (state->conn_state) {
1920 			case VCHIQ_CONNSTATE_CONNECTED:
1921 				/* Poll the services as requested */
1922 				poll_services(state);
1923 				break;
1924 
1925 			case VCHIQ_CONNSTATE_PAUSING:
1926 				if (queue_message(state, NULL,
1927 					VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
1928 					NULL, NULL, 0,
1929 					QMFLAGS_NO_MUTEX_UNLOCK)
1930 				    != VCHIQ_RETRY) {
1931 					vchiq_set_conn_state(state,
1932 						VCHIQ_CONNSTATE_PAUSE_SENT);
1933 				} else {
1934 					/* Retry later */
1935 					state->poll_needed = 1;
1936 				}
1937 				break;
1938 
1939 			case VCHIQ_CONNSTATE_RESUMING:
1940 				if (queue_message(state, NULL,
1941 					VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
1942 					NULL, NULL, 0, QMFLAGS_NO_MUTEX_LOCK)
1943 					!= VCHIQ_RETRY) {
1944 					vchiq_set_conn_state(state,
1945 						VCHIQ_CONNSTATE_CONNECTED);
1946 				} else {
1947 					/*
1948 					 * This should really be impossible,
1949 					 * since the PAUSE should have flushed
1950 					 * through outstanding messages.
1951 					 */
1952 					vchiq_log_error(vchiq_core_log_level,
1953 						"Failed to send RESUME message");
1954 				}
1955 				break;
1956 			default:
1957 				break;
1958 			}
1959 
1960 		}
1961 
1962 		DEBUG_TRACE(SLOT_HANDLER_LINE);
1963 		parse_rx_slots(state);
1964 	}
1965 	return 0;
1966 }
1967 
1968 /* Called by the recycle thread */
1969 static int
1970 recycle_func(void *v)
1971 {
1972 	struct vchiq_state *state = v;
1973 	struct vchiq_shared_state *local = state->local;
1974 	BITSET_T *found;
1975 	size_t length;
1976 
1977 	length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES);
1978 
1979 	found = kmalloc_array(BITSET_SIZE(VCHIQ_MAX_SERVICES), sizeof(*found),
1980 			      GFP_KERNEL);
1981 	if (!found)
1982 		return -ENOMEM;
1983 
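	/*
	 * 'found' is a scratch bitset (one bit per possible service) that is
	 * handed to process_free_queue() on each pass.
	 */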
1984 	while (1) {
1985 		remote_event_wait(&state->recycle_event, &local->recycle);
1986 
1987 		process_free_queue(state, found, length);
1988 	}
1989 	return 0;
1990 }
1991 
1992 /* Called by the sync thread */
1993 static int
1994 sync_func(void *v)
1995 {
1996 	struct vchiq_state *state = v;
1997 	struct vchiq_shared_state *local = state->local;
1998 	struct vchiq_header *header =
1999 		(struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2000 			state->remote->slot_sync);
2001 
2002 	while (1) {
2003 		struct vchiq_service *service;
2004 		int msgid, size;
2005 		int type;
2006 		unsigned int localport, remoteport;
2007 
2008 		remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);
2009 
2010 		rmb();
2011 
2012 		msgid = header->msgid;
2013 		size = header->size;
2014 		type = VCHIQ_MSG_TYPE(msgid);
2015 		localport = VCHIQ_MSG_DSTPORT(msgid);
2016 		remoteport = VCHIQ_MSG_SRCPORT(msgid);
2017 
2018 		service = find_service_by_port(state, localport);
2019 
2020 		if (!service) {
2021 			vchiq_log_error(vchiq_sync_log_level,
2022 				"%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
2023 				state->id, msg_type_str(type),
2024 				header, remoteport, localport, localport);
2025 			release_message_sync(state, header);
2026 			continue;
2027 		}
2028 
2029 		if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
2030 			int svc_fourcc;
2031 
2032 			svc_fourcc = service
2033 				? service->base.fourcc
2034 				: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
2035 			vchiq_log_trace(vchiq_sync_log_level,
2036 				"Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
2037 				msg_type_str(type),
2038 				VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
2039 				remoteport, localport, size);
2040 			if (size > 0)
2041 				vchiq_log_dump_mem("Rcvd", 0, header->data,
2042 					min(16, size));
2043 		}
2044 
2045 		switch (type) {
2046 		case VCHIQ_MSG_OPENACK:
2047 			if (size >= sizeof(struct vchiq_openack_payload)) {
2048 				const struct vchiq_openack_payload *payload =
2049 					(struct vchiq_openack_payload *)
2050 					header->data;
2051 				service->peer_version = payload->version;
2052 			}
2053 			vchiq_log_info(vchiq_sync_log_level,
2054 				"%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
2055 				state->id, header, size, remoteport, localport,
2056 				service->peer_version);
2057 			if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
2058 				service->remoteport = remoteport;
2059 				vchiq_set_service_state(service,
2060 					VCHIQ_SRVSTATE_OPENSYNC);
2061 				service->sync = 1;
2062 				complete(&service->remove_event);
2063 			}
2064 			release_message_sync(state, header);
2065 			break;
2066 
2067 		case VCHIQ_MSG_DATA:
2068 			vchiq_log_trace(vchiq_sync_log_level,
2069 				"%d: sf DATA@%pK,%x (%d->%d)",
2070 				state->id, header, size, remoteport, localport);
2071 
2072 			if ((service->remoteport == remoteport) &&
2073 				(service->srvstate ==
2074 				VCHIQ_SRVSTATE_OPENSYNC)) {
2075 				if (make_service_callback(service,
2076 					VCHIQ_MESSAGE_AVAILABLE, header,
2077 					NULL) == VCHIQ_RETRY)
2078 					vchiq_log_error(vchiq_sync_log_level,
2079 						"synchronous callback to service %d returns VCHIQ_RETRY",
2080 						localport);
2081 			}
2082 			break;
2083 
2084 		default:
2085 			vchiq_log_error(vchiq_sync_log_level,
2086 				"%d: sf unexpected msgid %x@%pK,%x",
2087 				state->id, msgid, header, size);
2088 			release_message_sync(state, header);
2089 			break;
2090 		}
2091 
2092 		unlock_service(service);
2093 	}
2094 
2095 	return 0;
2096 }
2097 
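/* Reset a bulk queue to its initial, empty state (all indices zero). */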
2098 static void
2099 init_bulk_queue(struct vchiq_bulk_queue *queue)
2100 {
2101 	queue->local_insert = 0;
2102 	queue->remote_insert = 0;
2103 	queue->process = 0;
2104 	queue->remote_notify = 0;
2105 	queue->remove = 0;
2106 }
2107 
2108 inline const char *
2109 get_conn_state_name(enum vchiq_connstate conn_state)
2110 {
2111 	return conn_state_names[conn_state];
2112 }
2113 
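/*
 * Carve the shared memory area into VCHIQ slots: align to a slot boundary,
 * place slot_zero at the start and split the remaining data slots evenly
 * between the master and slave sides. Returns NULL if fewer than four data
 * slots would remain.
 */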
2114 struct vchiq_slot_zero *
2115 vchiq_init_slots(void *mem_base, int mem_size)
2116 {
2117 	int mem_align =
2118 		(int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
2119 	struct vchiq_slot_zero *slot_zero =
2120 		(struct vchiq_slot_zero *)(mem_base + mem_align);
2121 	int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
2122 	int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
2123 
2124 	/* Ensure there is enough memory to run an absolute minimum system */
2125 	num_slots -= first_data_slot;
2126 
2127 	if (num_slots < 4) {
2128 		vchiq_log_error(vchiq_core_log_level,
2129 			"%s - insufficient memory %x bytes",
2130 			__func__, mem_size);
2131 		return NULL;
2132 	}
2133 
2134 	memset(slot_zero, 0, sizeof(struct vchiq_slot_zero));
2135 
2136 	slot_zero->magic = VCHIQ_MAGIC;
2137 	slot_zero->version = VCHIQ_VERSION;
2138 	slot_zero->version_min = VCHIQ_VERSION_MIN;
2139 	slot_zero->slot_zero_size = sizeof(struct vchiq_slot_zero);
2140 	slot_zero->slot_size = VCHIQ_SLOT_SIZE;
2141 	slot_zero->max_slots = VCHIQ_MAX_SLOTS;
2142 	slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
2143 
2144 	slot_zero->master.slot_sync = first_data_slot;
2145 	slot_zero->master.slot_first = first_data_slot + 1;
2146 	slot_zero->master.slot_last = first_data_slot + (num_slots/2) - 1;
2147 	slot_zero->slave.slot_sync = first_data_slot + (num_slots/2);
2148 	slot_zero->slave.slot_first = first_data_slot + (num_slots/2) + 1;
2149 	slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
2150 
2151 	return slot_zero;
2152 }
2153 
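/*
 * Initialise the local connection state from slot_zero: set up the events,
 * mutexes and quotas, then create the slot handler, recycle and sync
 * threads. Fails if a state has already been initialised or a thread
 * cannot be created.
 */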
2154 enum vchiq_status
2155 vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
2156 {
2157 	struct vchiq_shared_state *local;
2158 	struct vchiq_shared_state *remote;
2159 	enum vchiq_status status;
2160 	char threadname[16];
2161 	int i;
2162 
2163 	if (vchiq_states[0]) {
2164 		pr_err("%s: VCHIQ state already initialized\n", __func__);
2165 		return VCHIQ_ERROR;
2166 	}
2167 
2168 	local = &slot_zero->slave;
2169 	remote = &slot_zero->master;
2170 
2171 	if (local->initialised) {
2172 		vchiq_loud_error_header();
2173 		if (remote->initialised)
2174 			vchiq_loud_error("local state has already been initialised");
2175 		else
2176 			vchiq_loud_error("master/slave mismatch two slaves");
2177 		vchiq_loud_error_footer();
2178 		return VCHIQ_ERROR;
2179 	}
2180 
2181 	memset(state, 0, sizeof(struct vchiq_state));
2182 
2183 	/*
2184 	 * initialize shared state pointers
2185 	 */
2186 
2187 	state->local = local;
2188 	state->remote = remote;
2189 	state->slot_data = (struct vchiq_slot *)slot_zero;
2190 
2191 	/*
2192 	 * initialize events and mutexes
2193 	 */
2194 
2195 	init_completion(&state->connect);
2196 	mutex_init(&state->mutex);
2197 	mutex_init(&state->slot_mutex);
2198 	mutex_init(&state->recycle_mutex);
2199 	mutex_init(&state->sync_mutex);
2200 	mutex_init(&state->bulk_transfer_mutex);
2201 
2202 	init_completion(&state->slot_available_event);
2203 	init_completion(&state->slot_remove_event);
2204 	init_completion(&state->data_quota_event);
2205 
2206 	state->slot_queue_available = 0;
2207 
2208 	for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
2209 		struct vchiq_service_quota *quota =
2210 			&state->service_quotas[i];
2211 		init_completion(&quota->quota_event);
2212 	}
2213 
2214 	for (i = local->slot_first; i <= local->slot_last; i++) {
2215 		local->slot_queue[state->slot_queue_available++] = i;
2216 		complete(&state->slot_available_event);
2217 	}
2218 
2219 	state->default_slot_quota = state->slot_queue_available/2;
2220 	state->default_message_quota =
2221 		min((unsigned short)(state->default_slot_quota * 256),
2222 		(unsigned short)~0);
2223 
2224 	state->previous_data_index = -1;
2225 	state->data_use_count = 0;
2226 	state->data_quota = state->slot_queue_available - 1;
2227 
2228 	remote_event_create(&state->trigger_event, &local->trigger);
2229 	local->tx_pos = 0;
2230 	remote_event_create(&state->recycle_event, &local->recycle);
2231 	local->slot_queue_recycle = state->slot_queue_available;
2232 	remote_event_create(&state->sync_trigger_event, &local->sync_trigger);
2233 	remote_event_create(&state->sync_release_event, &local->sync_release);
2234 
2235 	/* At start-of-day, the slot is empty and available */
2236 	((struct vchiq_header *)
2237 		SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid =
2238 							VCHIQ_MSGID_PADDING;
2239 	remote_event_signal_local(&state->sync_release_event, &local->sync_release);
2240 
2241 	local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
2242 
2243 	status = vchiq_platform_init_state(state);
2244 	if (status != VCHIQ_SUCCESS)
2245 		return VCHIQ_ERROR;
2246 
2247 	/*
2248 	 * bring up slot handler thread
2249 	 */
2250 	snprintf(threadname, sizeof(threadname), "vchiq-slot/%d", state->id);
2251 	state->slot_handler_thread = kthread_create(&slot_handler_func,
2252 		(void *)state,
2253 		threadname);
2254 
2255 	if (IS_ERR(state->slot_handler_thread)) {
2256 		vchiq_loud_error_header();
2257 		vchiq_loud_error("couldn't create thread %s", threadname);
2258 		vchiq_loud_error_footer();
2259 		return VCHIQ_ERROR;
2260 	}
2261 	set_user_nice(state->slot_handler_thread, -19);
2262 
2263 	snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id);
2264 	state->recycle_thread = kthread_create(&recycle_func,
2265 		(void *)state,
2266 		threadname);
2267 	if (IS_ERR(state->recycle_thread)) {
2268 		vchiq_loud_error_header();
2269 		vchiq_loud_error("couldn't create thread %s", threadname);
2270 		vchiq_loud_error_footer();
2271 		goto fail_free_handler_thread;
2272 	}
2273 	set_user_nice(state->recycle_thread, -19);
2274 
2275 	snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id);
2276 	state->sync_thread = kthread_create(&sync_func,
2277 		(void *)state,
2278 		threadname);
2279 	if (IS_ERR(state->sync_thread)) {
2280 		vchiq_loud_error_header();
2281 		vchiq_loud_error("couldn't create thread %s", threadname);
2282 		vchiq_loud_error_footer();
2283 		goto fail_free_recycle_thread;
2284 	}
2285 	set_user_nice(state->sync_thread, -20);
2286 
2287 	wake_up_process(state->slot_handler_thread);
2288 	wake_up_process(state->recycle_thread);
2289 	wake_up_process(state->sync_thread);
2290 
2291 	vchiq_states[0] = state;
2292 
2293 	/* Indicate readiness to the other side */
2294 	local->initialised = 1;
2295 
2296 	return status;
2297 
2298 fail_free_recycle_thread:
2299 	kthread_stop(state->recycle_thread);
2300 fail_free_handler_thread:
2301 	kthread_stop(state->slot_handler_thread);
2302 
2303 	return VCHIQ_ERROR;
2304 }
2305 
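/*
 * Append a message header to the service's message queue, waiting on
 * msg_queue_pop while the queue is full.
 */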
2306 void vchiq_msg_queue_push(unsigned int handle, struct vchiq_header *header)
2307 {
2308 	struct vchiq_service *service = find_service_by_handle(handle);
2309 	int pos;
2310 
2311 	while (service->msg_queue_write == service->msg_queue_read +
2312 		VCHIQ_MAX_SLOTS) {
2313 		if (wait_for_completion_interruptible(&service->msg_queue_pop))
2314 			flush_signals(current);
2315 	}
2316 
2317 	pos = service->msg_queue_write++ & (VCHIQ_MAX_SLOTS - 1);
2318 	service->msg_queue[pos] = header;
2319 
2320 	complete(&service->msg_queue_push);
2321 }
2322 EXPORT_SYMBOL(vchiq_msg_queue_push);
2323 
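/*
 * Remove the oldest header from the service's message queue and signal
 * msg_queue_pop; returns NULL if the queue is empty.
 */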
2324 struct vchiq_header *vchiq_msg_hold(unsigned int handle)
2325 {
2326 	struct vchiq_service *service = find_service_by_handle(handle);
2327 	struct vchiq_header *header;
2328 	int pos;
2329 
2330 	if (service->msg_queue_write == service->msg_queue_read)
2331 		return NULL;
2332 
2333 	while (service->msg_queue_write == service->msg_queue_read) {
2334 		if (wait_for_completion_interruptible(&service->msg_queue_push))
2335 			flush_signals(current);
2336 	}
2337 
2338 	pos = service->msg_queue_read++ & (VCHIQ_MAX_SLOTS - 1);
2339 	header = service->msg_queue[pos];
2340 
2341 	complete(&service->msg_queue_pop);
2342 
2343 	return header;
2344 }
2345 EXPORT_SYMBOL(vchiq_msg_hold);
2346 
2347 static int vchiq_validate_params(const struct vchiq_service_params_kernel *params)
2348 {
2349 	if (!params->callback || !params->fourcc) {
2350 		vchiq_loud_error("Can't add service, invalid params\n");
2351 		return -EINVAL;
2352 	}
2353 
2354 	return 0;
2355 }
2356 
2357 /* Called from application thread when a client or server service is created. */
2358 struct vchiq_service *
2359 vchiq_add_service_internal(struct vchiq_state *state,
2360 			   const struct vchiq_service_params_kernel *params,
2361 			   int srvstate, struct vchiq_instance *instance,
2362 			   vchiq_userdata_term userdata_term)
2363 {
2364 	struct vchiq_service *service;
2365 	struct vchiq_service __rcu **pservice = NULL;
2366 	struct vchiq_service_quota *quota;
2367 	int ret;
2368 	int i;
2369 
2370 	ret = vchiq_validate_params(params);
2371 	if (ret)
2372 		return NULL;
2373 
2374 	service = kmalloc(sizeof(*service), GFP_KERNEL);
2375 	if (!service)
2376 		return service;
2377 
2378 	service->base.fourcc   = params->fourcc;
2379 	service->base.callback = params->callback;
2380 	service->base.userdata = params->userdata;
2381 	service->handle        = VCHIQ_SERVICE_HANDLE_INVALID;
2382 	kref_init(&service->ref_count);
2383 	service->srvstate      = VCHIQ_SRVSTATE_FREE;
2384 	service->userdata_term = userdata_term;
2385 	service->localport     = VCHIQ_PORT_FREE;
2386 	service->remoteport    = VCHIQ_PORT_FREE;
2387 
2388 	service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
2389 		VCHIQ_FOURCC_INVALID : params->fourcc;
2390 	service->client_id     = 0;
2391 	service->auto_close    = 1;
2392 	service->sync          = 0;
2393 	service->closing       = 0;
2394 	service->trace         = 0;
2395 	atomic_set(&service->poll_flags, 0);
2396 	service->version       = params->version;
2397 	service->version_min   = params->version_min;
2398 	service->state         = state;
2399 	service->instance      = instance;
2400 	service->service_use_count = 0;
2401 	service->msg_queue_read = 0;
2402 	service->msg_queue_write = 0;
2403 	init_bulk_queue(&service->bulk_tx);
2404 	init_bulk_queue(&service->bulk_rx);
2405 	init_completion(&service->remove_event);
2406 	init_completion(&service->bulk_remove_event);
2407 	init_completion(&service->msg_queue_pop);
2408 	init_completion(&service->msg_queue_push);
2409 	mutex_init(&service->bulk_mutex);
2410 	memset(&service->stats, 0, sizeof(service->stats));
2411 	memset(&service->msg_queue, 0, sizeof(service->msg_queue));
2412 
2413 	/*
2414 	 * Although it is perfectly possible to use a spinlock
2415 	 * to protect the creation of services, it is overkill as it
2416 	 * disables interrupts while the array is searched.
2417 	 * The only danger is of another thread trying to create a
2418 	 * service - service deletion is safe.
2419 	 * Therefore it is preferable to use state->mutex which,
2420 	 * although slower to claim, doesn't block interrupts while
2421 	 * it is held.
2422 	 */
2423 
2424 	mutex_lock(&state->mutex);
2425 
2426 	/* Prepare to use a previously unused service */
2427 	if (state->unused_service < VCHIQ_MAX_SERVICES)
2428 		pservice = &state->services[state->unused_service];
2429 
2430 	if (srvstate == VCHIQ_SRVSTATE_OPENING) {
2431 		for (i = 0; i < state->unused_service; i++) {
2432 			if (!rcu_access_pointer(state->services[i])) {
2433 				pservice = &state->services[i];
2434 				break;
2435 			}
2436 		}
2437 	} else {
2438 		rcu_read_lock();
2439 		for (i = (state->unused_service - 1); i >= 0; i--) {
2440 			struct vchiq_service *srv;
2441 
2442 			srv = rcu_dereference(state->services[i]);
2443 			if (!srv)
2444 				pservice = &state->services[i];
2445 			else if ((srv->public_fourcc == params->fourcc) &&
2446 				 ((srv->instance != instance) ||
2447 				  (srv->base.callback != params->callback))) {
2448 				/*
2449 				 * There is another server using this
2450 				 * fourcc which doesn't match.
2451 				 */
2452 				pservice = NULL;
2453 				break;
2454 			}
2455 		}
2456 		rcu_read_unlock();
2457 	}
2458 
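	/*
	 * If a free entry was found, the local port is its array index and
	 * the handle combines a rolling sequence number with the state index
	 * and the local port.
	 */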
2459 	if (pservice) {
2460 		service->localport = (pservice - state->services);
2461 		if (!handle_seq)
2462 			handle_seq = VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
2464 		service->handle = handle_seq |
2465 			(state->id * VCHIQ_MAX_SERVICES) |
2466 			service->localport;
2467 		handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
2468 		rcu_assign_pointer(*pservice, service);
2469 		if (pservice == &state->services[state->unused_service])
2470 			state->unused_service++;
2471 	}
2472 
2473 	mutex_unlock(&state->mutex);
2474 
2475 	if (!pservice) {
2476 		kfree(service);
2477 		return NULL;
2478 	}
2479 
2480 	quota = &state->service_quotas[service->localport];
2481 	quota->slot_quota = state->default_slot_quota;
2482 	quota->message_quota = state->default_message_quota;
2483 	if (quota->slot_use_count == 0)
2484 		quota->previous_tx_index =
2485 			SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
2486 			- 1;
2487 
2488 	/* Bring this service online */
2489 	vchiq_set_service_state(service, srvstate);
2490 
2491 	vchiq_log_info(vchiq_core_msg_log_level,
2492 		"%s Service %c%c%c%c SrcPort:%d",
2493 		(srvstate == VCHIQ_SRVSTATE_OPENING)
2494 		? "Open" : "Add",
2495 		VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
2496 		service->localport);
2497 
2498 	/* Don't unlock the service - leave it with a ref_count of 1. */
2499 
2500 	return service;
2501 }
2502 
2503 enum vchiq_status
2504 vchiq_open_service_internal(struct vchiq_service *service, int client_id)
2505 {
2506 	struct vchiq_open_payload payload = {
2507 		service->base.fourcc,
2508 		client_id,
2509 		service->version,
2510 		service->version_min
2511 	};
2512 	enum vchiq_status status = VCHIQ_SUCCESS;
2513 
2514 	service->client_id = client_id;
2515 	vchiq_use_service_internal(service);
2516 	status = queue_message(service->state,
2517 			       NULL,
2518 			       VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN,
2519 					      service->localport,
2520 					      0),
2521 			       memcpy_copy_callback,
2522 			       &payload,
2523 			       sizeof(payload),
2524 			       QMFLAGS_IS_BLOCKING);
2525 
2526 	if (status != VCHIQ_SUCCESS)
2527 		return status;
2528 
2529 	/* Wait for the ACK/NAK */
2530 	if (wait_for_completion_interruptible(&service->remove_event)) {
2531 		status = VCHIQ_RETRY;
2532 		vchiq_release_service_internal(service);
2533 	} else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
2534 		   (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
2535 		if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
2536 			vchiq_log_error(vchiq_core_log_level,
2537 					"%d: osi - srvstate = %s (ref %u)",
2538 					service->state->id,
2539 					srvstate_names[service->srvstate],
2540 					kref_read(&service->ref_count));
2541 		status = VCHIQ_ERROR;
2542 		VCHIQ_SERVICE_STATS_INC(service, error_count);
2543 		vchiq_release_service_internal(service);
2544 	}
2545 
2546 	return status;
2547 }
2548 
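/*
 * Release any received messages still claimed by this service, scanning
 * either the sync slot or all of the remote data slots as appropriate.
 */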
2549 static void
2550 release_service_messages(struct vchiq_service *service)
2551 {
2552 	struct vchiq_state *state = service->state;
2553 	int slot_last = state->remote->slot_last;
2554 	int i;
2555 
2556 	/* Release any claimed messages aimed at this service */
2557 
2558 	if (service->sync) {
2559 		struct vchiq_header *header =
2560 			(struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2561 						state->remote->slot_sync);
2562 		if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport)
2563 			release_message_sync(state, header);
2564 
2565 		return;
2566 	}
2567 
2568 	for (i = state->remote->slot_first; i <= slot_last; i++) {
2569 		struct vchiq_slot_info *slot_info =
2570 			SLOT_INFO_FROM_INDEX(state, i);
2571 		if (slot_info->release_count != slot_info->use_count) {
2572 			char *data =
2573 				(char *)SLOT_DATA_FROM_INDEX(state, i);
2574 			unsigned int pos, end;
2575 
2576 			end = VCHIQ_SLOT_SIZE;
2577 			if (data == state->rx_data)
2578 				/*
2579 				 * This buffer is still being read from - stop
2580 				 * at the current read position
2581 				 */
2582 				end = state->rx_pos & VCHIQ_SLOT_MASK;
2583 
2584 			pos = 0;
2585 
2586 			while (pos < end) {
2587 				struct vchiq_header *header =
2588 					(struct vchiq_header *)(data + pos);
2589 				int msgid = header->msgid;
2590 				int port = VCHIQ_MSG_DSTPORT(msgid);
2591 
2592 				if ((port == service->localport) &&
2593 					(msgid & VCHIQ_MSGID_CLAIMED)) {
2594 					vchiq_log_info(vchiq_core_log_level,
2595 						"  fsi - hdr %pK", header);
2596 					release_slot(state, slot_info, header,
2597 						NULL);
2598 				}
2599 				pos += calc_stride(header->size);
2600 				if (pos > VCHIQ_SLOT_SIZE) {
2601 					vchiq_log_error(vchiq_core_log_level,
2602 						"fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
2603 						pos, header, msgid,
2604 						header->msgid, header->size);
2605 					WARN(1, "invalid slot position\n");
2606 				}
2607 			}
2608 		}
2609 	}
2610 }
2611 
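/*
 * Abort all outstanding bulk transfers on a service and notify their
 * completion. Returns non-zero on success, or 0 if the bulk mutex could
 * not be taken or a notification needs to be retried.
 */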
2612 static int
2613 do_abort_bulks(struct vchiq_service *service)
2614 {
2615 	enum vchiq_status status;
2616 
2617 	/* Abort any outstanding bulk transfers */
2618 	if (mutex_lock_killable(&service->bulk_mutex))
2619 		return 0;
2620 	abort_outstanding_bulks(service, &service->bulk_tx);
2621 	abort_outstanding_bulks(service, &service->bulk_rx);
2622 	mutex_unlock(&service->bulk_mutex);
2623 
2624 	status = notify_bulks(service, &service->bulk_tx, 0/*!retry_poll*/);
2625 	if (status == VCHIQ_SUCCESS)
2626 		status = notify_bulks(service, &service->bulk_rx,
2627 			0/*!retry_poll*/);
2628 	return (status == VCHIQ_SUCCESS);
2629 }
2630 
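/*
 * Final stage of closing a service: move it to the appropriate state
 * (LISTENING, CLOSEWAIT or CLOSED), deliver the SERVICE_CLOSED callback
 * and drop any remaining use counts. On VCHIQ_RETRY the service is left
 * in 'failstate'.
 */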
2631 static enum vchiq_status
2632 close_service_complete(struct vchiq_service *service, int failstate)
2633 {
2634 	enum vchiq_status status;
2635 	int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2636 	int newstate;
2637 
2638 	switch (service->srvstate) {
2639 	case VCHIQ_SRVSTATE_OPEN:
2640 	case VCHIQ_SRVSTATE_CLOSESENT:
2641 	case VCHIQ_SRVSTATE_CLOSERECVD:
2642 		if (is_server) {
2643 			if (service->auto_close) {
2644 				service->client_id = 0;
2645 				service->remoteport = VCHIQ_PORT_FREE;
2646 				newstate = VCHIQ_SRVSTATE_LISTENING;
2647 			} else
2648 				newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
2649 		} else
2650 			newstate = VCHIQ_SRVSTATE_CLOSED;
2651 		vchiq_set_service_state(service, newstate);
2652 		break;
2653 	case VCHIQ_SRVSTATE_LISTENING:
2654 		break;
2655 	default:
2656 		vchiq_log_error(vchiq_core_log_level,
2657 			"%s(%x) called in state %s", __func__,
2658 			service->handle, srvstate_names[service->srvstate]);
2659 		WARN(1, "%s in unexpected state\n", __func__);
2660 		return VCHIQ_ERROR;
2661 	}
2662 
2663 	status = make_service_callback(service,
2664 		VCHIQ_SERVICE_CLOSED, NULL, NULL);
2665 
2666 	if (status != VCHIQ_RETRY) {
2667 		int uc = service->service_use_count;
2668 		int i;
2669 		/* Complete the close process */
2670 		for (i = 0; i < uc; i++)
2671 			/*
2672 			 * cater for cases where close is forced and the
2673 			 * client may not close all its handles
2674 			 */
2675 			vchiq_release_service_internal(service);
2676 
2677 		service->client_id = 0;
2678 		service->remoteport = VCHIQ_PORT_FREE;
2679 
2680 		if (service->srvstate == VCHIQ_SRVSTATE_CLOSED)
2681 			vchiq_free_service_internal(service);
2682 		else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
2683 			if (is_server)
2684 				service->closing = 0;
2685 
2686 			complete(&service->remove_event);
2687 		}
2688 	} else
2689 		vchiq_set_service_state(service, failstate);
2690 
2691 	return status;
2692 }
2693 
2694 /* Called by the slot handler */
2695 enum vchiq_status
2696 vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
2697 {
2698 	struct vchiq_state *state = service->state;
2699 	enum vchiq_status status = VCHIQ_SUCCESS;
2700 	int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2701 
2702 	vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
2703 		service->state->id, service->localport, close_recvd,
2704 		srvstate_names[service->srvstate]);
2705 
2706 	switch (service->srvstate) {
2707 	case VCHIQ_SRVSTATE_CLOSED:
2708 	case VCHIQ_SRVSTATE_HIDDEN:
2709 	case VCHIQ_SRVSTATE_LISTENING:
2710 	case VCHIQ_SRVSTATE_CLOSEWAIT:
2711 		if (close_recvd)
2712 			vchiq_log_error(vchiq_core_log_level,
2713 				"%s(1) called in state %s",
2714 				__func__, srvstate_names[service->srvstate]);
2715 		else if (is_server) {
2716 			if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
2717 				status = VCHIQ_ERROR;
2718 			} else {
2719 				service->client_id = 0;
2720 				service->remoteport = VCHIQ_PORT_FREE;
2721 				if (service->srvstate ==
2722 					VCHIQ_SRVSTATE_CLOSEWAIT)
2723 					vchiq_set_service_state(service,
2724 						VCHIQ_SRVSTATE_LISTENING);
2725 			}
2726 			complete(&service->remove_event);
2727 		} else
2728 			vchiq_free_service_internal(service);
2729 		break;
2730 	case VCHIQ_SRVSTATE_OPENING:
2731 		if (close_recvd) {
2732 			/* The open was rejected - tell the user */
2733 			vchiq_set_service_state(service,
2734 				VCHIQ_SRVSTATE_CLOSEWAIT);
2735 			complete(&service->remove_event);
2736 		} else {
2737 			/* Shutdown mid-open - let the other side know */
2738 			status = queue_message(state, service,
2739 				VCHIQ_MAKE_MSG
2740 				(VCHIQ_MSG_CLOSE,
2741 				service->localport,
2742 				VCHIQ_MSG_DSTPORT(service->remoteport)),
2743 				NULL, NULL, 0, 0);
2744 		}
2745 		break;
2746 
2747 	case VCHIQ_SRVSTATE_OPENSYNC:
2748 		mutex_lock(&state->sync_mutex);
2749 		fallthrough;
2750 	case VCHIQ_SRVSTATE_OPEN:
2751 		if (close_recvd) {
2752 			if (!do_abort_bulks(service))
2753 				status = VCHIQ_RETRY;
2754 		}
2755 
2756 		release_service_messages(service);
2757 
2758 		if (status == VCHIQ_SUCCESS)
2759 			status = queue_message(state, service,
2760 				VCHIQ_MAKE_MSG
2761 				(VCHIQ_MSG_CLOSE,
2762 				service->localport,
2763 				VCHIQ_MSG_DSTPORT(service->remoteport)),
2764 				NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);
2765 
2766 		if (status == VCHIQ_SUCCESS) {
2767 			if (!close_recvd) {
2768 				/* Change the state while the mutex is still held */
2769 				vchiq_set_service_state(service,
2770 							VCHIQ_SRVSTATE_CLOSESENT);
2771 				mutex_unlock(&state->slot_mutex);
2772 				if (service->sync)
2773 					mutex_unlock(&state->sync_mutex);
2774 				break;
2775 			}
2776 		} else if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC) {
2777 			mutex_unlock(&state->sync_mutex);
2778 			break;
2779 		} else
2780 			break;
2781 
2782 		/* Change the state while the mutex is still held */
2783 		vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
2784 		mutex_unlock(&state->slot_mutex);
2785 		if (service->sync)
2786 			mutex_unlock(&state->sync_mutex);
2787 
2788 		status = close_service_complete(service,
2789 				VCHIQ_SRVSTATE_CLOSERECVD);
2790 		break;
2791 
2792 	case VCHIQ_SRVSTATE_CLOSESENT:
2793 		if (!close_recvd)
2794 			/* This happens when a process is killed mid-close */
2795 			break;
2796 
2797 		if (!do_abort_bulks(service)) {
2798 			status = VCHIQ_RETRY;
2799 			break;
2800 		}
2801 
2802 		if (status == VCHIQ_SUCCESS)
2803 			status = close_service_complete(service,
2804 				VCHIQ_SRVSTATE_CLOSERECVD);
2805 		break;
2806 
2807 	case VCHIQ_SRVSTATE_CLOSERECVD:
2808 		if (!close_recvd && is_server)
2809 			/* Force into LISTENING mode */
2810 			vchiq_set_service_state(service,
2811 				VCHIQ_SRVSTATE_LISTENING);
2812 		status = close_service_complete(service,
2813 			VCHIQ_SRVSTATE_CLOSERECVD);
2814 		break;
2815 
2816 	default:
2817 		vchiq_log_error(vchiq_core_log_level,
2818 			"%s(%d) called in state %s", __func__,
2819 			close_recvd, srvstate_names[service->srvstate]);
2820 		break;
2821 	}
2822 
2823 	return status;
2824 }
2825 
2826 /* Called from the application process upon process death */
2827 void
2828 vchiq_terminate_service_internal(struct vchiq_service *service)
2829 {
2830 	struct vchiq_state *state = service->state;
2831 
2832 	vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
2833 		state->id, service->localport, service->remoteport);
2834 
2835 	mark_service_closing(service);
2836 
2837 	/* Mark the service for removal by the slot handler */
2838 	request_poll(state, service, VCHIQ_POLL_REMOVE);
2839 }
2840 
2841 /* Called from the slot handler */
2842 void
2843 vchiq_free_service_internal(struct vchiq_service *service)
2844 {
2845 	struct vchiq_state *state = service->state;
2846 
2847 	vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
2848 		state->id, service->localport);
2849 
2850 	switch (service->srvstate) {
2851 	case VCHIQ_SRVSTATE_OPENING:
2852 	case VCHIQ_SRVSTATE_CLOSED:
2853 	case VCHIQ_SRVSTATE_HIDDEN:
2854 	case VCHIQ_SRVSTATE_LISTENING:
2855 	case VCHIQ_SRVSTATE_CLOSEWAIT:
2856 		break;
2857 	default:
2858 		vchiq_log_error(vchiq_core_log_level,
2859 			"%d: fsi - (%d) in state %s",
2860 			state->id, service->localport,
2861 			srvstate_names[service->srvstate]);
2862 		return;
2863 	}
2864 
2865 	vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);
2866 
2867 	complete(&service->remove_event);
2868 
2869 	/* Release the initial lock */
2870 	unlock_service(service);
2871 }
2872 
2873 enum vchiq_status
2874 vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2875 {
2876 	struct vchiq_service *service;
2877 	int i;
2878 
2879 	/* Find all services registered to this client and enable them. */
2880 	i = 0;
2881 	while ((service = next_service_by_instance(state, instance,
2882 		&i)) !=	NULL) {
2883 		if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
2884 			vchiq_set_service_state(service,
2885 				VCHIQ_SRVSTATE_LISTENING);
2886 		unlock_service(service);
2887 	}
2888 
2889 	if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
2890 		if (queue_message(state, NULL,
2891 			VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, NULL,
2892 			0, QMFLAGS_IS_BLOCKING) == VCHIQ_RETRY)
2893 			return VCHIQ_RETRY;
2894 
2895 		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
2896 	}
2897 
2898 	if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
2899 		if (wait_for_completion_interruptible(&state->connect))
2900 			return VCHIQ_RETRY;
2901 
2902 		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
2903 		complete(&state->connect);
2904 	}
2905 
2906 	return VCHIQ_SUCCESS;
2907 }
2908 
2909 enum vchiq_status
2910 vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2911 {
2912 	struct vchiq_service *service;
2913 	int i;
2914 
2915 	/* Find all services registered to this client and remove them. */
2916 	i = 0;
2917 	while ((service = next_service_by_instance(state, instance,
2918 		&i)) !=	NULL) {
2919 		(void)vchiq_remove_service(service->handle);
2920 		unlock_service(service);
2921 	}
2922 
2923 	return VCHIQ_SUCCESS;
2924 }
2925 
2926 enum vchiq_status
2927 vchiq_close_service(unsigned int handle)
2928 {
2929 	/* Unregister the service */
2930 	struct vchiq_service *service = find_service_by_handle(handle);
2931 	enum vchiq_status status = VCHIQ_SUCCESS;
2932 
2933 	if (!service)
2934 		return VCHIQ_ERROR;
2935 
2936 	vchiq_log_info(vchiq_core_log_level,
2937 		"%d: close_service:%d",
2938 		service->state->id, service->localport);
2939 
2940 	if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2941 	    (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2942 	    (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
2943 		unlock_service(service);
2944 		return VCHIQ_ERROR;
2945 	}
2946 
2947 	mark_service_closing(service);
2948 
2949 	if (current == service->state->slot_handler_thread) {
2950 		status = vchiq_close_service_internal(service,
2951 			0/*!close_recvd*/);
2952 		WARN_ON(status == VCHIQ_RETRY);
2953 	} else {
2954 		/* Mark the service for termination by the slot handler */
2955 		request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
2956 	}
2957 
2958 	while (1) {
2959 		if (wait_for_completion_interruptible(&service->remove_event)) {
2960 			status = VCHIQ_RETRY;
2961 			break;
2962 		}
2963 
2964 		if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2965 		    (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2966 		    (service->srvstate == VCHIQ_SRVSTATE_OPEN))
2967 			break;
2968 
2969 		vchiq_log_warning(vchiq_core_log_level,
2970 			"%d: close_service:%d - waiting in state %s",
2971 			service->state->id, service->localport,
2972 			srvstate_names[service->srvstate]);
2973 	}
2974 
2975 	if ((status == VCHIQ_SUCCESS) &&
2976 	    (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
2977 	    (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
2978 		status = VCHIQ_ERROR;
2979 
2980 	unlock_service(service);
2981 
2982 	return status;
2983 }
2984 EXPORT_SYMBOL(vchiq_close_service);
2985 
2986 enum vchiq_status
2987 vchiq_remove_service(unsigned int handle)
2988 {
2989 	/* Unregister the service */
2990 	struct vchiq_service *service = find_service_by_handle(handle);
2991 	enum vchiq_status status = VCHIQ_SUCCESS;
2992 
2993 	if (!service)
2994 		return VCHIQ_ERROR;
2995 
2996 	vchiq_log_info(vchiq_core_log_level,
2997 		"%d: remove_service:%d",
2998 		service->state->id, service->localport);
2999 
3000 	if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
3001 		unlock_service(service);
3002 		return VCHIQ_ERROR;
3003 	}
3004 
3005 	mark_service_closing(service);
3006 
3007 	if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3008 	    (current == service->state->slot_handler_thread)) {
3009 		/*
3010 		 * Make it look like a client, because it must be removed and
3011 		 * not left in the LISTENING state.
3012 		 */
3013 		service->public_fourcc = VCHIQ_FOURCC_INVALID;
3014 
3015 		status = vchiq_close_service_internal(service,
3016 			0/*!close_recvd*/);
3017 		WARN_ON(status == VCHIQ_RETRY);
3018 	} else {
3019 		/* Mark the service for removal by the slot handler */
3020 		request_poll(service->state, service, VCHIQ_POLL_REMOVE);
3021 	}
3022 	while (1) {
3023 		if (wait_for_completion_interruptible(&service->remove_event)) {
3024 			status = VCHIQ_RETRY;
3025 			break;
3026 		}
3027 
3028 		if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
3029 		    (service->srvstate == VCHIQ_SRVSTATE_OPEN))
3030 			break;
3031 
3032 		vchiq_log_warning(vchiq_core_log_level,
3033 			"%d: remove_service:%d - waiting in state %s",
3034 			service->state->id, service->localport,
3035 			srvstate_names[service->srvstate]);
3036 	}
3037 
3038 	if ((status == VCHIQ_SUCCESS) &&
3039 	    (service->srvstate != VCHIQ_SRVSTATE_FREE))
3040 		status = VCHIQ_ERROR;
3041 
3042 	unlock_service(service);
3043 
3044 	return status;
3045 }
3046 
3047 /*
3048  * This function may be called by kernel threads or user threads.
3049  * User threads may receive VCHIQ_RETRY to indicate that a signal has been
3050  * received and the call should be retried after being returned to user
3051  * context.
3052  * When called in blocking mode, the userdata field points to a bulk_waiter
3053  * structure.
3054  */
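/*
 * A minimal sketch of a blocking transfer from kernel context (the buffer
 * and handle are assumed to come from the caller):
 *
 *	struct bulk_waiter waiter;
 *
 *	status = vchiq_bulk_transfer(handle, buf, NULL, size, &waiter,
 *				     VCHIQ_BULK_MODE_BLOCKING,
 *				     VCHIQ_BULK_TRANSMIT);
 */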
3055 enum vchiq_status vchiq_bulk_transfer(unsigned int handle,
3056 				   void *offset, void __user *uoffset,
3057 				   int size, void *userdata,
3058 				   enum vchiq_bulk_mode mode,
3059 				   enum vchiq_bulk_dir dir)
3060 {
3061 	struct vchiq_service *service = find_service_by_handle(handle);
3062 	struct vchiq_bulk_queue *queue;
3063 	struct vchiq_bulk *bulk;
3064 	struct vchiq_state *state;
3065 	struct bulk_waiter *bulk_waiter = NULL;
3066 	const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
3067 	const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
3068 		VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
3069 	enum vchiq_status status = VCHIQ_ERROR;
3070 	int payload[2];
3071 
3072 	if (!service)
3073 		goto error_exit;
3074 
3075 	if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3076 		goto error_exit;
3077 
3078 	if (!offset && !uoffset)
3079 		goto error_exit;
3080 
3081 	if (vchiq_check_service(service) != VCHIQ_SUCCESS)
3082 		goto error_exit;
3083 
3084 	switch (mode) {
3085 	case VCHIQ_BULK_MODE_NOCALLBACK:
3086 	case VCHIQ_BULK_MODE_CALLBACK:
3087 		break;
3088 	case VCHIQ_BULK_MODE_BLOCKING:
3089 		bulk_waiter = userdata;
3090 		init_completion(&bulk_waiter->event);
3091 		bulk_waiter->actual = 0;
3092 		bulk_waiter->bulk = NULL;
3093 		break;
3094 	case VCHIQ_BULK_MODE_WAITING:
3095 		bulk_waiter = userdata;
3096 		bulk = bulk_waiter->bulk;
3097 		goto waiting;
3098 	default:
3099 		goto error_exit;
3100 	}
3101 
3102 	state = service->state;
3103 
3104 	queue = (dir == VCHIQ_BULK_TRANSMIT) ?
3105 		&service->bulk_tx : &service->bulk_rx;
3106 
3107 	if (mutex_lock_killable(&service->bulk_mutex)) {
3108 		status = VCHIQ_RETRY;
3109 		goto error_exit;
3110 	}
3111 
3112 	if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
3113 		VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
3114 		do {
3115 			mutex_unlock(&service->bulk_mutex);
3116 			if (wait_for_completion_interruptible(
3117 						&service->bulk_remove_event)) {
3118 				status = VCHIQ_RETRY;
3119 				goto error_exit;
3120 			}
3121 			if (mutex_lock_killable(&service->bulk_mutex)) {
3122 				status = VCHIQ_RETRY;
3123 				goto error_exit;
3124 			}
3125 		} while (queue->local_insert == queue->remove +
3126 				VCHIQ_NUM_SERVICE_BULKS);
3127 	}
3128 
3129 	bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
3130 
3131 	bulk->mode = mode;
3132 	bulk->dir = dir;
3133 	bulk->userdata = userdata;
3134 	bulk->size = size;
3135 	bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
3136 
3137 	if (vchiq_prepare_bulk_data(bulk, offset, uoffset, size, dir)
3138 			!= VCHIQ_SUCCESS)
3139 		goto unlock_error_exit;
3140 
3141 	wmb();
3142 
3143 	vchiq_log_info(vchiq_core_log_level,
3144 		"%d: bt (%d->%d) %cx %x@%pad %pK",
3145 		state->id, service->localport, service->remoteport, dir_char,
3146 		size, &bulk->data, userdata);
3147 
3148 	/*
3149 	 * The slot mutex must be held when the service is being closed, so
3150 	 * claim it here to ensure that isn't happening
3151 	 */
3152 	if (mutex_lock_killable(&state->slot_mutex)) {
3153 		status = VCHIQ_RETRY;
3154 		goto cancel_bulk_error_exit;
3155 	}
3156 
3157 	if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3158 		goto unlock_both_error_exit;
3159 
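	/*
	 * The two-word payload carries the lower 32 bits of the bulk data
	 * address and the transfer size.
	 */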
3160 	payload[0] = lower_32_bits(bulk->data);
3161 	payload[1] = bulk->size;
3162 	status = queue_message(state,
3163 			       NULL,
3164 			       VCHIQ_MAKE_MSG(dir_msgtype,
3165 					      service->localport,
3166 					      service->remoteport),
3167 			       memcpy_copy_callback,
3168 			       &payload,
3169 			       sizeof(payload),
3170 			       QMFLAGS_IS_BLOCKING |
3171 			       QMFLAGS_NO_MUTEX_LOCK |
3172 			       QMFLAGS_NO_MUTEX_UNLOCK);
3173 	if (status != VCHIQ_SUCCESS)
3174 		goto unlock_both_error_exit;
3175 
3176 	queue->local_insert++;
3177 
3178 	mutex_unlock(&state->slot_mutex);
3179 	mutex_unlock(&service->bulk_mutex);
3180 
3181 	vchiq_log_trace(vchiq_core_log_level,
3182 		"%d: bt:%d %cx li=%x ri=%x p=%x",
3183 		state->id,
3184 		service->localport, dir_char,
3185 		queue->local_insert, queue->remote_insert, queue->process);
3186 
3187 waiting:
3188 	unlock_service(service);
3189 
3190 	status = VCHIQ_SUCCESS;
3191 
3192 	if (bulk_waiter) {
3193 		bulk_waiter->bulk = bulk;
3194 		if (wait_for_completion_interruptible(&bulk_waiter->event))
3195 			status = VCHIQ_RETRY;
3196 		else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
3197 			status = VCHIQ_ERROR;
3198 	}
3199 
3200 	return status;
3201 
3202 unlock_both_error_exit:
3203 	mutex_unlock(&state->slot_mutex);
3204 cancel_bulk_error_exit:
3205 	vchiq_complete_bulk(bulk);
3206 unlock_error_exit:
3207 	mutex_unlock(&service->bulk_mutex);
3208 
3209 error_exit:
3210 	if (service)
3211 		unlock_service(service);
3212 	return status;
3213 }
3214 
3215 enum vchiq_status
3216 vchiq_queue_message(unsigned int handle,
3217 		    ssize_t (*copy_callback)(void *context, void *dest,
3218 					     size_t offset, size_t maxsize),
3219 		    void *context,
3220 		    size_t size)
3221 {
3222 	struct vchiq_service *service = find_service_by_handle(handle);
3223 	enum vchiq_status status = VCHIQ_ERROR;
3224 
3225 	if (!service)
3226 		goto error_exit;
3227 
3228 	if (vchiq_check_service(service) != VCHIQ_SUCCESS)
3229 		goto error_exit;
3230 
3231 	if (!size) {
3232 		VCHIQ_SERVICE_STATS_INC(service, error_count);
3233 		goto error_exit;
3235 	}
3236 
3237 	if (size > VCHIQ_MAX_MSG_SIZE) {
3238 		VCHIQ_SERVICE_STATS_INC(service, error_count);
3239 		goto error_exit;
3240 	}
3241 
3242 	switch (service->srvstate) {
3243 	case VCHIQ_SRVSTATE_OPEN:
3244 		status = queue_message(service->state, service,
3245 				VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
3246 					service->localport,
3247 					service->remoteport),
3248 				copy_callback, context, size, 1);
3249 		break;
3250 	case VCHIQ_SRVSTATE_OPENSYNC:
3251 		status = queue_message_sync(service->state, service,
3252 				VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
3253 					service->localport,
3254 					service->remoteport),
3255 				copy_callback, context, size, 1);
3256 		break;
3257 	default:
3258 		status = VCHIQ_ERROR;
3259 		break;
3260 	}
3261 
3262 error_exit:
3263 	if (service)
3264 		unlock_service(service);
3265 
3266 	return status;
3267 }
3268 
3269 int vchiq_queue_kernel_message(unsigned int handle, void *data, unsigned int size)
3270 {
3271 	enum vchiq_status status;
3272 
3273 	while (1) {
3274 		status = vchiq_queue_message(handle, memcpy_copy_callback,
3275 					     data, size);
3276 
3277 		/*
3278 		 * vchiq_queue_message() may return VCHIQ_RETRY, so we need to
3279 		 * implement a retry mechanism since this function is supposed
3280 		 * to block until queued
3281 		 */
3282 		if (status != VCHIQ_RETRY)
3283 			break;
3284 
3285 		msleep(1);
3286 	}
3287 
3288 	return status;
3289 }
3290 EXPORT_SYMBOL(vchiq_queue_kernel_message);
3291 
3292 void
3293 vchiq_release_message(unsigned int handle,
3294 		      struct vchiq_header *header)
3295 {
3296 	struct vchiq_service *service = find_service_by_handle(handle);
3297 	struct vchiq_shared_state *remote;
3298 	struct vchiq_state *state;
3299 	int slot_index;
3300 
3301 	if (!service)
3302 		return;
3303 
3304 	state = service->state;
3305 	remote = state->remote;
3306 
3307 	slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
3308 
3309 	if ((slot_index >= remote->slot_first) &&
3310 	    (slot_index <= remote->slot_last)) {
3311 		int msgid = header->msgid;
3312 
3313 		if (msgid & VCHIQ_MSGID_CLAIMED) {
3314 			struct vchiq_slot_info *slot_info =
3315 				SLOT_INFO_FROM_INDEX(state, slot_index);
3316 
3317 			release_slot(state, slot_info, header, service);
3318 		}
3319 	} else if (slot_index == remote->slot_sync)
3320 		release_message_sync(state, header);
3321 
3322 	unlock_service(service);
3323 }
3324 EXPORT_SYMBOL(vchiq_release_message);
3325 
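/*
 * Mark the synchronous slot as padding (free) and signal the remote
 * sync_release event.
 */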
3326 static void
3327 release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
3328 {
3329 	header->msgid = VCHIQ_MSGID_PADDING;
3330 	remote_event_signal(&state->remote->sync_release);
3331 }
3332 
3333 enum vchiq_status
3334 vchiq_get_peer_version(unsigned int handle, short *peer_version)
3335 {
3336 	enum vchiq_status status = VCHIQ_ERROR;
3337 	struct vchiq_service *service = find_service_by_handle(handle);
3338 
3339 	if (!service)
3340 		goto exit;
3341 
3342 	if (vchiq_check_service(service) != VCHIQ_SUCCESS)
3343 		goto exit;
3344 
3345 	if (!peer_version)
3346 		goto exit;
3347 
3348 	*peer_version = service->peer_version;
3349 	status = VCHIQ_SUCCESS;
3350 
3351 exit:
3352 	if (service)
3353 		unlock_service(service);
3354 	return status;
3355 }
3356 EXPORT_SYMBOL(vchiq_get_peer_version);
3357 
3358 void vchiq_get_config(struct vchiq_config *config)
3359 {
3360 	config->max_msg_size           = VCHIQ_MAX_MSG_SIZE;
3361 	config->bulk_threshold         = VCHIQ_MAX_MSG_SIZE;
3362 	config->max_outstanding_bulks  = VCHIQ_NUM_SERVICE_BULKS;
3363 	config->max_services           = VCHIQ_MAX_SERVICES;
3364 	config->version                = VCHIQ_VERSION;
3365 	config->version_min            = VCHIQ_VERSION_MIN;
3366 }
3367 
3368 enum vchiq_status
3369 vchiq_set_service_option(unsigned int handle,
3370 	enum vchiq_service_option option, int value)
3371 {
3372 	struct vchiq_service *service = find_service_by_handle(handle);
3373 	enum vchiq_status status = VCHIQ_ERROR;
3374 	struct vchiq_service_quota *quota;
3375 
3376 	if (!service)
3377 		return VCHIQ_ERROR;
3378 
3379 	switch (option) {
3380 	case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
3381 		service->auto_close = value;
3382 		status = VCHIQ_SUCCESS;
3383 		break;
3384 
3385 	case VCHIQ_SERVICE_OPTION_SLOT_QUOTA:
3386 		quota = &service->state->service_quotas[service->localport];
3387 		if (value == 0)
3388 			value = service->state->default_slot_quota;
3389 		if ((value >= quota->slot_use_count) &&
3390 		    (value < (unsigned short)~0)) {
3391 			quota->slot_quota = value;
3392 			if ((value >= quota->slot_use_count) &&
3393 			    (quota->message_quota >= quota->message_use_count))
3394 				/*
3395 				 * Signal the service that it may have
3396 				 * dropped below its quota
3397 				 */
3398 				complete(&quota->quota_event);
3399 			status = VCHIQ_SUCCESS;
3400 		}
3401 		break;
3402 
3403 	case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA:
3404 		quota = &service->state->service_quotas[service->localport];
3405 		if (value == 0)
3406 			value = service->state->default_message_quota;
3407 		if ((value >= quota->message_use_count) &&
3408 		    (value < (unsigned short)~0)) {
3409 			quota->message_quota = value;
3410 			if ((value >= quota->message_use_count) &&
3411 			    (quota->slot_quota >= quota->slot_use_count))
3412 				/*
3413 				 * Signal the service that it may have
3414 				 * dropped below its quota
3415 				 */
3416 				complete(&quota->quota_event);
3417 			status = VCHIQ_SUCCESS;
3418 		}
3419 		break;
3420 
3421 	case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
3422 		if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3423 		    (service->srvstate == VCHIQ_SRVSTATE_LISTENING)) {
3424 			service->sync = value;
3425 			status = VCHIQ_SUCCESS;
3426 		}
3427 		break;
3428 
3429 	case VCHIQ_SERVICE_OPTION_TRACE:
3430 		service->trace = value;
3431 		status = VCHIQ_SUCCESS;
3432 		break;
3433 
3434 	default:
3435 		break;
3436 	}
3437 	unlock_service(service);
3438 
3439 	return status;
3440 }
3441 
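/*
 * Dump one side's shared state: slot range and positions, any slots still
 * claimed, and the debug counters.
 */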
3442 static int
3443 vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
3444 			struct vchiq_shared_state *shared, const char *label)
3445 {
3446 	static const char *const debug_names[] = {
3447 		"<entries>",
3448 		"SLOT_HANDLER_COUNT",
3449 		"SLOT_HANDLER_LINE",
3450 		"PARSE_LINE",
3451 		"PARSE_HEADER",
3452 		"PARSE_MSGID",
3453 		"AWAIT_COMPLETION_LINE",
3454 		"DEQUEUE_MESSAGE_LINE",
3455 		"SERVICE_CALLBACK_LINE",
3456 		"MSG_QUEUE_FULL_COUNT",
3457 		"COMPLETION_QUEUE_FULL_COUNT"
3458 	};
3459 	int i;
3460 	char buf[80];
3461 	int len;
3462 	int err;
3463 
3464 	len = scnprintf(buf, sizeof(buf),
3465 		"  %s: slots %d-%d tx_pos=%x recycle=%x",
3466 		label, shared->slot_first, shared->slot_last,
3467 		shared->tx_pos, shared->slot_queue_recycle);
3468 	err = vchiq_dump(dump_context, buf, len + 1);
3469 	if (err)
3470 		return err;
3471 
3472 	len = scnprintf(buf, sizeof(buf),
3473 		"    Slots claimed:");
3474 	err = vchiq_dump(dump_context, buf, len + 1);
3475 	if (err)
3476 		return err;
3477 
3478 	for (i = shared->slot_first; i <= shared->slot_last; i++) {
3479 		struct vchiq_slot_info slot_info =
3480 						*SLOT_INFO_FROM_INDEX(state, i);
3481 		if (slot_info.use_count != slot_info.release_count) {
3482 			len = scnprintf(buf, sizeof(buf),
3483 				"      %d: %d/%d", i, slot_info.use_count,
3484 				slot_info.release_count);
3485 			err = vchiq_dump(dump_context, buf, len + 1);
3486 			if (err)
3487 				return err;
3488 		}
3489 	}
3490 
3491 	for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
3492 		len = scnprintf(buf, sizeof(buf), "    DEBUG: %s = %d(%x)",
3493 			debug_names[i], shared->debug[i], shared->debug[i]);
3494 		err = vchiq_dump(dump_context, buf, len + 1);
3495 		if (err)
3496 			return err;
3497 	}
3498 	return 0;
3499 }
3500 
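/*
 * Dump the complete state of a VCHIQ connection: connection state, current
 * transmit/receive positions, protocol version, slot availability, both
 * shared-memory halves and, finally, every allocated service.
 */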
int vchiq_dump_state(void *dump_context, struct vchiq_state *state)
{
	char buf[80];
	int len;
	int i;
	int err;

	len = scnprintf(buf, sizeof(buf), "State %d: %s", state->id,
		conn_state_names[state->conn_state]);
	err = vchiq_dump(dump_context, buf, len + 1);
	if (err)
		return err;

	len = scnprintf(buf, sizeof(buf),
		"  tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
		state->local->tx_pos,
		state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
		state->rx_pos,
		state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
	err = vchiq_dump(dump_context, buf, len + 1);
	if (err)
		return err;

	len = scnprintf(buf, sizeof(buf),
		"  Version: %d (min %d)",
		VCHIQ_VERSION, VCHIQ_VERSION_MIN);
	err = vchiq_dump(dump_context, buf, len + 1);
	if (err)
		return err;

	if (VCHIQ_ENABLE_STATS) {
		len = scnprintf(buf, sizeof(buf),
			"  Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, error_count=%d",
			state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
			state->stats.error_count);
		err = vchiq_dump(dump_context, buf, len + 1);
		if (err)
			return err;
	}

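	/*
	 * Summarise slot usage: whole slots still free ahead of the local
	 * transmit position, remaining data quota, slots waiting to be
	 * recycled, and how often transmission stalled for lack of either.
	 */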
	len = scnprintf(buf, sizeof(buf),
		"  Slots: %d available (%d data), %d recyclable, %d stalls (%d data)",
		((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
			state->local_tx_pos) / VCHIQ_SLOT_SIZE,
		state->data_quota - state->data_use_count,
		state->local->slot_queue_recycle - state->slot_queue_available,
		state->stats.slot_stalls, state->stats.data_stalls);
	err = vchiq_dump(dump_context, buf, len + 1);
	if (err)
		return err;

	err = vchiq_dump_platform_state(dump_context);
	if (err)
		return err;

	err = vchiq_dump_shared_state(dump_context,
				      state,
				      state->local,
				      "Local");
	if (err)
		return err;
	err = vchiq_dump_shared_state(dump_context,
				      state,
				      state->remote,
				      "Remote");
	if (err)
		return err;

	err = vchiq_dump_platform_instances(dump_context);
	if (err)
		return err;

	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service = find_service_by_port(state, i);

		if (service) {
			err = vchiq_dump_service_state(dump_context, service);
			unlock_service(service);
			if (err)
				return err;
		}
	}
	return 0;
}

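/*
 * Dump the state of a single service: port numbers, FOURCC, reference
 * count, message and slot quota usage, pending bulk transfers and, if
 * statistics are enabled, the per-service transfer counters.
 */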
int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
{
	char buf[80];
	int len;
	int err;
	unsigned int ref_count;

	/* Don't include the lock just taken */
	ref_count = kref_read(&service->ref_count) - 1;
	len = scnprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
			service->localport, srvstate_names[service->srvstate],
			ref_count);

	if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
		char remoteport[30];
		struct vchiq_service_quota *quota =
			&service->state->service_quotas[service->localport];
		int fourcc = service->base.fourcc;
		int tx_pending, rx_pending;

		if (service->remoteport != VCHIQ_PORT_FREE) {
			int len2 = scnprintf(remoteport, sizeof(remoteport),
				"%u", service->remoteport);

			if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
				scnprintf(remoteport + len2,
					sizeof(remoteport) - len2,
					" (client %x)", service->client_id);
		} else {
			strcpy(remoteport, "n/a");
		}

		len += scnprintf(buf + len, sizeof(buf) - len,
			" '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
			VCHIQ_FOURCC_AS_4CHARS(fourcc),
			remoteport,
			quota->message_use_count,
			quota->message_quota,
			quota->slot_use_count,
			quota->slot_quota);

		err = vchiq_dump(dump_context, buf, len + 1);
		if (err)
			return err;

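		/*
		 * Bulk transfers still outstanding: queued at this end
		 * (local_insert) but not yet completed by the remote
		 * (remote_insert).
		 */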
		tx_pending = service->bulk_tx.local_insert -
			service->bulk_tx.remote_insert;

		rx_pending = service->bulk_rx.local_insert -
			service->bulk_rx.remote_insert;

		len = scnprintf(buf, sizeof(buf),
			"  Bulk: tx_pending=%d (size %d), rx_pending=%d (size %d)",
			tx_pending,
			tx_pending ? service->bulk_tx.bulks[
			BULK_INDEX(service->bulk_tx.remove)].size : 0,
			rx_pending,
			rx_pending ? service->bulk_rx.bulks[
			BULK_INDEX(service->bulk_rx.remove)].size : 0);

		if (VCHIQ_ENABLE_STATS) {
			err = vchiq_dump(dump_context, buf, len + 1);
			if (err)
				return err;

			len = scnprintf(buf, sizeof(buf),
				"  Ctrl: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
				service->stats.ctrl_tx_count,
				service->stats.ctrl_tx_bytes,
				service->stats.ctrl_rx_count,
				service->stats.ctrl_rx_bytes);
			err = vchiq_dump(dump_context, buf, len + 1);
			if (err)
				return err;

			len = scnprintf(buf, sizeof(buf),
				"  Bulk: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
				service->stats.bulk_tx_count,
				service->stats.bulk_tx_bytes,
				service->stats.bulk_rx_count,
				service->stats.bulk_rx_bytes);
			err = vchiq_dump(dump_context, buf, len + 1);
			if (err)
				return err;

			len = scnprintf(buf, sizeof(buf),
				"  %d quota stalls, %d slot stalls, %d bulk stalls, %d aborted, %d errors",
				service->stats.quota_stalls,
				service->stats.slot_stalls,
				service->stats.bulk_stalls,
				service->stats.bulk_aborted_count,
				service->stats.error_count);
		}
	}

	err = vchiq_dump(dump_context, buf, len + 1);
	if (err)
		return err;

	if (service->srvstate != VCHIQ_SRVSTATE_FREE)
		err = vchiq_dump_platform_service_state(dump_context, service);
	return err;
}

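/*
 * Bracket a block of error messages with highly visible banners so that
 * serious failures stand out in the kernel log; callers emit their error
 * lines between vchiq_loud_error_header() and vchiq_loud_error_footer().
 */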
void
vchiq_loud_error_header(void)
{
	vchiq_log_error(vchiq_core_log_level,
		"============================================================================");
	vchiq_log_error(vchiq_core_log_level,
		"============================================================================");
	vchiq_log_error(vchiq_core_log_level, "=====");
}

void
vchiq_loud_error_footer(void)
{
	vchiq_log_error(vchiq_core_log_level, "=====");
	vchiq_log_error(vchiq_core_log_level,
		"============================================================================");
	vchiq_log_error(vchiq_core_log_level,
		"============================================================================");
}

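/*
 * Send a REMOTE_USE message to the peer. Fails with VCHIQ_RETRY while the
 * connection is still disconnected.
 */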
enum vchiq_status vchiq_send_remote_use(struct vchiq_state *state)
{
	if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
		return VCHIQ_RETRY;

	return queue_message(state, NULL,
			     VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
			     NULL, NULL, 0, 0);
}

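/*
 * Send a REMOTE_USE_ACTIVE message to the peer. As with REMOTE_USE, this
 * returns VCHIQ_RETRY while the connection is still disconnected.
 */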
enum vchiq_status vchiq_send_remote_use_active(struct vchiq_state *state)
{
	if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
		return VCHIQ_RETRY;

	return queue_message(state, NULL,
			     VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
			     NULL, NULL, 0, 0);
}

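/*
 * Trace-log a hex dump of an arbitrary memory region, 16 bytes per line,
 * with an optional label and the caller-supplied base address prepended
 * to each line.
 */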
void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem,
	size_t num_bytes)
{
	const u8  *mem = void_mem;
	size_t          offset;
	char            line_buf[100];
	char           *s;

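	/* Each line: up to 16 bytes as hex, then their printable-ASCII form. */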
	while (num_bytes > 0) {
		s = line_buf;

		for (offset = 0; offset < 16; offset++) {
			if (offset < num_bytes)
				s += scnprintf(s, 4, "%02x ", mem[offset]);
			else
				s += scnprintf(s, 4, "   ");
		}

		for (offset = 0; offset < 16; offset++) {
			if (offset < num_bytes) {
				u8 ch = mem[offset];

				if ((ch < ' ') || (ch > '~'))
					ch = '.';
				*s++ = (char)ch;
			}
		}
		*s++ = '\0';

		if (label && (*label != '\0'))
			vchiq_log_trace(VCHIQ_LOG_TRACE,
				"%s: %08x: %s", label, addr, line_buf);
		else
			vchiq_log_trace(VCHIQ_LOG_TRACE,
				"%08x: %s", addr, line_buf);

		addr += 16;
		mem += 16;
		if (num_bytes > 16)
			num_bytes -= 16;
		else
			num_bytes = 0;
	}
}