1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Broadcom BCM2835 V4L2 driver
4  *
5  * Copyright © 2013 Raspberry Pi (Trading) Ltd.
6  *
7  * Authors: Vincent Sanders @ Collabora
8  *          Dave Stevenson @ Broadcom
9  *		(now dave.stevenson@raspberrypi.org)
10  *          Simon Mellor @ Broadcom
11  *          Luke Diamand @ Broadcom
12  *
13  * V4L2 driver MMAL vchiq interface code
14  */
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 
18 #include <linux/errno.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/mutex.h>
22 #include <linux/mm.h>
23 #include <linux/slab.h>
24 #include <linux/completion.h>
25 #include <linux/vmalloc.h>
26 #include <media/videobuf2-vmalloc.h>
27 
28 #include "../include/linux/raspberrypi/vchiq.h"
29 #include "../interface/vchiq_arm/vchiq_arm.h"
30 #include "mmal-common.h"
31 #include "mmal-vchiq.h"
32 #include "mmal-msg.h"
33 
34 /*
35  * maximum number of components supported.
36  * This matches the maximum permitted by default on the VPU
37  */
38 #define VCHIQ_MMAL_MAX_COMPONENTS 64
39 
40 /*
41  * Timeout for synchronous msg responses in seconds.
42  * Helpful to increase this if stopping in the VPU debugger.
43  */
44 #define SYNC_MSG_TIMEOUT       3
45 
46 /*#define FULL_MSG_DUMP 1*/
47 
#ifdef DEBUG
/*
 * Human-readable names for MMAL message types, indexed by the
 * message header's type field.  Only referenced by the DBG_DUMP_MSG
 * macro, so only built when DEBUG is defined.
 */
static const char *const msg_type_names[] = {
	"UNKNOWN",
	"QUIT",
	"SERVICE_CLOSED",
	"GET_VERSION",
	"COMPONENT_CREATE",
	"COMPONENT_DESTROY",
	"COMPONENT_ENABLE",
	"COMPONENT_DISABLE",
	"PORT_INFO_GET",
	"PORT_INFO_SET",
	"PORT_ACTION",
	"BUFFER_FROM_HOST",
	"BUFFER_TO_HOST",
	"GET_STATS",
	"PORT_PARAMETER_SET",
	"PORT_PARAMETER_GET",
	"EVENT_TO_HOST",
	"GET_CORE_STATS_FOR_PORT",
	"OPAQUE_ALLOCATOR",
	"CONSUME_MEM",
	"LMK",
	"OPAQUE_ALLOCATOR_DESC",
	"DRM_GET_LHS32",
	"DRM_GET_TIME",
	"BUFFER_FROM_HOST_ZEROLEN",
	"PORT_FLUSH",
	"HOST_LOG",
};
#endif
79 
/*
 * Human-readable names for MMAL port action types, indexed by the
 * port action value (presumably matching the PORT_ACTION message
 * enum declared in mmal-msg.h - confirm against that header).
 */
static const char *const port_action_type_names[] = {
	"UNKNOWN",
	"ENABLE",
	"DISABLE",
	"FLUSH",
	"CONNECT",
	"DISCONNECT",
	"SET_REQUIREMENTS",
};
89 
#if defined(DEBUG)
#if defined(FULL_MSG_DUMP)
/*
 * Log a summary (type name, numeric type, length) of an MMAL message
 * and hex-dump both the header and the payload.
 */
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)				\
	do {								\
		pr_debug(TITLE" type:%s(%d) length:%d\n",		\
			 msg_type_names[(MSG)->h.type],			\
			 (MSG)->h.type, (MSG_LEN));			\
		print_hex_dump(KERN_DEBUG, "<<h: ", DUMP_PREFIX_OFFSET,	\
			       16, 4, (MSG),				\
			       sizeof(struct mmal_msg_header), 1);	\
		print_hex_dump(KERN_DEBUG, "<<p: ", DUMP_PREFIX_OFFSET,	\
			       16, 4,					\
			       ((u8 *)(MSG)) + sizeof(struct mmal_msg_header),\
			       (MSG_LEN) - sizeof(struct mmal_msg_header), 1); \
	} while (0)
#else
/*
 * Summary-only variant: log the message type and length.
 * Wrapped in do { } while (0) (rather than a bare block) so the macro
 * behaves as a single statement, e.g. in unbraced if/else bodies.
 */
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)				\
	do {								\
		pr_debug(TITLE" type:%s(%d) length:%d\n",		\
			 msg_type_names[(MSG)->h.type],			\
			 (MSG)->h.type, (MSG_LEN));			\
	} while (0)
#endif
#else
/* Compiles away entirely when DEBUG is not defined. */
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)
#endif
116 
struct vchiq_mmal_instance;

/* normal message context
 *
 * One context exists per in-flight message.  Which member of the
 * union is valid depends on usage: .bulk for buffer transfers
 * (BUFFER_FROM_HOST / BUFFER_TO_HOST), .sync for synchronous
 * request/reply messages sent via send_synchronous_mmal_msg().
 */
struct mmal_msg_context {
	struct vchiq_mmal_instance *instance;

	/* Index in the context_map idr so that we can find the
	 * mmal_msg_context again when servicing the VCHI reply.
	 */
	int handle;

	union {
		struct {
			/* work struct for buffer_cb callback */
			struct work_struct work;
			/* work struct for deferred callback */
			struct work_struct buffer_to_host_work;
			/* mmal instance */
			struct vchiq_mmal_instance *instance;
			/* mmal port */
			struct vchiq_mmal_port *port;
			/* actual buffer used to store bulk reply */
			struct mmal_buffer *buffer;
			/* amount of buffer used */
			unsigned long buffer_used;
			/* MMAL buffer flags */
			u32 mmal_flags;
			/* Presentation and Decode timestamps */
			s64 pts;
			s64 dts;

			int status;	/* context status */

		} bulk;		/* bulk data */

		struct {
			/* message handle to release */
			struct vchiq_header *msg_handle;
			/* pointer to received message */
			struct mmal_msg *msg;
			/* received message length */
			u32 msg_len;
			/* completion upon reply */
			struct completion cmplt;
		} sync;		/* synchronous response */
	} u;

};
165 
/* per-connection state for one MMAL service on a VCHIQ instance */
struct vchiq_mmal_instance {
	unsigned int service_handle;

	/* ensure serialised access to service */
	struct mutex vchiq_mutex;

	/* maps integer handles to in-flight mmal_msg_contexts */
	struct idr context_map;
	/* protect accesses to context_map */
	struct mutex context_map_lock;

	struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];

	/* ordered workqueue to process all bulk operations */
	struct workqueue_struct *bulk_wq;

	/* handle for a vchiq instance */
	struct vchiq_instance *vchiq_instance;
};
184 
/* Allocate a message context and register it in the instance's
 * context_map idr, so the service callback can map a reply's context
 * handle back to this structure.
 *
 * Returns the new context, or an ERR_PTR() on allocation/idr failure.
 */
static struct mmal_msg_context *
get_msg_context(struct vchiq_mmal_instance *instance)
{
	struct mmal_msg_context *msg_context;
	int handle;

	/* todo: should this be allocated from a pool to avoid kzalloc */
	msg_context = kzalloc(sizeof(*msg_context), GFP_KERNEL);

	if (!msg_context)
		return ERR_PTR(-ENOMEM);

	/* Create an ID that will be passed along with our message so
	 * that when we service the VCHI reply, we can look up what
	 * message is being replied to.
	 */
	mutex_lock(&instance->context_map_lock);
	handle = idr_alloc(&instance->context_map, msg_context,
			   0, 0, GFP_KERNEL);
	mutex_unlock(&instance->context_map_lock);

	if (handle < 0) {
		kfree(msg_context);
		return ERR_PTR(handle);
	}

	/* fill in the back-references only after the handle is valid */
	msg_context->instance = instance;
	msg_context->handle = handle;

	return msg_context;
}
216 
/* Map a context handle (as carried in msg->h.context or
 * drvbuf.client_context) back to its mmal_msg_context.
 * Returns NULL if the handle is not registered.
 *
 * NOTE(review): reads the idr without taking context_map_lock;
 * presumably relies on idr lookup being safe against concurrent
 * alloc/remove - confirm against the idr API guarantees.
 */
static struct mmal_msg_context *
lookup_msg_context(struct vchiq_mmal_instance *instance, int handle)
{
	return idr_find(&instance->context_map, handle);
}
222 
/* Unregister a message context from the idr map and free it.
 * After this returns the handle may be reused by idr_alloc.
 */
static void
release_msg_context(struct mmal_msg_context *msg_context)
{
	struct vchiq_mmal_instance *instance = msg_context->instance;

	mutex_lock(&instance->context_map_lock);
	idr_remove(&instance->context_map, msg_context->handle);
	mutex_unlock(&instance->context_map_lock);
	kfree(msg_context);
}
233 
234 /* deals with receipt of event to host message */
event_to_host_cb(struct vchiq_mmal_instance * instance,struct mmal_msg * msg,u32 msg_len)235 static void event_to_host_cb(struct vchiq_mmal_instance *instance,
236 			     struct mmal_msg *msg, u32 msg_len)
237 {
238 	pr_debug("unhandled event\n");
239 	pr_debug("component:%u port type:%d num:%d cmd:0x%x length:%d\n",
240 		 msg->u.event_to_host.client_component,
241 		 msg->u.event_to_host.port_type,
242 		 msg->u.event_to_host.port_num,
243 		 msg->u.event_to_host.cmd, msg->u.event_to_host.length);
244 }
245 
/* workqueue scheduled callback
 *
 * we do this because it is important we do not call any other vchiq
 * sync calls from within the message delivery thread
 *
 * Copies the bulk-transfer results (length, flags, timestamps) from
 * the message context into the mmal_buffer, accounts the buffer as
 * returned from the VPU, then invokes the port's buffer_cb.
 */
static void buffer_work_cb(struct work_struct *work)
{
	struct mmal_msg_context *msg_context =
		container_of(work, struct mmal_msg_context, u.bulk.work);
	struct mmal_buffer *buffer = msg_context->u.bulk.buffer;

	/* should not happen: buffer_from_host() always sets u.bulk.buffer
	 * before this work can be scheduled
	 */
	if (!buffer) {
		pr_err("%s: ctx: %p, No mmal buffer to pass details\n",
		       __func__, msg_context);
		return;
	}

	buffer->length = msg_context->u.bulk.buffer_used;
	buffer->mmal_flags = msg_context->u.bulk.mmal_flags;
	buffer->dts = msg_context->u.bulk.dts;
	buffer->pts = msg_context->u.bulk.pts;

	/* buffer is back with the host; balanced with the atomic_inc in
	 * buffer_from_host()
	 */
	atomic_dec(&msg_context->u.bulk.port->buffers_with_vpu);

	msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
					    msg_context->u.bulk.port,
					    msg_context->u.bulk.status,
					    msg_context->u.bulk.buffer);
}
275 
/* workqueue scheduled callback to handle receiving buffers
 *
 * VCHI will allow up to 4 bulk receives to be scheduled before blocking.
 * If we block in the service_callback context then we can't process the
 * VCHI_CALLBACK_BULK_RECEIVED message that would otherwise allow the blocked
 * vchiq_bulk_receive() call to complete.
 */
static void buffer_to_host_work_cb(struct work_struct *work)
{
	struct mmal_msg_context *msg_context =
		container_of(work, struct mmal_msg_context,
			     u.bulk.buffer_to_host_work);
	struct vchiq_mmal_instance *instance = msg_context->instance;
	unsigned long len = msg_context->u.bulk.buffer_used;
	int ret;

	if (!len)
		/* Dummy receive to ensure the buffers remain in order */
		len = 8;
	/* queue the bulk submission */
	vchiq_use_service(instance->vchiq_instance, instance->service_handle);
	/* msg_context is passed as the bulk userdata, and comes back to us
	 * in the VCHIQ_BULK_RECEIVE_DONE/ABORTED callbacks
	 */
	ret = vchiq_bulk_receive(instance->vchiq_instance, instance->service_handle,
				 msg_context->u.bulk.buffer->buffer,
				 /* Actual receive needs to be a multiple
				  * of 4 bytes
				  */
				(len + 3) & ~3,
				msg_context,
				VCHIQ_BULK_MODE_CALLBACK);

	vchiq_release_service(instance->vchiq_instance, instance->service_handle);

	/* failure is only logged; no recovery path exists here */
	if (ret != 0)
		pr_err("%s: ctx: %p, vchiq_bulk_receive failed %d\n",
		       __func__, msg_context, ret);
}
312 
/* enqueue a bulk receive for a given message context
 *
 * Validates the buffer backing the context, clamps the read length to
 * the buffer size, records the transfer metadata and defers the actual
 * vchiq_bulk_receive() to the instance's ordered bulk workqueue
 * (see buffer_to_host_work_cb).  Returns 0 on success, -EINVAL if no
 * buffer was configured.
 */
static int bulk_receive(struct vchiq_mmal_instance *instance,
			struct mmal_msg *msg,
			struct mmal_msg_context *msg_context)
{
	unsigned long rd_len;

	rd_len = msg->u.buffer_from_host.buffer_header.length;

	if (!msg_context->u.bulk.buffer) {
		pr_err("bulk.buffer not configured - error in buffer_from_host\n");

		/* todo: this is a serious error, we should never have
		 * committed a buffer_to_host operation to the mmal
		 * port without the buffer to back it up (underflow
		 * handling) and there is no obvious way to deal with
		 * this - how is the mmal servie going to react when
		 * we fail to do the xfer and reschedule a buffer when
		 * it arrives? perhaps a starved flag to indicate a
		 * waiting bulk receive?
		 */

		return -EINVAL;
	}

	/* ensure we do not overrun the available buffer */
	if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
		rd_len = msg_context->u.bulk.buffer->buffer_size;
		pr_warn("short read as not enough receive buffer space\n");
		/* todo: is this the correct response, what happens to
		 * the rest of the message data?
		 */
	}

	/* store length */
	msg_context->u.bulk.buffer_used = rd_len;
	msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
	msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;

	/* the ordered workqueue keeps bulk receives in submission order */
	queue_work(msg_context->instance->bulk_wq,
		   &msg_context->u.bulk.buffer_to_host_work);

	return 0;
}
357 
/* data in message, memcpy from packet into output buffer
 *
 * Used when the payload was carried inline in the message
 * (payload_in_message <= MMAL_VC_SHORT_DATA, checked by the caller).
 * Always returns 0.
 *
 * NOTE(review): there is no check that payload_in_message fits within
 * buffer->buffer_size; presumably buffers are always at least
 * MMAL_VC_SHORT_DATA bytes - confirm against buffer allocation.
 */
static int inline_receive(struct vchiq_mmal_instance *instance,
			  struct mmal_msg *msg,
			  struct mmal_msg_context *msg_context)
{
	memcpy(msg_context->u.bulk.buffer->buffer,
	       msg->u.buffer_from_host.short_data,
	       msg->u.buffer_from_host.payload_in_message);

	msg_context->u.bulk.buffer_used =
	    msg->u.buffer_from_host.payload_in_message;

	return 0;
}
372 
/* queue the buffer availability with MMAL_MSG_TYPE_BUFFER_FROM_HOST
 *
 * Hands an empty host buffer to the VPU.  The buffer's msg_context is
 * filled in so the eventual BUFFER_TO_HOST reply (and any bulk
 * transfer) can be routed back; buffers_with_vpu accounts for buffers
 * currently owned by the VPU and is decremented again on send failure
 * or in buffer_work_cb when the buffer returns.
 *
 * Returns 0 on success or a negative errno.
 */
static int
buffer_from_host(struct vchiq_mmal_instance *instance,
		 struct vchiq_mmal_port *port, struct mmal_buffer *buf)
{
	struct mmal_msg_context *msg_context;
	struct mmal_msg m;
	int ret;

	if (!port->enabled)
		return -EINVAL;

	pr_debug("instance:%u buffer:%p\n", instance->service_handle, buf);

	/* get context */
	if (!buf->msg_context) {
		pr_err("%s: msg_context not allocated, buf %p\n", __func__,
		       buf);
		return -EINVAL;
	}
	msg_context = buf->msg_context;

	/* store bulk message context for when data arrives */
	msg_context->u.bulk.instance = instance;
	msg_context->u.bulk.port = port;
	msg_context->u.bulk.buffer = buf;
	msg_context->u.bulk.buffer_used = 0;

	/* initialise work structure ready to schedule callback */
	INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
	INIT_WORK(&msg_context->u.bulk.buffer_to_host_work,
		  buffer_to_host_work_cb);

	atomic_inc(&port->buffers_with_vpu);

	/* prep the buffer from host message */
	memset(&m, 0xbc, sizeof(m));	/* just to make debug clearer */

	m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST;
	m.h.magic = MMAL_MAGIC;
	m.h.context = msg_context->handle;
	m.h.status = 0;

	/* drvbuf is our private data passed back */
	m.u.buffer_from_host.drvbuf.magic = MMAL_MAGIC;
	m.u.buffer_from_host.drvbuf.component_handle = port->component->handle;
	m.u.buffer_from_host.drvbuf.port_handle = port->handle;
	m.u.buffer_from_host.drvbuf.client_context = msg_context->handle;

	/* buffer header */
	m.u.buffer_from_host.buffer_header.cmd = 0;
	/* data carries the host buffer address, truncated to 32 bits for
	 * the VPU-side representation
	 */
	m.u.buffer_from_host.buffer_header.data =
		(u32)(unsigned long)buf->buffer;
	m.u.buffer_from_host.buffer_header.alloc_size = buf->buffer_size;
	m.u.buffer_from_host.buffer_header.length = 0;	/* nothing used yet */
	m.u.buffer_from_host.buffer_header.offset = 0;	/* no offset */
	m.u.buffer_from_host.buffer_header.flags = 0;	/* no flags */
	m.u.buffer_from_host.buffer_header.pts = MMAL_TIME_UNKNOWN;
	m.u.buffer_from_host.buffer_header.dts = MMAL_TIME_UNKNOWN;

	/* clear buffer type specific data */
	memset(&m.u.buffer_from_host.buffer_header_type_specific, 0,
	       sizeof(m.u.buffer_from_host.buffer_header_type_specific));

	/* no payload in message */
	m.u.buffer_from_host.payload_in_message = 0;

	vchiq_use_service(instance->vchiq_instance, instance->service_handle);

	ret = vchiq_queue_kernel_message(instance->vchiq_instance, instance->service_handle, &m,
					 sizeof(struct mmal_msg_header) +
					 sizeof(m.u.buffer_from_host));
	/* message never reached the VPU: undo the buffer accounting */
	if (ret)
		atomic_dec(&port->buffers_with_vpu);

	vchiq_release_service(instance->vchiq_instance, instance->service_handle);

	return ret;
}
452 
/* deals with receipt of buffer to host message
 *
 * Looks up the message context via drvbuf.client_context and then
 * dispatches on how the payload is delivered: a bulk transfer (queued
 * via bulk_receive(), callback deferred to bulk completion), an inline
 * short payload, or an empty buffer.  Unless a bulk receive was
 * successfully queued, the port callback work is scheduled here.
 */
static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
			      struct mmal_msg *msg, u32 msg_len)
{
	struct mmal_msg_context *msg_context;
	u32 handle;

	pr_debug("%s: instance:%p msg:%p msg_len:%d\n",
		 __func__, instance, msg, msg_len);

	if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
		handle = msg->u.buffer_from_host.drvbuf.client_context;
		msg_context = lookup_msg_context(instance, handle);

		if (!msg_context) {
			pr_err("drvbuf.client_context(%u) is invalid\n",
			       handle);
			return;
		}
	} else {
		pr_err("MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n");
		return;
	}

	msg_context->u.bulk.mmal_flags =
				msg->u.buffer_from_host.buffer_header.flags;

	if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
		/* message reception had an error */
		pr_warn("error %d in reply\n", msg->h.status);

		msg_context->u.bulk.status = msg->h.status;

	} else if (msg->u.buffer_from_host.buffer_header.length == 0) {
		/* empty buffer */
		if (msg->u.buffer_from_host.buffer_header.flags &
		    MMAL_BUFFER_HEADER_FLAG_EOS) {
			/* EOS: still do a (dummy) bulk receive so buffer
			 * ordering with any in-flight transfers is kept
			 */
			msg_context->u.bulk.status =
			    bulk_receive(instance, msg, msg_context);
			if (msg_context->u.bulk.status == 0)
				return;	/* successful bulk submission, bulk
					 * completion will trigger callback
					 */
		} else {
			/* do callback with empty buffer - not EOS though */
			msg_context->u.bulk.status = 0;
			msg_context->u.bulk.buffer_used = 0;
		}
	} else if (msg->u.buffer_from_host.payload_in_message == 0) {
		/* data is not in message, queue a bulk receive */
		msg_context->u.bulk.status =
		    bulk_receive(instance, msg, msg_context);
		if (msg_context->u.bulk.status == 0)
			return;	/* successful bulk submission, bulk
				 * completion will trigger callback
				 */

		/* failed to submit buffer, this will end badly */
		pr_err("error %d on bulk submission\n",
		       msg_context->u.bulk.status);

	} else if (msg->u.buffer_from_host.payload_in_message <=
		   MMAL_VC_SHORT_DATA) {
		/* data payload within message */
		msg_context->u.bulk.status = inline_receive(instance, msg,
							    msg_context);
	} else {
		pr_err("message with invalid short payload\n");

		/* signal error */
		msg_context->u.bulk.status = -EINVAL;
		msg_context->u.bulk.buffer_used =
		    msg->u.buffer_from_host.payload_in_message;
	}

	/* schedule the port callback */
	schedule_work(&msg_context->u.bulk.work);
}
531 
/* bulk receive completed successfully: mark the context OK and hand
 * the buffer back to the port via the deferred work callback
 */
static void bulk_receive_cb(struct vchiq_mmal_instance *instance,
			    struct mmal_msg_context *msg_context)
{
	msg_context->u.bulk.status = 0;

	/* schedule the port callback */
	schedule_work(&msg_context->u.bulk.work);
}
540 
/* bulk receive was aborted: propagate -EINTR to the port callback */
static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
			  struct mmal_msg_context *msg_context)
{
	pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context);

	msg_context->u.bulk.status = -EINTR;

	schedule_work(&msg_context->u.bulk.work);
}
550 
/* incoming event service callback
 *
 * Runs in VCHIQ callback context, so it must not block on further
 * vchiq sync calls (see buffer_to_host_work_cb); anything that could
 * block is deferred to a workqueue.  Messages are dispatched by type:
 * buffer/event messages are handled and released here, any other type
 * is matched to a waiting synchronous sender via msg->h.context and
 * completed (the waiter releases the header).  Always returns 0.
 */
static int mmal_service_callback(struct vchiq_instance *vchiq_instance,
				 enum vchiq_reason reason, struct vchiq_header *header,
				 unsigned int handle, void *bulk_ctx)
{
	struct vchiq_mmal_instance *instance = vchiq_get_service_userdata(vchiq_instance, handle);
	u32 msg_len;
	struct mmal_msg *msg;
	struct mmal_msg_context *msg_context;

	if (!instance) {
		pr_err("Message callback passed NULL instance\n");
		return 0;
	}

	switch (reason) {
	case VCHIQ_MESSAGE_AVAILABLE:
		msg = (void *)header->data;
		msg_len = header->size;

		DBG_DUMP_MSG(msg, msg_len, "<<< reply message");

		/* handling is different for buffer messages */
		switch (msg->h.type) {
		case MMAL_MSG_TYPE_BUFFER_FROM_HOST:
			/* echo of our own submission; nothing to do */
			vchiq_release_message(vchiq_instance, handle, header);
			break;

		case MMAL_MSG_TYPE_EVENT_TO_HOST:
			event_to_host_cb(instance, msg, msg_len);
			vchiq_release_message(vchiq_instance, handle, header);

			break;

		case MMAL_MSG_TYPE_BUFFER_TO_HOST:
			buffer_to_host_cb(instance, msg, msg_len);
			vchiq_release_message(vchiq_instance, handle, header);
			break;

		default:
			/* messages dependent on header context to complete */
			if (!msg->h.context) {
				pr_err("received message context was null!\n");
				vchiq_release_message(vchiq_instance, handle, header);
				break;
			}

			msg_context = lookup_msg_context(instance,
							 msg->h.context);
			if (!msg_context) {
				pr_err("received invalid message context %u!\n",
				       msg->h.context);
				vchiq_release_message(vchiq_instance, handle, header);
				break;
			}

			/* fill in context values; the header is NOT released
			 * here - the synchronous waiter does that after
			 * reading the reply
			 */
			msg_context->u.sync.msg_handle = header;
			msg_context->u.sync.msg = msg;
			msg_context->u.sync.msg_len = msg_len;

			/* todo: should this check (completion_done()
			 * == 1) for no one waiting? or do we need a
			 * flag to tell us the completion has been
			 * interrupted so we can free the message and
			 * its context. This probably also solves the
			 * message arriving after interruption todo
			 * below
			 */

			/* complete message so caller knows it happened */
			complete(&msg_context->u.sync.cmplt);
			break;
		}

		break;

	case VCHIQ_BULK_RECEIVE_DONE:
		/* bulk_ctx is the msg_context passed to vchiq_bulk_receive */
		bulk_receive_cb(instance, bulk_ctx);
		break;

	case VCHIQ_BULK_RECEIVE_ABORTED:
		bulk_abort_cb(instance, bulk_ctx);
		break;

	case VCHIQ_SERVICE_CLOSED:
		/* TODO: consider if this requires action if received when
		 * driver is not explicitly closing the service
		 */
		break;

	default:
		pr_err("Received unhandled message reason %d\n", reason);
		break;
	}

	return 0;
}
649 
/* Send a message to the VPU and block (up to SYNC_MSG_TIMEOUT seconds)
 * for the matching reply.
 *
 * On success returns 0 and sets *msg_out / *msg_handle to the reply;
 * the caller must release the header with vchiq_release_message().
 * Returns a negative errno on queue failure, payload overflow (-EINVAL)
 * or timeout (-ETIME), in which case the outputs are not written.
 */
static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
				     struct mmal_msg *msg,
				     unsigned int payload_len,
				     struct mmal_msg **msg_out,
				     struct vchiq_header **msg_handle)
{
	struct mmal_msg_context *msg_context;
	int ret;
	unsigned long time_left;

	/* payload size must not cause message to exceed max size */
	if (payload_len >
	    (MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header))) {
		pr_err("payload length %d exceeds max:%d\n", payload_len,
		       (int)(MMAL_MSG_MAX_SIZE -
			    sizeof(struct mmal_msg_header)));
		return -EINVAL;
	}

	msg_context = get_msg_context(instance);
	if (IS_ERR(msg_context))
		return PTR_ERR(msg_context);

	init_completion(&msg_context->u.sync.cmplt);

	/* the context handle travels in the header so the service
	 * callback can find us again
	 */
	msg->h.magic = MMAL_MAGIC;
	msg->h.context = msg_context->handle;
	msg->h.status = 0;

	DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len),
		     ">>> sync message");

	vchiq_use_service(instance->vchiq_instance, instance->service_handle);

	ret = vchiq_queue_kernel_message(instance->vchiq_instance, instance->service_handle, msg,
					 sizeof(struct mmal_msg_header) +
					 payload_len);

	vchiq_release_service(instance->vchiq_instance, instance->service_handle);

	if (ret) {
		pr_err("error %d queuing message\n", ret);
		release_msg_context(msg_context);
		return ret;
	}

	time_left = wait_for_completion_timeout(&msg_context->u.sync.cmplt,
						SYNC_MSG_TIMEOUT * HZ);
	if (time_left == 0) {
		pr_err("timed out waiting for sync completion\n");
		ret = -ETIME;
		/* todo: what happens if the message arrives after aborting */
		release_msg_context(msg_context);
		return ret;
	}

	/* hand the reply (filled in by mmal_service_callback) to caller */
	*msg_out = msg_context->u.sync.msg;
	*msg_handle = msg_context->u.sync.msg_handle;
	release_msg_context(msg_context);

	return 0;
}
712 
/* log the current state of a port (buffer requirements and stream
 * format) at debug level; video-specific fields only when the port
 * carries a video elementary stream
 */
static void dump_port_info(struct vchiq_mmal_port *port)
{
	pr_debug("port handle:0x%x enabled:%d\n", port->handle, port->enabled);

	pr_debug("buffer minimum num:%d size:%d align:%d\n",
		 port->minimum_buffer.num,
		 port->minimum_buffer.size, port->minimum_buffer.alignment);

	pr_debug("buffer recommended num:%d size:%d align:%d\n",
		 port->recommended_buffer.num,
		 port->recommended_buffer.size,
		 port->recommended_buffer.alignment);

	pr_debug("buffer current values num:%d size:%d align:%d\n",
		 port->current_buffer.num,
		 port->current_buffer.size, port->current_buffer.alignment);

	pr_debug("elementary stream: type:%d encoding:0x%x variant:0x%x\n",
		 port->format.type,
		 port->format.encoding, port->format.encoding_variant);

	pr_debug("		    bitrate:%d flags:0x%x\n",
		 port->format.bitrate, port->format.flags);

	if (port->format.type == MMAL_ES_TYPE_VIDEO) {
		pr_debug
		    ("es video format: width:%d height:%d colourspace:0x%x\n",
		     port->es.video.width, port->es.video.height,
		     port->es.video.color_space);

		pr_debug("		 : crop xywh %d,%d,%d,%d\n",
			 port->es.video.crop.x,
			 port->es.video.crop.y,
			 port->es.video.crop.width, port->es.video.crop.height);
		pr_debug("		 : framerate %d/%d  aspect %d/%d\n",
			 port->es.video.frame_rate.numerator,
			 port->es.video.frame_rate.denominator,
			 port->es.video.par.numerator, port->es.video.par.denominator);
	}
}
753 
port_to_mmal_msg(struct vchiq_mmal_port * port,struct mmal_port * p)754 static void port_to_mmal_msg(struct vchiq_mmal_port *port, struct mmal_port *p)
755 {
756 	/* todo do readonly fields need setting at all? */
757 	p->type = port->type;
758 	p->index = port->index;
759 	p->index_all = 0;
760 	p->is_enabled = port->enabled;
761 	p->buffer_num_min = port->minimum_buffer.num;
762 	p->buffer_size_min = port->minimum_buffer.size;
763 	p->buffer_alignment_min = port->minimum_buffer.alignment;
764 	p->buffer_num_recommended = port->recommended_buffer.num;
765 	p->buffer_size_recommended = port->recommended_buffer.size;
766 
767 	/* only three writable fields in a port */
768 	p->buffer_num = port->current_buffer.num;
769 	p->buffer_size = port->current_buffer.size;
770 	p->userdata = (u32)(unsigned long)port;
771 }
772 
/* Push the host's port configuration (buffer counts/sizes and stream
 * format, including extradata) to the VPU with a synchronous
 * PORT_INFO_SET message.
 *
 * Returns 0 on success, a negated MMAL status from the reply, -EINVAL
 * on an unexpected reply type, or -1 if port is NULL.
 */
static int port_info_set(struct vchiq_mmal_instance *instance,
			 struct vchiq_mmal_port *port)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchiq_header *rmsg_handle;

	pr_debug("setting port info port %p\n", port);
	if (!port)
		return -1;
	dump_port_info(port);

	m.h.type = MMAL_MSG_TYPE_PORT_INFO_SET;

	/* port info set uses component handle + type/index rather than
	 * a port handle
	 */
	m.u.port_info_set.component_handle = port->component->handle;
	m.u.port_info_set.port_type = port->type;
	m.u.port_info_set.port_index = port->index;

	port_to_mmal_msg(port, &m.u.port_info_set.port);

	/* elementary stream format setup */
	m.u.port_info_set.format.type = port->format.type;
	m.u.port_info_set.format.encoding = port->format.encoding;
	m.u.port_info_set.format.encoding_variant =
	    port->format.encoding_variant;
	m.u.port_info_set.format.bitrate = port->format.bitrate;
	m.u.port_info_set.format.flags = port->format.flags;

	memcpy(&m.u.port_info_set.es, &port->es,
	       sizeof(union mmal_es_specific_format));

	m.u.port_info_set.format.extradata_size = port->format.extradata_size;
	memcpy(&m.u.port_info_set.extradata, port->format.extradata,
	       port->format.extradata_size);

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(m.u.port_info_set),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_SET) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	/* return operation status */
	ret = -rmsg->u.port_info_get_reply.status;

	pr_debug("%s:result:%d component:0x%x port:%d\n", __func__, ret,
		 port->component->handle, port->handle);

release_msg:
	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);

	return ret;
}
832 
/* use port info get message to retrieve port information
 *
 * Sends a synchronous PORT_INFO_GET and copies the reply (enable
 * state, port handle, type/index, buffer requirements and stream
 * format including extradata) into the driver's port structure.
 *
 * Returns 0 on success, a negated MMAL status from the reply, or
 * -EINVAL on an unexpected reply type.
 */
static int port_info_get(struct vchiq_mmal_instance *instance,
			 struct vchiq_mmal_port *port)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchiq_header *rmsg_handle;

	/* port info time */
	m.h.type = MMAL_MSG_TYPE_PORT_INFO_GET;
	m.u.port_info_get.component_handle = port->component->handle;
	m.u.port_info_get.port_type = port->type;
	m.u.port_info_get.index = port->index;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(m.u.port_info_get),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_GET) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	/* return operation status */
	ret = -rmsg->u.port_info_get_reply.status;
	if (ret != MMAL_MSG_STATUS_SUCCESS)
		goto release_msg;

	if (rmsg->u.port_info_get_reply.port.is_enabled == 0)
		port->enabled = false;
	else
		port->enabled = true;

	/* copy the values out of the message */
	port->handle = rmsg->u.port_info_get_reply.port_handle;

	/* port type and index cached to use on port info set because
	 * it does not use a port handle
	 */
	port->type = rmsg->u.port_info_get_reply.port_type;
	port->index = rmsg->u.port_info_get_reply.port_index;

	port->minimum_buffer.num =
	    rmsg->u.port_info_get_reply.port.buffer_num_min;
	port->minimum_buffer.size =
	    rmsg->u.port_info_get_reply.port.buffer_size_min;
	port->minimum_buffer.alignment =
	    rmsg->u.port_info_get_reply.port.buffer_alignment_min;

	/* NOTE(review): recommended alignment is taken from
	 * buffer_alignment_min (no recommended-alignment field exists in
	 * the reply), and recommended_buffer.size is not filled in here.
	 */
	port->recommended_buffer.alignment =
	    rmsg->u.port_info_get_reply.port.buffer_alignment_min;
	port->recommended_buffer.num =
	    rmsg->u.port_info_get_reply.port.buffer_num_recommended;

	port->current_buffer.num = rmsg->u.port_info_get_reply.port.buffer_num;
	port->current_buffer.size =
	    rmsg->u.port_info_get_reply.port.buffer_size;

	/* stream format */
	port->format.type = rmsg->u.port_info_get_reply.format.type;
	port->format.encoding = rmsg->u.port_info_get_reply.format.encoding;
	port->format.encoding_variant =
	    rmsg->u.port_info_get_reply.format.encoding_variant;
	port->format.bitrate = rmsg->u.port_info_get_reply.format.bitrate;
	port->format.flags = rmsg->u.port_info_get_reply.format.flags;

	/* elementary stream format */
	memcpy(&port->es,
	       &rmsg->u.port_info_get_reply.es,
	       sizeof(union mmal_es_specific_format));
	port->format.es = &port->es;

	port->format.extradata_size =
	    rmsg->u.port_info_get_reply.format.extradata_size;
	memcpy(port->format.extradata,
	       rmsg->u.port_info_get_reply.extradata,
	       port->format.extradata_size);

	pr_debug("received port info\n");
	dump_port_info(port);

release_msg:

	pr_debug("%s:result:%d component:0x%x port:%d\n",
		 __func__, ret, port->component->handle, port->handle);

	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);

	return ret;
}
927 
928 /* create component on vc */
create_component(struct vchiq_mmal_instance * instance,struct vchiq_mmal_component * component,const char * name)929 static int create_component(struct vchiq_mmal_instance *instance,
930 			    struct vchiq_mmal_component *component,
931 			    const char *name)
932 {
933 	int ret;
934 	struct mmal_msg m;
935 	struct mmal_msg *rmsg;
936 	struct vchiq_header *rmsg_handle;
937 
938 	/* build component create message */
939 	m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
940 	m.u.component_create.client_component = component->client_component;
941 	strscpy_pad(m.u.component_create.name, name,
942 		    sizeof(m.u.component_create.name));
943 	m.u.component_create.pid = 0;
944 
945 	ret = send_synchronous_mmal_msg(instance, &m,
946 					sizeof(m.u.component_create),
947 					&rmsg, &rmsg_handle);
948 	if (ret)
949 		return ret;
950 
951 	if (rmsg->h.type != m.h.type) {
952 		/* got an unexpected message type in reply */
953 		ret = -EINVAL;
954 		goto release_msg;
955 	}
956 
957 	ret = -rmsg->u.component_create_reply.status;
958 	if (ret != MMAL_MSG_STATUS_SUCCESS)
959 		goto release_msg;
960 
961 	/* a valid component response received */
962 	component->handle = rmsg->u.component_create_reply.component_handle;
963 	component->inputs = rmsg->u.component_create_reply.input_num;
964 	component->outputs = rmsg->u.component_create_reply.output_num;
965 	component->clocks = rmsg->u.component_create_reply.clock_num;
966 
967 	pr_debug("Component handle:0x%x in:%d out:%d clock:%d\n",
968 		 component->handle,
969 		 component->inputs, component->outputs, component->clocks);
970 
971 release_msg:
972 	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
973 
974 	return ret;
975 }
976 
977 /* destroys a component on vc */
destroy_component(struct vchiq_mmal_instance * instance,struct vchiq_mmal_component * component)978 static int destroy_component(struct vchiq_mmal_instance *instance,
979 			     struct vchiq_mmal_component *component)
980 {
981 	int ret;
982 	struct mmal_msg m;
983 	struct mmal_msg *rmsg;
984 	struct vchiq_header *rmsg_handle;
985 
986 	m.h.type = MMAL_MSG_TYPE_COMPONENT_DESTROY;
987 	m.u.component_destroy.component_handle = component->handle;
988 
989 	ret = send_synchronous_mmal_msg(instance, &m,
990 					sizeof(m.u.component_destroy),
991 					&rmsg, &rmsg_handle);
992 	if (ret)
993 		return ret;
994 
995 	if (rmsg->h.type != m.h.type) {
996 		/* got an unexpected message type in reply */
997 		ret = -EINVAL;
998 		goto release_msg;
999 	}
1000 
1001 	ret = -rmsg->u.component_destroy_reply.status;
1002 
1003 release_msg:
1004 
1005 	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
1006 
1007 	return ret;
1008 }
1009 
1010 /* enable a component on vc */
enable_component(struct vchiq_mmal_instance * instance,struct vchiq_mmal_component * component)1011 static int enable_component(struct vchiq_mmal_instance *instance,
1012 			    struct vchiq_mmal_component *component)
1013 {
1014 	int ret;
1015 	struct mmal_msg m;
1016 	struct mmal_msg *rmsg;
1017 	struct vchiq_header *rmsg_handle;
1018 
1019 	m.h.type = MMAL_MSG_TYPE_COMPONENT_ENABLE;
1020 	m.u.component_enable.component_handle = component->handle;
1021 
1022 	ret = send_synchronous_mmal_msg(instance, &m,
1023 					sizeof(m.u.component_enable),
1024 					&rmsg, &rmsg_handle);
1025 	if (ret)
1026 		return ret;
1027 
1028 	if (rmsg->h.type != m.h.type) {
1029 		/* got an unexpected message type in reply */
1030 		ret = -EINVAL;
1031 		goto release_msg;
1032 	}
1033 
1034 	ret = -rmsg->u.component_enable_reply.status;
1035 
1036 release_msg:
1037 	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
1038 
1039 	return ret;
1040 }
1041 
1042 /* disable a component on vc */
disable_component(struct vchiq_mmal_instance * instance,struct vchiq_mmal_component * component)1043 static int disable_component(struct vchiq_mmal_instance *instance,
1044 			     struct vchiq_mmal_component *component)
1045 {
1046 	int ret;
1047 	struct mmal_msg m;
1048 	struct mmal_msg *rmsg;
1049 	struct vchiq_header *rmsg_handle;
1050 
1051 	m.h.type = MMAL_MSG_TYPE_COMPONENT_DISABLE;
1052 	m.u.component_disable.component_handle = component->handle;
1053 
1054 	ret = send_synchronous_mmal_msg(instance, &m,
1055 					sizeof(m.u.component_disable),
1056 					&rmsg, &rmsg_handle);
1057 	if (ret)
1058 		return ret;
1059 
1060 	if (rmsg->h.type != m.h.type) {
1061 		/* got an unexpected message type in reply */
1062 		ret = -EINVAL;
1063 		goto release_msg;
1064 	}
1065 
1066 	ret = -rmsg->u.component_disable_reply.status;
1067 
1068 release_msg:
1069 
1070 	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
1071 
1072 	return ret;
1073 }
1074 
1075 /* get version of mmal implementation */
get_version(struct vchiq_mmal_instance * instance,u32 * major_out,u32 * minor_out)1076 static int get_version(struct vchiq_mmal_instance *instance,
1077 		       u32 *major_out, u32 *minor_out)
1078 {
1079 	int ret;
1080 	struct mmal_msg m;
1081 	struct mmal_msg *rmsg;
1082 	struct vchiq_header *rmsg_handle;
1083 
1084 	m.h.type = MMAL_MSG_TYPE_GET_VERSION;
1085 
1086 	ret = send_synchronous_mmal_msg(instance, &m,
1087 					sizeof(m.u.version),
1088 					&rmsg, &rmsg_handle);
1089 	if (ret)
1090 		return ret;
1091 
1092 	if (rmsg->h.type != m.h.type) {
1093 		/* got an unexpected message type in reply */
1094 		ret = -EINVAL;
1095 		goto release_msg;
1096 	}
1097 
1098 	*major_out = rmsg->u.version.major;
1099 	*minor_out = rmsg->u.version.minor;
1100 
1101 release_msg:
1102 	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
1103 
1104 	return ret;
1105 }
1106 
1107 /* do a port action with a port as a parameter */
port_action_port(struct vchiq_mmal_instance * instance,struct vchiq_mmal_port * port,enum mmal_msg_port_action_type action_type)1108 static int port_action_port(struct vchiq_mmal_instance *instance,
1109 			    struct vchiq_mmal_port *port,
1110 			    enum mmal_msg_port_action_type action_type)
1111 {
1112 	int ret;
1113 	struct mmal_msg m;
1114 	struct mmal_msg *rmsg;
1115 	struct vchiq_header *rmsg_handle;
1116 
1117 	m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1118 	m.u.port_action_port.component_handle = port->component->handle;
1119 	m.u.port_action_port.port_handle = port->handle;
1120 	m.u.port_action_port.action = action_type;
1121 
1122 	port_to_mmal_msg(port, &m.u.port_action_port.port);
1123 
1124 	ret = send_synchronous_mmal_msg(instance, &m,
1125 					sizeof(m.u.port_action_port),
1126 					&rmsg, &rmsg_handle);
1127 	if (ret)
1128 		return ret;
1129 
1130 	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1131 		/* got an unexpected message type in reply */
1132 		ret = -EINVAL;
1133 		goto release_msg;
1134 	}
1135 
1136 	ret = -rmsg->u.port_action_reply.status;
1137 
1138 	pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)\n",
1139 		 __func__,
1140 		 ret, port->component->handle, port->handle,
1141 		 port_action_type_names[action_type], action_type);
1142 
1143 release_msg:
1144 	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
1145 
1146 	return ret;
1147 }
1148 
1149 /* do a port action with handles as parameters */
port_action_handle(struct vchiq_mmal_instance * instance,struct vchiq_mmal_port * port,enum mmal_msg_port_action_type action_type,u32 connect_component_handle,u32 connect_port_handle)1150 static int port_action_handle(struct vchiq_mmal_instance *instance,
1151 			      struct vchiq_mmal_port *port,
1152 			      enum mmal_msg_port_action_type action_type,
1153 			      u32 connect_component_handle,
1154 			      u32 connect_port_handle)
1155 {
1156 	int ret;
1157 	struct mmal_msg m;
1158 	struct mmal_msg *rmsg;
1159 	struct vchiq_header *rmsg_handle;
1160 
1161 	m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1162 
1163 	m.u.port_action_handle.component_handle = port->component->handle;
1164 	m.u.port_action_handle.port_handle = port->handle;
1165 	m.u.port_action_handle.action = action_type;
1166 
1167 	m.u.port_action_handle.connect_component_handle =
1168 	    connect_component_handle;
1169 	m.u.port_action_handle.connect_port_handle = connect_port_handle;
1170 
1171 	ret = send_synchronous_mmal_msg(instance, &m,
1172 					sizeof(m.u.port_action_handle),
1173 					&rmsg, &rmsg_handle);
1174 	if (ret)
1175 		return ret;
1176 
1177 	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1178 		/* got an unexpected message type in reply */
1179 		ret = -EINVAL;
1180 		goto release_msg;
1181 	}
1182 
1183 	ret = -rmsg->u.port_action_reply.status;
1184 
1185 	pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d) connect component:0x%x connect port:%d\n",
1186 		 __func__,
1187 		 ret, port->component->handle, port->handle,
1188 		 port_action_type_names[action_type],
1189 		 action_type, connect_component_handle, connect_port_handle);
1190 
1191 release_msg:
1192 	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
1193 
1194 	return ret;
1195 }
1196 
/*
 * Set a parameter on a port.
 *
 * @value/@value_size: raw parameter payload copied into the message.
 * Returns 0 on success or a negative errno.
 */
static int port_parameter_set(struct vchiq_mmal_instance *instance,
			      struct vchiq_mmal_port *port,
			      u32 parameter_id, void *value, u32 value_size)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchiq_header *rmsg_handle;

	/*
	 * Reject payloads that cannot fit in the message; the previous
	 * code copied unchecked and an oversized value_size would
	 * overflow m.
	 */
	if (value_size > sizeof(m.u.port_parameter_set.value))
		return -EINVAL;

	m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_SET;

	m.u.port_parameter_set.component_handle = port->component->handle;
	m.u.port_parameter_set.port_handle = port->handle;
	m.u.port_parameter_set.id = parameter_id;
	/* parameter size includes the two u32 header words (id and size) */
	m.u.port_parameter_set.size = (2 * sizeof(u32)) + value_size;
	memcpy(&m.u.port_parameter_set.value, value, value_size);

	/* message length: component_handle, port_handle, id, size + payload */
	ret = send_synchronous_mmal_msg(instance, &m,
					(4 * sizeof(u32)) + value_size,
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_SET) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	ret = -rmsg->u.port_parameter_set_reply.status;

	pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n",
		 __func__,
		 ret, port->component->handle, port->handle, parameter_id);

release_msg:
	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);

	return ret;
}
1237 
/*
 * Read the value of a port parameter from the VPU.
 *
 * On entry *value_size is the capacity of the caller's buffer; on exit
 * it holds the parameter size reported by the VPU, which may exceed the
 * number of bytes actually copied into value.
 *
 * NOTE(review): unlike the neighbouring helpers this returns the reply
 * status un-negated, so failures come back as positive MMAL status
 * codes rather than negative errnos - confirm callers expect this.
 */
static int port_parameter_get(struct vchiq_mmal_instance *instance,
			      struct vchiq_mmal_port *port,
			      u32 parameter_id, void *value, u32 *value_size)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	struct vchiq_header *rmsg_handle;

	m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_GET;

	m.u.port_parameter_get.component_handle = port->component->handle;
	m.u.port_parameter_get.port_handle = port->handle;
	m.u.port_parameter_get.id = parameter_id;
	/* requested size includes the two u32 header words (id and size) */
	m.u.port_parameter_get.size = (2 * sizeof(u32)) + *value_size;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(struct
					       mmal_msg_port_parameter_get),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_GET) {
		/* got an unexpected message type in reply */
		pr_err("Incorrect reply type %d\n", rmsg->h.type);
		ret = -EINVAL;
		goto release_msg;
	}

	ret = rmsg->u.port_parameter_get_reply.status;

	/* port_parameter_get_reply.size includes the header,
	 * whilst *value_size doesn't.
	 */
	rmsg->u.port_parameter_get_reply.size -= (2 * sizeof(u32));

	/*
	 * NOTE(review): the truncating branch below also runs when the
	 * reply carries a non-zero status, copying *value_size bytes
	 * from the reply payload - presumably the payload is still
	 * meaningful in that case; verify against the firmware docs.
	 */
	if (ret || rmsg->u.port_parameter_get_reply.size > *value_size) {
		/* Copy only as much as we have space for
		 * but report true size of parameter
		 */
		memcpy(value, &rmsg->u.port_parameter_get_reply.value,
		       *value_size);
	} else {
		memcpy(value, &rmsg->u.port_parameter_get_reply.value,
		       rmsg->u.port_parameter_get_reply.size);
	}
	/* Always report the size of the returned parameter to the caller */
	*value_size = rmsg->u.port_parameter_get_reply.size;

	pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__,
		 ret, port->component->handle, port->handle, parameter_id);

release_msg:
	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);

	return ret;
}
1296 
/* disables a port and drains buffers from it */
static int port_disable(struct vchiq_mmal_instance *instance,
			struct vchiq_mmal_port *port)
{
	int ret;
	struct list_head *q, *buf_head;
	unsigned long flags = 0;

	/* idempotent: nothing to do if the port is already disabled */
	if (!port->enabled)
		return 0;

	/* mark disabled before issuing the VPU action */
	port->enabled = false;

	ret = port_action_port(instance, port,
			       MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
	if (ret == 0) {
		/*
		 * Drain all queued buffers on port. This should only
		 * apply to buffers that have been queued before the port
		 * has been enabled. If the port has been enabled and buffers
		 * passed, then the buffers should have been removed from this
		 * list, and we should get the relevant callbacks via VCHIQ
		 * to release the buffers.
		 */
		spin_lock_irqsave(&port->slock, flags);

		list_for_each_safe(buf_head, q, &port->buffers) {
			struct mmal_buffer *mmalbuf;

			mmalbuf = list_entry(buf_head, struct mmal_buffer,
					     list);
			list_del(buf_head);
			if (port->buffer_cb) {
				/* hand the buffer back empty, timestamps unset */
				mmalbuf->length = 0;
				mmalbuf->mmal_flags = 0;
				mmalbuf->dts = MMAL_TIME_UNKNOWN;
				mmalbuf->pts = MMAL_TIME_UNKNOWN;
				/*
				 * NOTE(review): buffer_cb is invoked with
				 * port->slock held and interrupts off, so
				 * the callback must not sleep or retake
				 * the lock.
				 */
				port->buffer_cb(instance,
						port, 0, mmalbuf);
			}
		}

		spin_unlock_irqrestore(&port->slock, flags);

		/* refresh cached port info now the port state has changed */
		ret = port_info_get(instance, port);
	}

	return ret;
}
1346 
/* enable a port */
static int port_enable(struct vchiq_mmal_instance *instance,
		       struct vchiq_mmal_port *port)
{
	unsigned int hdr_count;
	struct list_head *q, *buf_head;
	int ret;

	/* idempotent: nothing to do if the port is already enabled */
	if (port->enabled)
		return 0;

	ret = port_action_port(instance, port,
			       MMAL_MSG_PORT_ACTION_TYPE_ENABLE);
	if (ret)
		goto done;

	port->enabled = true;

	if (port->buffer_cb) {
		/* send buffer headers to videocore */
		/*
		 * NOTE(review): port->buffers is walked here without
		 * taking port->slock (unlike port_disable) - presumably
		 * safe because the port was disabled until just above;
		 * confirm no concurrent submitter exists.
		 */
		hdr_count = 1;
		list_for_each_safe(buf_head, q, &port->buffers) {
			struct mmal_buffer *mmalbuf;

			mmalbuf = list_entry(buf_head, struct mmal_buffer,
					     list);
			ret = buffer_from_host(instance, port, mmalbuf);
			if (ret)
				goto done;

			/* buffer is now with the VPU; drop it from our queue */
			list_del(buf_head);
			hdr_count++;
			/* submit at most current_buffer.num buffers */
			if (hdr_count > port->current_buffer.num)
				break;
		}
	}

	/* refresh cached port info after the state change */
	ret = port_info_get(instance, port);

done:
	return ret;
}
1389 
1390 /* ------------------------------------------------------------------
1391  * Exported API
1392  *------------------------------------------------------------------
1393  */
1394 
vchiq_mmal_port_set_format(struct vchiq_mmal_instance * instance,struct vchiq_mmal_port * port)1395 int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance,
1396 			       struct vchiq_mmal_port *port)
1397 {
1398 	int ret;
1399 
1400 	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1401 		return -EINTR;
1402 
1403 	ret = port_info_set(instance, port);
1404 	if (ret)
1405 		goto release_unlock;
1406 
1407 	/* read what has actually been set */
1408 	ret = port_info_get(instance, port);
1409 
1410 release_unlock:
1411 	mutex_unlock(&instance->vchiq_mutex);
1412 
1413 	return ret;
1414 }
1415 EXPORT_SYMBOL_GPL(vchiq_mmal_port_set_format);
1416 
/* set a parameter on a port; thin locking wrapper around port_parameter_set */
int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance,
				  struct vchiq_mmal_port *port,
				  u32 parameter, void *value, u32 value_size)
{
	int ret;

	/* serialise all synchronous VCHIQ transactions on this instance */
	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	ret = port_parameter_set(instance, port, parameter, value, value_size);

	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_set);
1433 
/* get a parameter from a port; thin locking wrapper around port_parameter_get */
int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance,
				  struct vchiq_mmal_port *port,
				  u32 parameter, void *value, u32 *value_size)
{
	int ret;

	/* serialise all synchronous VCHIQ transactions on this instance */
	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	ret = port_parameter_get(instance, port, parameter, value, value_size);

	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_get);
1450 
1451 /* enable a port
1452  *
1453  * enables a port and queues buffers for satisfying callbacks if we
1454  * provide a callback handler
1455  */
vchiq_mmal_port_enable(struct vchiq_mmal_instance * instance,struct vchiq_mmal_port * port,vchiq_mmal_buffer_cb buffer_cb)1456 int vchiq_mmal_port_enable(struct vchiq_mmal_instance *instance,
1457 			   struct vchiq_mmal_port *port,
1458 			   vchiq_mmal_buffer_cb buffer_cb)
1459 {
1460 	int ret;
1461 
1462 	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1463 		return -EINTR;
1464 
1465 	/* already enabled - noop */
1466 	if (port->enabled) {
1467 		ret = 0;
1468 		goto unlock;
1469 	}
1470 
1471 	port->buffer_cb = buffer_cb;
1472 
1473 	ret = port_enable(instance, port);
1474 
1475 unlock:
1476 	mutex_unlock(&instance->vchiq_mutex);
1477 
1478 	return ret;
1479 }
1480 EXPORT_SYMBOL_GPL(vchiq_mmal_port_enable);
1481 
vchiq_mmal_port_disable(struct vchiq_mmal_instance * instance,struct vchiq_mmal_port * port)1482 int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance,
1483 			    struct vchiq_mmal_port *port)
1484 {
1485 	int ret;
1486 
1487 	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1488 		return -EINTR;
1489 
1490 	if (!port->enabled) {
1491 		mutex_unlock(&instance->vchiq_mutex);
1492 		return 0;
1493 	}
1494 
1495 	ret = port_disable(instance, port);
1496 
1497 	mutex_unlock(&instance->vchiq_mutex);
1498 
1499 	return ret;
1500 }
1501 EXPORT_SYMBOL_GPL(vchiq_mmal_port_disable);
1502 
/* ports will be connected in a tunneled manner so data buffers
 * are not handled by client.
 *
 * Passing dst == NULL only tears down an existing connection.
 * Sequence: disable + disconnect any current peer, copy the src video
 * format onto dst, push it to the VPU, read it back, then issue the
 * CONNECT action and record the link in src->connected.
 */
int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
				   struct vchiq_mmal_port *src,
				   struct vchiq_mmal_port *dst)
{
	int ret;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/* disconnect ports if connected */
	if (src->connected) {
		ret = port_disable(instance, src);
		if (ret) {
			pr_err("failed disabling src port(%d)\n", ret);
			goto release_unlock;
		}

		/* do not need to disable the destination port as they
		 * are connected and it is done automatically
		 */

		ret = port_action_handle(instance, src,
					 MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT,
					 src->connected->component->handle,
					 src->connected->handle);
		if (ret < 0) {
			pr_err("failed disconnecting src port\n");
			goto release_unlock;
		}
		/* peer was disabled implicitly by the disconnect (see above) */
		src->connected->enabled = false;
		src->connected = NULL;
	}

	if (!dst) {
		/* do not make new connection */
		ret = 0;
		pr_debug("not making new connection\n");
		goto release_unlock;
	}

	/* copy src port format to dst */
	dst->format.encoding = src->format.encoding;
	dst->es.video.width = src->es.video.width;
	dst->es.video.height = src->es.video.height;
	dst->es.video.crop.x = src->es.video.crop.x;
	dst->es.video.crop.y = src->es.video.crop.y;
	dst->es.video.crop.width = src->es.video.crop.width;
	dst->es.video.crop.height = src->es.video.crop.height;
	dst->es.video.frame_rate.numerator = src->es.video.frame_rate.numerator;
	dst->es.video.frame_rate.denominator = src->es.video.frame_rate.denominator;

	/* set new format */
	ret = port_info_set(instance, dst);
	if (ret) {
		pr_debug("setting port info failed\n");
		goto release_unlock;
	}

	/* read what has actually been set */
	ret = port_info_get(instance, dst);
	if (ret) {
		pr_debug("read back port info failed\n");
		goto release_unlock;
	}

	/* connect two ports together */
	ret = port_action_handle(instance, src,
				 MMAL_MSG_PORT_ACTION_TYPE_CONNECT,
				 dst->component->handle, dst->handle);
	if (ret < 0) {
		pr_debug("connecting port %d:%d to %d:%d failed\n",
			 src->component->handle, src->handle,
			 dst->component->handle, dst->handle);
		goto release_unlock;
	}
	src->connected = dst;

release_unlock:

	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_port_connect_tunnel);
1590 
/*
 * Submit a buffer to a port.
 *
 * If buffer_from_host() reports -EINVAL the port is not enabled yet, so
 * the buffer is queued on the port and will be pushed to the VPU later
 * by port_enable().
 *
 * NOTE(review): this always returns 0; any error from buffer_from_host()
 * other than -EINVAL is silently dropped, and the buffer is then neither
 * queued nor signalled back to the caller - confirm this is intended.
 */
int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
			     struct vchiq_mmal_port *port,
			     struct mmal_buffer *buffer)
{
	unsigned long flags = 0;
	int ret;

	ret = buffer_from_host(instance, port, buffer);
	if (ret == -EINVAL) {
		/* Port is disabled. Queue for when it is enabled. */
		spin_lock_irqsave(&port->slock, flags);
		list_add_tail(&buffer->list, &port->buffers);
		spin_unlock_irqrestore(&port->slock, flags);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_submit_buffer);
1609 
mmal_vchi_buffer_init(struct vchiq_mmal_instance * instance,struct mmal_buffer * buf)1610 int mmal_vchi_buffer_init(struct vchiq_mmal_instance *instance,
1611 			  struct mmal_buffer *buf)
1612 {
1613 	struct mmal_msg_context *msg_context = get_msg_context(instance);
1614 
1615 	if (IS_ERR(msg_context))
1616 		return (PTR_ERR(msg_context));
1617 
1618 	buf->msg_context = msg_context;
1619 	return 0;
1620 }
1621 EXPORT_SYMBOL_GPL(mmal_vchi_buffer_init);
1622 
mmal_vchi_buffer_cleanup(struct mmal_buffer * buf)1623 int mmal_vchi_buffer_cleanup(struct mmal_buffer *buf)
1624 {
1625 	struct mmal_msg_context *msg_context = buf->msg_context;
1626 
1627 	if (msg_context)
1628 		release_msg_context(msg_context);
1629 	buf->msg_context = NULL;
1630 
1631 	return 0;
1632 }
1633 EXPORT_SYMBOL_GPL(mmal_vchi_buffer_cleanup);
1634 
/* Initialise a mmal component and its ports
 *
 * Claims a free slot in instance->component[], creates the component on
 * the VPU, then gathers port info for the control, input, output and
 * clock ports.  On failure the component is destroyed (if created) and
 * the slot is released.
 */
int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
			      const char *name,
			      struct vchiq_mmal_component **component_out)
{
	int ret;
	int idx;		/* port index */
	struct vchiq_mmal_component *component = NULL;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/* find a free slot in the fixed-size component table */
	for (idx = 0; idx < VCHIQ_MMAL_MAX_COMPONENTS; idx++) {
		if (!instance->component[idx].in_use) {
			component = &instance->component[idx];
			component->in_use = true;
			break;
		}
	}

	if (!component) {
		ret = -EINVAL;	/* todo is this correct error? */
		goto unlock;
	}

	/* We need a handle to reference back to our component structure.
	 * Use the array index in instance->component rather than rolling
	 * another IDR.
	 */
	component->client_component = idx;

	ret = create_component(instance, component, name);
	if (ret < 0) {
		pr_err("%s: failed to create component %d (Not enough GPU mem?)\n",
		       __func__, ret);
		goto unlock;
	}

	/* ports info needs gathering */
	/* note: idx is reused below as the per-port index */
	component->control.type = MMAL_PORT_TYPE_CONTROL;
	component->control.index = 0;
	component->control.component = component;
	spin_lock_init(&component->control.slock);
	INIT_LIST_HEAD(&component->control.buffers);
	ret = port_info_get(instance, &component->control);
	if (ret < 0)
		goto release_component;

	/* port counts were filled in by create_component() */
	for (idx = 0; idx < component->inputs; idx++) {
		component->input[idx].type = MMAL_PORT_TYPE_INPUT;
		component->input[idx].index = idx;
		component->input[idx].component = component;
		spin_lock_init(&component->input[idx].slock);
		INIT_LIST_HEAD(&component->input[idx].buffers);
		ret = port_info_get(instance, &component->input[idx]);
		if (ret < 0)
			goto release_component;
	}

	for (idx = 0; idx < component->outputs; idx++) {
		component->output[idx].type = MMAL_PORT_TYPE_OUTPUT;
		component->output[idx].index = idx;
		component->output[idx].component = component;
		spin_lock_init(&component->output[idx].slock);
		INIT_LIST_HEAD(&component->output[idx].buffers);
		ret = port_info_get(instance, &component->output[idx]);
		if (ret < 0)
			goto release_component;
	}

	for (idx = 0; idx < component->clocks; idx++) {
		component->clock[idx].type = MMAL_PORT_TYPE_CLOCK;
		component->clock[idx].index = idx;
		component->clock[idx].component = component;
		spin_lock_init(&component->clock[idx].slock);
		INIT_LIST_HEAD(&component->clock[idx].buffers);
		ret = port_info_get(instance, &component->clock[idx]);
		if (ret < 0)
			goto release_component;
	}

	*component_out = component;

	mutex_unlock(&instance->vchiq_mutex);

	return 0;

release_component:
	/* component exists on the VPU; tear it down before freeing the slot */
	destroy_component(instance, component);
unlock:
	if (component)
		component->in_use = false;
	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_component_init);
1734 
/*
 * cause a mmal component to be destroyed
 *
 * Disables the component first if it is still enabled, destroys it on
 * the VPU and releases its slot in instance->component[].
 */
int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance,
				  struct vchiq_mmal_component *component)
{
	int ret;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/*
	 * NOTE(review): the result of disable_component() is immediately
	 * overwritten by destroy_component() below, so a disable failure
	 * is silently discarded.
	 */
	if (component->enabled)
		ret = disable_component(instance, component);

	ret = destroy_component(instance, component);

	/* slot in instance->component[] may now be reused */
	component->in_use = false;

	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_component_finalise);
1758 
1759 /*
1760  * cause a mmal component to be enabled
1761  */
vchiq_mmal_component_enable(struct vchiq_mmal_instance * instance,struct vchiq_mmal_component * component)1762 int vchiq_mmal_component_enable(struct vchiq_mmal_instance *instance,
1763 				struct vchiq_mmal_component *component)
1764 {
1765 	int ret;
1766 
1767 	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1768 		return -EINTR;
1769 
1770 	if (component->enabled) {
1771 		mutex_unlock(&instance->vchiq_mutex);
1772 		return 0;
1773 	}
1774 
1775 	ret = enable_component(instance, component);
1776 	if (ret == 0)
1777 		component->enabled = true;
1778 
1779 	mutex_unlock(&instance->vchiq_mutex);
1780 
1781 	return ret;
1782 }
1783 EXPORT_SYMBOL_GPL(vchiq_mmal_component_enable);
1784 
1785 /*
1786  * cause a mmal component to be enabled
1787  */
vchiq_mmal_component_disable(struct vchiq_mmal_instance * instance,struct vchiq_mmal_component * component)1788 int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance,
1789 				 struct vchiq_mmal_component *component)
1790 {
1791 	int ret;
1792 
1793 	if (mutex_lock_interruptible(&instance->vchiq_mutex))
1794 		return -EINTR;
1795 
1796 	if (!component->enabled) {
1797 		mutex_unlock(&instance->vchiq_mutex);
1798 		return 0;
1799 	}
1800 
1801 	ret = disable_component(instance, component);
1802 	if (ret == 0)
1803 		component->enabled = false;
1804 
1805 	mutex_unlock(&instance->vchiq_mutex);
1806 
1807 	return ret;
1808 }
1809 EXPORT_SYMBOL_GPL(vchiq_mmal_component_disable);
1810 
/* query the MMAL implementation version from the VPU */
int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
		       u32 *major_out, u32 *minor_out)
{
	int ret;

	/* the version query is a synchronous message; take the mutex */
	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	ret = get_version(instance, major_out, minor_out);

	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_version);
1826 
/*
 * Tear down an instance created by vchiq_mmal_init(): close the MMAL
 * service, shut down VCHIQ and free all instance resources.
 */
int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
{
	int status = 0;

	if (!instance)
		return -EINVAL;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/*
	 * re-take the service use count (dropped by the release in
	 * vchiq_mmal_init) before closing - TODO confirm this pairing
	 * against the vchiq service API.
	 */
	vchiq_use_service(instance->vchiq_instance, instance->service_handle);

	status = vchiq_close_service(instance->vchiq_instance, instance->service_handle);
	if (status != 0)
		pr_err("mmal-vchiq: VCHIQ close failed\n");

	mutex_unlock(&instance->vchiq_mutex);

	/* remaining teardown mirrors vchiq_mmal_init() in reverse */
	vchiq_shutdown(instance->vchiq_instance);
	destroy_workqueue(instance->bulk_wq);

	idr_destroy(&instance->context_map);

	kfree(instance);

	return status;
}
EXPORT_SYMBOL_GPL(vchiq_mmal_finalise);
1855 
vchiq_mmal_init(struct device * dev,struct vchiq_mmal_instance ** out_instance)1856 int vchiq_mmal_init(struct device *dev, struct vchiq_mmal_instance **out_instance)
1857 {
1858 	int status;
1859 	int err = -ENODEV;
1860 	struct vchiq_mmal_instance *instance;
1861 	struct vchiq_instance *vchiq_instance;
1862 	struct vchiq_service_params_kernel params = {
1863 		.version		= VC_MMAL_VER,
1864 		.version_min		= VC_MMAL_MIN_VER,
1865 		.fourcc			= VCHIQ_MAKE_FOURCC('m', 'm', 'a', 'l'),
1866 		.callback		= mmal_service_callback,
1867 		.userdata		= NULL,
1868 	};
1869 	struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(dev->parent);
1870 
1871 	/* compile time checks to ensure structure size as they are
1872 	 * directly (de)serialised from memory.
1873 	 */
1874 
1875 	/* ensure the header structure has packed to the correct size */
1876 	BUILD_BUG_ON(sizeof(struct mmal_msg_header) != 24);
1877 
1878 	/* ensure message structure does not exceed maximum length */
1879 	BUILD_BUG_ON(sizeof(struct mmal_msg) > MMAL_MSG_MAX_SIZE);
1880 
1881 	/* mmal port struct is correct size */
1882 	BUILD_BUG_ON(sizeof(struct mmal_port) != 64);
1883 
1884 	/* create a vchi instance */
1885 	status = vchiq_initialise(&mgmt->state, &vchiq_instance);
1886 	if (status) {
1887 		pr_err("Failed to initialise VCHI instance (status=%d)\n",
1888 		       status);
1889 		return -EIO;
1890 	}
1891 
1892 	status = vchiq_connect(vchiq_instance);
1893 	if (status) {
1894 		pr_err("Failed to connect VCHI instance (status=%d)\n", status);
1895 		err = -EIO;
1896 		goto err_shutdown_vchiq;
1897 	}
1898 
1899 	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
1900 
1901 	if (!instance) {
1902 		err = -ENOMEM;
1903 		goto err_shutdown_vchiq;
1904 	}
1905 
1906 	mutex_init(&instance->vchiq_mutex);
1907 
1908 	instance->vchiq_instance = vchiq_instance;
1909 
1910 	mutex_init(&instance->context_map_lock);
1911 	idr_init_base(&instance->context_map, 1);
1912 
1913 	params.userdata = instance;
1914 
1915 	instance->bulk_wq = alloc_ordered_workqueue("mmal-vchiq",
1916 						    WQ_MEM_RECLAIM);
1917 	if (!instance->bulk_wq)
1918 		goto err_free;
1919 
1920 	status = vchiq_open_service(vchiq_instance, &params,
1921 				    &instance->service_handle);
1922 	if (status) {
1923 		pr_err("Failed to open VCHI service connection (status=%d)\n",
1924 		       status);
1925 		goto err_close_services;
1926 	}
1927 
1928 	vchiq_release_service(instance->vchiq_instance, instance->service_handle);
1929 
1930 	*out_instance = instance;
1931 
1932 	return 0;
1933 
1934 err_close_services:
1935 	vchiq_close_service(instance->vchiq_instance, instance->service_handle);
1936 	destroy_workqueue(instance->bulk_wq);
1937 err_free:
1938 	kfree(instance);
1939 err_shutdown_vchiq:
1940 	vchiq_shutdown(vchiq_instance);
1941 	return err;
1942 }
1943 EXPORT_SYMBOL_GPL(vchiq_mmal_init);
1944 
1945 MODULE_DESCRIPTION("BCM2835 MMAL VCHIQ interface");
1946 MODULE_AUTHOR("Dave Stevenson, <dave.stevenson@raspberrypi.org>");
1947 MODULE_LICENSE("GPL");
1948