/**
 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2, as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* ---- Include Files ---------------------------------------------------- */

#include "vchiq_core.h"
#include "vchiq_arm.h"
#include "vchiq_killable.h"

/* ---- Public Variables ------------------------------------------------- */

/* ---- Private Constants and Types -------------------------------------- */

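/*
 * A blocking bulk transfer is tracked by a bulk_waiter_node, keyed by the
 * pid of the issuing thread.  If the wait is interrupted by a signal
 * (VCHIQ_RETRY), the node is parked on the owning instance's
 * bulk_waiter_list so that a retry from the same thread can pick it up;
 * see vchiq_blocking_bulk_transfer().
 */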
struct bulk_waiter_node {
	struct bulk_waiter bulk_waiter;
	int pid;
	struct list_head list;
};

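/*
 * Per-client instance state: a pointer to the shared core state, a flag
 * recording whether vchiq_connect() has completed, and the list of bulk
 * waiters left behind by blocking transfers that were interrupted by a
 * signal (protected by bulk_waiter_list_mutex).
 */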
struct vchiq_instance_struct {
	VCHIQ_STATE_T *state;

	int connected;

	struct list_head bulk_waiter_list;
	struct mutex bulk_waiter_list_mutex;
};

static VCHIQ_STATUS_T
vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
	unsigned int size, VCHIQ_BULK_DIR_T dir);

/****************************************************************************
*
*   vchiq_initialise
*
***************************************************************************/
#define VCHIQ_INIT_RETRIES 10
VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instanceOut)
{
	VCHIQ_STATUS_T status = VCHIQ_ERROR;
	VCHIQ_STATE_T *state;
	VCHIQ_INSTANCE_T instance = NULL;
	int i;

	vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);

	/* VideoCore may not be ready due to boot up timing.
	 * It may never be ready if kernel and firmware are mismatched,
	 * so don't block forever. */
	for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
		state = vchiq_get_state();
		if (state)
			break;
		udelay(500);
	}
	if (i == VCHIQ_INIT_RETRIES) {
		vchiq_log_error(vchiq_core_log_level,
			"%s: videocore not initialized\n", __func__);
		goto failed;
	} else if (i > 0) {
		vchiq_log_warning(vchiq_core_log_level,
			"%s: videocore initialized after %d retries\n",
			__func__, i);
	}

	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance) {
		vchiq_log_error(vchiq_core_log_level,
			"%s: error allocating vchiq instance\n", __func__);
		goto failed;
	}

	instance->connected = 0;
	instance->state = state;
	lmutex_init(&instance->bulk_waiter_list_mutex);
	INIT_LIST_HEAD(&instance->bulk_waiter_list);

	*instanceOut = instance;

	status = VCHIQ_SUCCESS;

failed:
	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_initialise);
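
/*
 * A minimal kernel-client startup sketch, assuming the usual call order in
 * this API (error codes and cleanup policy are illustrative):
 *
 *	VCHIQ_INSTANCE_T instance;
 *
 *	if (vchiq_initialise(&instance) != VCHIQ_SUCCESS)
 *		return -ENXIO;
 *	if (vchiq_connect(instance) != VCHIQ_SUCCESS) {
 *		vchiq_shutdown(instance);
 *		return -EAGAIN;
 *	}
 *	(services can now be added or opened; see below)
 */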

/****************************************************************************
*
*   vchiq_shutdown
*
***************************************************************************/

VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
{
	VCHIQ_STATUS_T status;
	VCHIQ_STATE_T *state = instance->state;

	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p) called", __func__, instance);

	if (lmutex_lock_interruptible(&state->mutex) != 0)
		return VCHIQ_RETRY;

	/* Remove all services */
	status = vchiq_shutdown_internal(state, instance);

	lmutex_unlock(&state->mutex);

	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p): returning %d", __func__, instance, status);

	if (status == VCHIQ_SUCCESS) {
		struct list_head *pos, *next;

		list_for_each_safe(pos, next,
				&instance->bulk_waiter_list) {
			struct bulk_waiter_node *waiter;

			waiter = list_entry(pos,
					struct bulk_waiter_node,
					list);
			list_del(pos);
			vchiq_log_info(vchiq_arm_log_level,
					"bulk_waiter - cleaned up %p "
					"for pid %d",
					waiter, waiter->pid);
			_sema_destroy(&waiter->bulk_waiter.event);

			kfree(waiter);
		}

		lmutex_destroy(&instance->bulk_waiter_list_mutex);

		kfree(instance);
	}

	return status;
}
EXPORT_SYMBOL(vchiq_shutdown);

/****************************************************************************
*
*   vchiq_is_connected
*
***************************************************************************/

static int vchiq_is_connected(VCHIQ_INSTANCE_T instance)
{
	return instance->connected;
}

/****************************************************************************
*
*   vchiq_connect
*
***************************************************************************/

VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance)
{
	VCHIQ_STATUS_T status;
	VCHIQ_STATE_T *state = instance->state;

	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p) called", __func__, instance);

	if (lmutex_lock_interruptible(&state->mutex) != 0) {
		vchiq_log_trace(vchiq_core_log_level,
			"%s: call to lmutex_lock failed", __func__);
		status = VCHIQ_RETRY;
		goto failed;
	}
	status = vchiq_connect_internal(state, instance);

	if (status == VCHIQ_SUCCESS)
		instance->connected = 1;

	lmutex_unlock(&state->mutex);

failed:
	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_connect);

/****************************************************************************
*
*   vchiq_add_service
*
***************************************************************************/

VCHIQ_STATUS_T vchiq_add_service(
	VCHIQ_INSTANCE_T              instance,
	const VCHIQ_SERVICE_PARAMS_T *params,
	VCHIQ_SERVICE_HANDLE_T       *phandle)
{
	VCHIQ_STATUS_T status;
	VCHIQ_STATE_T *state = instance->state;
	VCHIQ_SERVICE_T *service = NULL;
	int srvstate;

	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p) called", __func__, instance);

	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;

	srvstate = vchiq_is_connected(instance)
		? VCHIQ_SRVSTATE_LISTENING
		: VCHIQ_SRVSTATE_HIDDEN;

	service = vchiq_add_service_internal(
		state,
		params,
		srvstate,
		instance,
		NULL);

	if (service) {
		*phandle = service->handle;
		status = VCHIQ_SUCCESS;
	} else {
		status = VCHIQ_ERROR;
	}

	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_add_service);
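
/*
 * Note: unlike vchiq_open_service() below, vchiq_add_service() may be
 * called before the instance is connected; the srvstate selection above
 * then creates the service in the HIDDEN state rather than LISTENING.
 */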

/****************************************************************************
*
*   vchiq_open_service
*
***************************************************************************/

VCHIQ_STATUS_T vchiq_open_service(
	VCHIQ_INSTANCE_T              instance,
	const VCHIQ_SERVICE_PARAMS_T *params,
	VCHIQ_SERVICE_HANDLE_T       *phandle)
{
	VCHIQ_STATUS_T   status = VCHIQ_ERROR;
	VCHIQ_STATE_T   *state = instance->state;
	VCHIQ_SERVICE_T *service = NULL;

	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p) called", __func__, instance);

	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;

	if (!vchiq_is_connected(instance))
		goto failed;

	service = vchiq_add_service_internal(state,
		params,
		VCHIQ_SRVSTATE_OPENING,
		instance,
		NULL);

	if (service) {
		*phandle = service->handle;
		status = vchiq_open_service_internal(service,
		    (uintptr_t)current);
		if (status != VCHIQ_SUCCESS) {
			vchiq_remove_service(service->handle);
			*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
		}
	}

failed:
	vchiq_log_trace(vchiq_core_log_level,
		"%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_open_service);
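
/*
 * A hedged client-side sketch of opening a service on a connected instance.
 * The VCHIQ_SERVICE_PARAMS_T field names and VCHIQ_MAKE_FOURCC are assumed
 * from the vchiq public header; the fourcc, callback and userdata values
 * are illustrative:
 *
 *	VCHIQ_SERVICE_PARAMS_T params = {
 *		.fourcc      = VCHIQ_MAKE_FOURCC('D', 'E', 'M', 'O'),
 *		.callback    = demo_service_callback,
 *		.userdata    = demo_state,
 *		.version     = 1,
 *		.version_min = 1,
 *	};
 *	VCHIQ_SERVICE_HANDLE_T handle;
 *
 *	if (vchiq_open_service(instance, &params, &handle) != VCHIQ_SUCCESS)
 *		return -ENXIO;
 */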

VCHIQ_STATUS_T
vchiq_queue_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle,
	void *data, unsigned int size, void *userdata)
{
	return vchiq_bulk_transfer(handle,
		VCHI_MEM_HANDLE_INVALID, data, size, userdata,
		VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_TRANSMIT);
}
EXPORT_SYMBOL(vchiq_queue_bulk_transmit);

VCHIQ_STATUS_T
vchiq_queue_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
	unsigned int size, void *userdata)
{
	return vchiq_bulk_transfer(handle,
		VCHI_MEM_HANDLE_INVALID, data, size, userdata,
		VCHIQ_BULK_MODE_CALLBACK, VCHIQ_BULK_RECEIVE);
}
EXPORT_SYMBOL(vchiq_queue_bulk_receive);

VCHIQ_STATUS_T
vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle, void *data,
	unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
{
	VCHIQ_STATUS_T status;

	switch (mode) {
	case VCHIQ_BULK_MODE_NOCALLBACK:
	case VCHIQ_BULK_MODE_CALLBACK:
		status = vchiq_bulk_transfer(handle,
			VCHI_MEM_HANDLE_INVALID, data, size, userdata,
			mode, VCHIQ_BULK_TRANSMIT);
		break;
	case VCHIQ_BULK_MODE_BLOCKING:
		status = vchiq_blocking_bulk_transfer(handle,
			data, size, VCHIQ_BULK_TRANSMIT);
		break;
	default:
		return VCHIQ_ERROR;
	}

	return status;
}
EXPORT_SYMBOL(vchiq_bulk_transmit);

VCHIQ_STATUS_T
vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
	unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
{
	VCHIQ_STATUS_T status;

	switch (mode) {
	case VCHIQ_BULK_MODE_NOCALLBACK:
	case VCHIQ_BULK_MODE_CALLBACK:
		status = vchiq_bulk_transfer(handle,
			VCHI_MEM_HANDLE_INVALID, data, size, userdata,
			mode, VCHIQ_BULK_RECEIVE);
		break;
	case VCHIQ_BULK_MODE_BLOCKING:
		status = vchiq_blocking_bulk_transfer(handle,
			data, size, VCHIQ_BULK_RECEIVE);
		break;
	default:
		return VCHIQ_ERROR;
	}

	return status;
}
EXPORT_SYMBOL(vchiq_bulk_receive);

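/*
 * vchiq_blocking_bulk_transfer() backs VCHIQ_BULK_MODE_BLOCKING for the
 * transmit/receive wrappers above: it queues the transfer and sleeps on a
 * bulk_waiter until completion.  A hedged caller-side sketch (buf and len
 * are illustrative; userdata is unused in blocking mode):
 *
 *	status = vchiq_bulk_transmit(handle, buf, len, NULL,
 *		VCHIQ_BULK_MODE_BLOCKING);
 *
 * VCHIQ_RETRY means the wait was interrupted by a signal.  In that case the
 * bulk_waiter_node is parked on the instance's bulk_waiter_list, keyed by
 * pid, so that a retry of the same transfer (same data and size) from the
 * same thread reuses the outstanding waiter instead of queueing a new one.
 */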
static VCHIQ_STATUS_T
vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
	unsigned int size, VCHIQ_BULK_DIR_T dir)
{
	VCHIQ_INSTANCE_T instance;
	VCHIQ_SERVICE_T *service;
	VCHIQ_STATUS_T status;
	struct bulk_waiter_node *waiter = NULL;
	struct list_head *pos;

	service = find_service_by_handle(handle);
	if (!service)
		return VCHIQ_ERROR;

	instance = service->instance;

	unlock_service(service);

	lmutex_lock(&instance->bulk_waiter_list_mutex);
	list_for_each(pos, &instance->bulk_waiter_list) {
		if (list_entry(pos, struct bulk_waiter_node,
				list)->pid == current->p_pid) {
			waiter = list_entry(pos,
				struct bulk_waiter_node,
				list);
			list_del(pos);
			break;
		}
	}
	lmutex_unlock(&instance->bulk_waiter_list_mutex);

	if (waiter) {
		VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* This thread has an outstanding bulk transfer. */
			if ((bulk->data != data) ||
				(bulk->size != size)) {
				/* This is not a retry of the previous one.
				 * Cancel the signal when the transfer
				 * completes. */
				spin_lock(&bulk_waiter_spinlock);
				bulk->userdata = NULL;
				spin_unlock(&bulk_waiter_spinlock);
			}
		}
	}

	if (!waiter) {
		waiter = kzalloc(sizeof(struct bulk_waiter_node), GFP_KERNEL);
		if (!waiter) {
			vchiq_log_error(vchiq_core_log_level,
				"%s - out of memory", __func__);
			return VCHIQ_ERROR;
		}
	}

	status = vchiq_bulk_transfer(handle, VCHI_MEM_HANDLE_INVALID,
		data, size, &waiter->bulk_waiter, VCHIQ_BULK_MODE_BLOCKING,
		dir);
	if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
		!waiter->bulk_waiter.bulk) {
		VCHIQ_BULK_T *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* Cancel the signal when the transfer
			 * completes. */
			spin_lock(&bulk_waiter_spinlock);
			bulk->userdata = NULL;
			spin_unlock(&bulk_waiter_spinlock);
		}
		_sema_destroy(&waiter->bulk_waiter.event);

		kfree(waiter);
	} else {
		waiter->pid = current->p_pid;
		lmutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		lmutex_unlock(&instance->bulk_waiter_list_mutex);
		vchiq_log_info(vchiq_arm_log_level,
				"saved bulk_waiter %p for pid %d",
				waiter, current->p_pid);
	}

	return status;
}