// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/device/bus.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/compat.h>
#include <linux/dma-mapping.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <soc/bcm2835/raspberrypi-firmware.h>

#include "vchiq_core.h"
#include "vchiq_ioctl.h"
#include "vchiq_arm.h"
#include "vchiq_bus.h"
#include "vchiq_debugfs.h"
#include "vchiq_pagelist.h"

#define DEVICE_NAME "vchiq"

#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)

#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)

#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX  1

#define BELL0	0x00
#define BELL2	0x08

#define ARM_DS_ACTIVE	BIT(2)

/* Override the default prefix, which would be vchiq_arm (from the filename) */
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX DEVICE_NAME "."
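
/*
 * Illustrative note (no parameter is actually defined in this file): with
 * this prefix, a hypothetical module_param(foo, int, 0644) declared here
 * would be set on the kernel command line as "vchiq.foo=1" rather than
 * "vchiq_arm.foo=1".
 */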

#define KEEPALIVE_VER 1
#define KEEPALIVE_VER_MIN KEEPALIVE_VER

/*
 * The devices implemented in the VCHIQ firmware are not discoverable,
 * so we need to maintain a list of them in order to register them with
 * the interface.
 */
static struct vchiq_device *bcm2835_audio;
static struct vchiq_device *bcm2835_camera;

static const struct vchiq_platform_info bcm2835_info = {
	.cache_line_size = 32,
};

static const struct vchiq_platform_info bcm2836_info = {
	.cache_line_size = 64,
};
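
/*
 * The cache-line size feeds the partial cache-line (fragment) handling in
 * create_pagelist()/free_pagelist(): each fragment buffer is two cache
 * lines long, one line for the head and one for the tail of a read.
 */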

struct vchiq_arm_state {
	/* Keepalive-related data */
	struct task_struct *ka_thread;
	struct completion ka_evt;
	atomic_t ka_use_count;
	atomic_t ka_use_ack_count;
	atomic_t ka_release_count;

	rwlock_t susp_res_lock;

	struct vchiq_state *state;

	/*
	 * Global use count for videocore.
	 * This is equal to the sum of the use counts for all services.  When
	 * this hits zero the videocore suspend procedure will be initiated.
	 */
	int videocore_use_count;

	/*
	 * Use count to track requests from videocore peer.
	 * This use count is not associated with a service, so needs to be
	 * tracked separately with the state.
	 */
	int peer_use_count;

	/*
	 * Flag to indicate that the first vchiq connect has made it through.
	 * This means that both sides should be fully ready, and we should
	 * be able to suspend after this point.
	 */
	int first_connect;
};

struct vchiq_pagelist_info {
	struct pagelist *pagelist;
	size_t pagelist_buffer_size;
	dma_addr_t dma_addr;
	enum dma_data_direction dma_dir;
	unsigned int num_pages;
	unsigned int pages_need_release;
	struct page **pages;
	struct scatterlist *scatterlist;
	unsigned int scatterlist_mapped;
};
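
/*
 * create_pagelist() carves all of the above out of a single coherent DMA
 * allocation, laid out as: struct pagelist (with its addrs[] array), the
 * page pointer array, the scatterlist, and finally the bookkeeping
 * struct vchiq_pagelist_info itself.
 */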

static int
vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *data,
			     unsigned int size, enum vchiq_bulk_dir dir);

static irqreturn_t
vchiq_doorbell_irq(int irq, void *dev_id)
{
	struct vchiq_state *state = dev_id;
	struct vchiq_drv_mgmt *mgmt;
	irqreturn_t ret = IRQ_NONE;
	unsigned int status;

	mgmt = dev_get_drvdata(state->dev);

	/* Read (and clear) the doorbell */
	status = readl(mgmt->regs + BELL0);

	if (status & ARM_DS_ACTIVE) {  /* Was the doorbell rung? */
		remote_event_pollall(state);
		ret = IRQ_HANDLED;
	}

	return ret;
}

static void
cleanup_pagelistinfo(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo)
{
	if (pagelistinfo->scatterlist_mapped) {
		dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist,
			     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	}

	if (pagelistinfo->pages_need_release)
		unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages);

	dma_free_coherent(instance->state->dev, pagelistinfo->pagelist_buffer_size,
			  pagelistinfo->pagelist, pagelistinfo->dma_addr);
}

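/*
 * Each entry in a pagelist's addrs[] array packs a page-aligned bus address
 * together with (number of pages - 1) in the low PAGE_SHIFT bits.  A worked
 * example, assuming 4 KiB pages: the entry 0x12345002 describes a three-page
 * run starting at bus address 0x12345000 and ending at 0x12348000, which is
 * exactly what the calculation below reconstructs for the previous entry.
 */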
static inline bool
is_adjacent_block(u32 *addrs, dma_addr_t addr, unsigned int k)
{
	u32 tmp;

	if (!k)
		return false;

	tmp = (addrs[k - 1] & PAGE_MASK) +
	      (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT);

	return tmp == (addr & PAGE_MASK);
}

/*
 * This function is called by the vchiq stack once it has been connected to
 * the videocore and clients can start to use the stack.
 */
static void vchiq_call_connected_callbacks(struct vchiq_drv_mgmt *drv_mgmt)
{
	int i;

	if (mutex_lock_killable(&drv_mgmt->connected_mutex))
		return;

	for (i = 0; i < drv_mgmt->num_deferred_callbacks; i++)
		drv_mgmt->deferred_callback[i]();

	drv_mgmt->num_deferred_callbacks = 0;
	drv_mgmt->connected = true;
	mutex_unlock(&drv_mgmt->connected_mutex);
}

/*
 * This function is used to defer initialization until the vchiq stack is
 * initialized. If the stack is already initialized, then the callback will
 * be made immediately, otherwise it will be deferred until
 * vchiq_call_connected_callbacks is called.
 */
void vchiq_add_connected_callback(struct vchiq_device *device, void (*callback)(void))
{
	struct vchiq_drv_mgmt *drv_mgmt = device->drv_mgmt;

	if (mutex_lock_killable(&drv_mgmt->connected_mutex))
		return;

	if (drv_mgmt->connected) {
		/* We're already connected. Call the callback immediately. */
		callback();
	} else {
		if (drv_mgmt->num_deferred_callbacks >= VCHIQ_DRV_MAX_CALLBACKS) {
			dev_err(&device->dev,
				"core: deferred callbacks(%d) exceeded the maximum limit(%d)\n",
				drv_mgmt->num_deferred_callbacks, VCHIQ_DRV_MAX_CALLBACKS);
		} else {
			drv_mgmt->deferred_callback[drv_mgmt->num_deferred_callbacks] =
				callback;
			drv_mgmt->num_deferred_callbacks++;
		}
	}
	mutex_unlock(&drv_mgmt->connected_mutex);
}
EXPORT_SYMBOL(vchiq_add_connected_callback);
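
/*
 * A minimal usage sketch for a child device driver (hypothetical client,
 * for illustration only):
 *
 *	static void my_client_start(void)
 *	{
 *		pr_info("vchiq connected; services may be used now\n");
 *	}
 *
 *	vchiq_add_connected_callback(device, my_client_start);
 *
 * If the stack is already connected, the callback runs before the call
 * returns; otherwise it is invoked later from
 * vchiq_call_connected_callbacks().
 */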

/* There is a potential problem with partial cache lines (pages?)
 * at the ends of the block when reading. If the CPU accessed anything in
 * the same line (page?) then it may have pulled old data into the cache,
 * obscuring the new data underneath. We can solve this by transferring the
 * partial cache lines separately, and allowing the ARM to copy into the
 * cached area.
 */
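
/*
 * Worked example, assuming a 32-byte cache line: for a 100-byte read into
 * a buffer at page offset 10, free_pagelist() computes
 * head_bytes = (32 - 10) & 31 = 22 and tail_bytes = (10 + 100) & 31 = 14,
 * then copies those 22 leading and 14 trailing bytes out of the fragment
 * buffer with the CPU instead of relying on the DMA'd data.
 */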

static struct vchiq_pagelist_info *
create_pagelist(struct vchiq_instance *instance, char *buf, char __user *ubuf,
		size_t count, unsigned short type)
{
	struct vchiq_drv_mgmt *drv_mgmt;
	struct pagelist *pagelist;
	struct vchiq_pagelist_info *pagelistinfo;
	struct page **pages;
	u32 *addrs;
	unsigned int num_pages, offset, i, k;
	int actual_pages;
	size_t pagelist_size;
	struct scatterlist *scatterlist, *sg;
	int dma_buffers;
	dma_addr_t dma_addr;

	if (count >= INT_MAX - PAGE_SIZE)
		return NULL;

	drv_mgmt = dev_get_drvdata(instance->state->dev);

	if (buf)
		offset = (uintptr_t)buf & (PAGE_SIZE - 1);
	else
		offset = (uintptr_t)ubuf & (PAGE_SIZE - 1);
	num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);

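	/*
	 * Bail out if the pagelist_size computation below could overflow:
	 * each page contributes an addrs[] entry, a page pointer and a
	 * scatterlist entry on top of the two fixed-size structures.
	 */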
	if ((size_t)num_pages > (SIZE_MAX - sizeof(struct pagelist) -
			 sizeof(struct vchiq_pagelist_info)) /
			(sizeof(u32) + sizeof(pages[0]) +
			 sizeof(struct scatterlist)))
		return NULL;

	pagelist_size = sizeof(struct pagelist) +
			(num_pages * sizeof(u32)) +
			(num_pages * sizeof(pages[0]) +
			(num_pages * sizeof(struct scatterlist))) +
			sizeof(struct vchiq_pagelist_info);

	/* Allocate enough storage to hold the page pointers and the page
	 * list
	 */
	pagelist = dma_alloc_coherent(instance->state->dev, pagelist_size, &dma_addr,
				      GFP_KERNEL);

	dev_dbg(instance->state->dev, "arm: %pK\n", pagelist);

	if (!pagelist)
		return NULL;

	addrs		= pagelist->addrs;
	pages		= (struct page **)(addrs + num_pages);
	scatterlist	= (struct scatterlist *)(pages + num_pages);
	pagelistinfo	= (struct vchiq_pagelist_info *)
			  (scatterlist + num_pages);

	pagelist->length = count;
	pagelist->type = type;
	pagelist->offset = offset;

	/* Populate the fields of the pagelistinfo structure */
	pagelistinfo->pagelist = pagelist;
	pagelistinfo->pagelist_buffer_size = pagelist_size;
	pagelistinfo->dma_addr = dma_addr;
	pagelistinfo->dma_dir =  (type == PAGELIST_WRITE) ?
				  DMA_TO_DEVICE : DMA_FROM_DEVICE;
	pagelistinfo->num_pages = num_pages;
	pagelistinfo->pages_need_release = 0;
	pagelistinfo->pages = pages;
	pagelistinfo->scatterlist = scatterlist;
	pagelistinfo->scatterlist_mapped = 0;

	if (buf) {
		unsigned long length = count;
		unsigned int off = offset;

		for (actual_pages = 0; actual_pages < num_pages;
		     actual_pages++) {
			struct page *pg =
				vmalloc_to_page((buf +
						 (actual_pages * PAGE_SIZE)));
			size_t bytes = PAGE_SIZE - off;

			if (!pg) {
				cleanup_pagelistinfo(instance, pagelistinfo);
				return NULL;
			}

			if (bytes > length)
				bytes = length;
			pages[actual_pages] = pg;
			length -= bytes;
			off = 0;
		}
		/* do not try to release vmalloc pages */
	} else {
		actual_pages = pin_user_pages_fast((unsigned long)ubuf & PAGE_MASK, num_pages,
						   type == PAGELIST_READ, pages);

		if (actual_pages != num_pages) {
			dev_dbg(instance->state->dev, "arm: Only %d/%d pages locked\n",
				actual_pages, num_pages);

			/* This is probably due to the process being killed */
			if (actual_pages > 0)
				unpin_user_pages(pages, actual_pages);
			cleanup_pagelistinfo(instance, pagelistinfo);
			return NULL;
		}
		/* remember that the user pages need releasing */
		pagelistinfo->pages_need_release = 1;
	}

	/*
	 * Initialize the scatterlist so that the magic cookie
	 * is filled if debugging is enabled.
	 */
	sg_init_table(scatterlist, num_pages);
	/* Now set the pages for each scatterlist */
	for (i = 0; i < num_pages; i++) {
		unsigned int len = PAGE_SIZE - offset;

		if (len > count)
			len = count;
		sg_set_page(scatterlist + i, pages[i], len, offset);
		offset = 0;
		count -= len;
	}

	dma_buffers = dma_map_sg(instance->state->dev,
				 scatterlist,
				 num_pages,
				 pagelistinfo->dma_dir);

	if (dma_buffers == 0) {
		cleanup_pagelistinfo(instance, pagelistinfo);
		return NULL;
	}

	pagelistinfo->scatterlist_mapped = 1;

	/* Combine adjacent blocks for performance */
	k = 0;
	for_each_sg(scatterlist, sg, dma_buffers, i) {
		unsigned int len = sg_dma_len(sg);
		dma_addr_t addr = sg_dma_address(sg);
		/*
		 * Note: each addrs[] entry holds a page-aligned address with
		 * (page_count - 1) in the low bits. The firmware expects
		 * blocks after the first to be page-aligned and a multiple
		 * of the page size.
		 */
		WARN_ON(len == 0);
		WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
		WARN_ON(i && (addr & ~PAGE_MASK));
		if (is_adjacent_block(addrs, addr, k))
			addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
		else
			addrs[k++] = (addr & PAGE_MASK) |
				(((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
	}

	/* Partial cache lines (fragments) require special measures */
	if ((type == PAGELIST_READ) &&
	    ((pagelist->offset & (drv_mgmt->info->cache_line_size - 1)) ||
	    ((pagelist->offset + pagelist->length) &
	    (drv_mgmt->info->cache_line_size - 1)))) {
		char *fragments;

		if (down_interruptible(&drv_mgmt->free_fragments_sema)) {
			cleanup_pagelistinfo(instance, pagelistinfo);
			return NULL;
		}

		WARN_ON(!drv_mgmt->free_fragments);

		down(&drv_mgmt->free_fragments_mutex);
		fragments = drv_mgmt->free_fragments;
		WARN_ON(!fragments);
		drv_mgmt->free_fragments = *(char **)drv_mgmt->free_fragments;
		up(&drv_mgmt->free_fragments_mutex);
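		/*
		 * Encode which fragment buffer was taken in the type field;
		 * free_pagelist() recovers the index by subtracting
		 * PAGELIST_READ_WITH_FRAGMENTS.
		 */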
		pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
			(fragments - drv_mgmt->fragments_base) / drv_mgmt->fragments_size;
	}

	return pagelistinfo;
}

static void
free_pagelist(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo,
	      int actual)
{
	struct vchiq_drv_mgmt *drv_mgmt;
	struct pagelist *pagelist = pagelistinfo->pagelist;
	struct page **pages = pagelistinfo->pages;
	unsigned int num_pages = pagelistinfo->num_pages;

	dev_dbg(instance->state->dev, "arm: %pK, %d\n", pagelistinfo->pagelist, actual);

	drv_mgmt = dev_get_drvdata(instance->state->dev);

	/*
	 * NOTE: dma_unmap_sg must be called before the
	 * CPU can touch any of the data/pages.
	 */
	dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist,
		     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	pagelistinfo->scatterlist_mapped = 0;

	/* Deal with any partial cache lines (fragments) */
	if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS && drv_mgmt->fragments_base) {
		char *fragments = drv_mgmt->fragments_base +
			(pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
			drv_mgmt->fragments_size;
		int head_bytes, tail_bytes;

		head_bytes = (drv_mgmt->info->cache_line_size - pagelist->offset) &
			(drv_mgmt->info->cache_line_size - 1);
		tail_bytes = (pagelist->offset + actual) &
			(drv_mgmt->info->cache_line_size - 1);

		if ((actual >= 0) && (head_bytes != 0)) {
			if (head_bytes > actual)
				head_bytes = actual;

			memcpy_to_page(pages[0],
				pagelist->offset,
				fragments,
				head_bytes);
		}
		if ((actual >= 0) && (head_bytes < actual) &&
		    (tail_bytes != 0))
			memcpy_to_page(pages[num_pages - 1],
				(pagelist->offset + actual) &
				(PAGE_SIZE - 1) & ~(drv_mgmt->info->cache_line_size - 1),
				fragments + drv_mgmt->info->cache_line_size,
				tail_bytes);

		down(&drv_mgmt->free_fragments_mutex);
		*(char **)fragments = drv_mgmt->free_fragments;
		drv_mgmt->free_fragments = fragments;
		up(&drv_mgmt->free_fragments_mutex);
		up(&drv_mgmt->free_fragments_sema);
	}

	/* Need to mark all the pages dirty. */
	if (pagelist->type != PAGELIST_WRITE &&
	    pagelistinfo->pages_need_release) {
		unsigned int i;

		for (i = 0; i < num_pages; i++)
			set_page_dirty(pages[i]);
	}

	cleanup_pagelistinfo(instance, pagelistinfo);
}

static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
{
	struct device *dev = &pdev->dev;
	struct vchiq_drv_mgmt *drv_mgmt = platform_get_drvdata(pdev);
	struct rpi_firmware *fw = drv_mgmt->fw;
	struct vchiq_slot_zero *vchiq_slot_zero;
	void *slot_mem;
	dma_addr_t slot_phys;
	u32 channelbase;
	int slot_mem_size, frag_mem_size;
	int err, irq, i;

	/*
	 * VCHI messages between the CPU and firmware use
	 * 32-bit bus addresses.
	 */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	if (err < 0)
		return err;

	drv_mgmt->fragments_size = 2 * drv_mgmt->info->cache_line_size;

	/* Allocate space for the channels in coherent memory */
	slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	frag_mem_size = PAGE_ALIGN(drv_mgmt->fragments_size * MAX_FRAGMENTS);

	slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
				       &slot_phys, GFP_KERNEL);
	if (!slot_mem) {
		dev_err(dev, "could not allocate DMA memory\n");
		return -ENOMEM;
	}

	WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(dev, slot_mem, slot_mem_size);
	if (!vchiq_slot_zero)
		return -ENOMEM;

	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
		(int)slot_phys + slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
		MAX_FRAGMENTS;

	drv_mgmt->fragments_base = (char *)slot_mem + slot_mem_size;

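	/*
	 * Chain the fragment buffers into a singly linked free list, storing
	 * each free fragment's "next" pointer in its first bytes.
	 */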
	drv_mgmt->free_fragments = drv_mgmt->fragments_base;
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(char **)&drv_mgmt->fragments_base[i * drv_mgmt->fragments_size] =
			&drv_mgmt->fragments_base[(i + 1) * drv_mgmt->fragments_size];
	}
	*(char **)&drv_mgmt->fragments_base[i * drv_mgmt->fragments_size] = NULL;
	sema_init(&drv_mgmt->free_fragments_sema, MAX_FRAGMENTS);
	sema_init(&drv_mgmt->free_fragments_mutex, 1);

	err = vchiq_init_state(state, vchiq_slot_zero, dev);
	if (err)
		return err;

	drv_mgmt->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(drv_mgmt->regs))
		return PTR_ERR(drv_mgmt->regs);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq;

	err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
			       "VCHIQ doorbell", state);
	if (err) {
		dev_err(dev, "failed to register irq=%d\n", irq);
		return err;
	}

	/* Send the base address of the slots to VideoCore */
	channelbase = slot_phys;
	err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
				    &channelbase, sizeof(channelbase));
	if (err) {
		dev_err(dev, "failed to send firmware property: %d\n", err);
		return err;
	}

	if (channelbase) {
		dev_err(dev, "failed to set channelbase (response: %x)\n",
			channelbase);
		return -ENXIO;
	}

	dev_dbg(&pdev->dev, "arm: vchiq_init - done (slots %pK, phys %pad)\n",
		vchiq_slot_zero, &slot_phys);

	mutex_init(&drv_mgmt->connected_mutex);
	vchiq_call_connected_callbacks(drv_mgmt);

	return 0;
}

int
vchiq_platform_init_state(struct vchiq_state *state)
{
	struct vchiq_arm_state *platform_state;

	platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
	if (!platform_state)
		return -ENOMEM;

	rwlock_init(&platform_state->susp_res_lock);

	init_completion(&platform_state->ka_evt);
	atomic_set(&platform_state->ka_use_count, 0);
	atomic_set(&platform_state->ka_use_ack_count, 0);
	atomic_set(&platform_state->ka_release_count, 0);

	platform_state->state = state;

	state->platform_state = (struct opaque_platform_state *)platform_state;

	return 0;
}

static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state)
{
	return (struct vchiq_arm_state *)state->platform_state;
}

void
remote_event_signal(struct vchiq_state *state, struct remote_event *event)
{
	struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(state->dev);

	/*
	 * Ensure that all writes to shared data structures have completed
	 * before signalling the peer.
	 */
	wmb();

	event->fired = 1;

	dsb(sy);         /* data barrier operation */

	if (event->armed)
		writel(0, mgmt->regs + BELL2); /* trigger vc interrupt */
}

int
vchiq_prepare_bulk_data(struct vchiq_instance *instance, struct vchiq_bulk *bulk, void *offset,
			void __user *uoffset, int size, int dir)
{
	struct vchiq_pagelist_info *pagelistinfo;

	pagelistinfo = create_pagelist(instance, offset, uoffset, size,
				       (dir == VCHIQ_BULK_RECEIVE)
				       ? PAGELIST_READ
				       : PAGELIST_WRITE);

	if (!pagelistinfo)
		return -ENOMEM;

	bulk->data = pagelistinfo->dma_addr;

	/*
	 * Store the pagelistinfo address in remote_data,
	 * which isn't used by the slave.
	 */
	bulk->remote_data = pagelistinfo;

	return 0;
}

void
vchiq_complete_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bulk)
{
	if (bulk && bulk->remote_data && bulk->actual)
		free_pagelist(instance, (struct vchiq_pagelist_info *)bulk->remote_data,
			      bulk->actual);
}

void vchiq_dump_platform_state(struct seq_file *f)
{
	seq_puts(f, "  Platform: 2835 (VC master)\n");
}

#define VCHIQ_INIT_RETRIES 10
int vchiq_initialise(struct vchiq_state *state, struct vchiq_instance **instance_out)
{
	struct vchiq_instance *instance = NULL;
	int i, ret;

	/*
	 * VideoCore may not be ready due to boot-up timing.
	 * It may never be ready if the kernel and firmware are mismatched,
	 * so don't block forever.
	 */
	for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
		if (vchiq_remote_initialised(state))
			break;
		usleep_range(500, 600);
	}
	if (i == VCHIQ_INIT_RETRIES) {
		dev_err(state->dev, "core: %s: Videocore not initialized\n", __func__);
		ret = -ENOTCONN;
		goto failed;
	} else if (i > 0) {
		dev_warn(state->dev, "core: %s: videocore initialized after %d retries\n",
			 __func__, i);
	}

	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance) {
		ret = -ENOMEM;
		goto failed;
	}

	instance->connected = 0;
	instance->state = state;
	mutex_init(&instance->bulk_waiter_list_mutex);
	INIT_LIST_HEAD(&instance->bulk_waiter_list);

	*instance_out = instance;

	ret = 0;

failed:
	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);

	return ret;
}
EXPORT_SYMBOL(vchiq_initialise);

void free_bulk_waiter(struct vchiq_instance *instance)
{
	struct bulk_waiter_node *waiter, *next;

	list_for_each_entry_safe(waiter, next,
				 &instance->bulk_waiter_list, list) {
		list_del(&waiter->list);
		dev_dbg(instance->state->dev,
			"arm: bulk_waiter - cleaned up %pK for pid %d\n",
			waiter, waiter->pid);
		kfree(waiter);
	}
}

int vchiq_shutdown(struct vchiq_instance *instance)
{
	struct vchiq_state *state = instance->state;
	int ret = 0;

	if (mutex_lock_killable(&state->mutex))
		return -EAGAIN;

	/* Remove all services */
	vchiq_shutdown_internal(state, instance);

	mutex_unlock(&state->mutex);

	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);

	free_bulk_waiter(instance);
	kfree(instance);

	return ret;
}
EXPORT_SYMBOL(vchiq_shutdown);

static int vchiq_is_connected(struct vchiq_instance *instance)
{
	return instance->connected;
}

int vchiq_connect(struct vchiq_instance *instance)
{
	struct vchiq_state *state = instance->state;
	int ret;

	if (mutex_lock_killable(&state->mutex)) {
		dev_dbg(state->dev,
			"core: call to mutex_lock failed\n");
		ret = -EAGAIN;
		goto failed;
	}
	ret = vchiq_connect_internal(state, instance);

	if (!ret)
		instance->connected = 1;

	mutex_unlock(&state->mutex);

failed:
	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);

	return ret;
}
EXPORT_SYMBOL(vchiq_connect);

static int
vchiq_add_service(struct vchiq_instance *instance,
		  const struct vchiq_service_params_kernel *params,
		  unsigned int *phandle)
{
	struct vchiq_state *state = instance->state;
	struct vchiq_service *service = NULL;
	int srvstate, ret;

	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;

	srvstate = vchiq_is_connected(instance)
		? VCHIQ_SRVSTATE_LISTENING
		: VCHIQ_SRVSTATE_HIDDEN;

	service = vchiq_add_service_internal(state, params, srvstate, instance, NULL);

	if (service) {
		*phandle = service->handle;
		ret = 0;
	} else {
		ret = -EINVAL;
	}

	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);

	return ret;
}

int
vchiq_open_service(struct vchiq_instance *instance,
		   const struct vchiq_service_params_kernel *params,
		   unsigned int *phandle)
{
	struct vchiq_state   *state = instance->state;
	struct vchiq_service *service = NULL;
	int ret = -EINVAL;

	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;

	if (!vchiq_is_connected(instance))
		goto failed;

	service = vchiq_add_service_internal(state, params, VCHIQ_SRVSTATE_OPENING, instance, NULL);

	if (service) {
		*phandle = service->handle;
		ret = vchiq_open_service_internal(service, current->pid);
		if (ret) {
			vchiq_remove_service(instance, service->handle);
			*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
		}
	}

failed:
	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);

	return ret;
}
EXPORT_SYMBOL(vchiq_open_service);

int
vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int handle, const void *data,
		    unsigned int size, void *userdata, enum vchiq_bulk_mode mode)
{
	int ret;

	while (1) {
		switch (mode) {
		case VCHIQ_BULK_MODE_NOCALLBACK:
		case VCHIQ_BULK_MODE_CALLBACK:
			ret = vchiq_bulk_xfer_callback_interruptible(instance, handle,
								     (void *)data, NULL,
								     size, mode, userdata,
								     VCHIQ_BULK_TRANSMIT);
			break;
		case VCHIQ_BULK_MODE_BLOCKING:
			ret = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size,
							   VCHIQ_BULK_TRANSMIT);
			break;
		default:
			return -EINVAL;
		}

		/*
		 * vchiq_*_bulk_transfer() may return -EAGAIN, so we need
		 * to implement a retry mechanism since this function is
		 * supposed to block until queued
		 */
		if (ret != -EAGAIN)
			break;

		msleep(1);
	}

	return ret;
}
EXPORT_SYMBOL(vchiq_bulk_transmit);

int vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int handle,
		       void *data, unsigned int size, void *userdata,
		       enum vchiq_bulk_mode mode)
{
	int ret;

	while (1) {
		switch (mode) {
		case VCHIQ_BULK_MODE_NOCALLBACK:
		case VCHIQ_BULK_MODE_CALLBACK:
			ret = vchiq_bulk_xfer_callback_interruptible(instance, handle,
								     (void *)data, NULL,
								     size, mode, userdata,
								     VCHIQ_BULK_RECEIVE);
			break;
		case VCHIQ_BULK_MODE_BLOCKING:
			ret = vchiq_blocking_bulk_transfer(instance, handle, (void *)data, size,
							   VCHIQ_BULK_RECEIVE);
			break;
		default:
			return -EINVAL;
		}

		/*
		 * vchiq_*_bulk_transfer() may return -EAGAIN, so we need
		 * to implement a retry mechanism since this function is
		 * supposed to block until queued
		 */
		if (ret != -EAGAIN)
			break;

		msleep(1);
	}

	return ret;
}
EXPORT_SYMBOL(vchiq_bulk_receive);

static int
vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *data,
			     unsigned int size, enum vchiq_bulk_dir dir)
{
	struct vchiq_service *service;
	struct bulk_waiter_node *waiter = NULL, *iter;
	int ret;

	service = find_service_by_handle(instance, handle);
	if (!service)
		return -EINVAL;

	vchiq_service_put(service);

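	/*
	 * If a previous blocking transfer from this thread was interrupted
	 * by a signal, its waiter was parked on bulk_waiter_list under the
	 * thread's pid (see the -EAGAIN path below); look it up so a retry
	 * of the same transfer can reuse it.
	 */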
	mutex_lock(&instance->bulk_waiter_list_mutex);
	list_for_each_entry(iter, &instance->bulk_waiter_list, list) {
		if (iter->pid == current->pid) {
			list_del(&iter->list);
			waiter = iter;
			break;
		}
	}
	mutex_unlock(&instance->bulk_waiter_list_mutex);

	if (waiter) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* This thread has an outstanding bulk transfer. */
			/* FIXME: why compare a dma address to a pointer? */
			if ((bulk->data != (dma_addr_t)(uintptr_t)data) || (bulk->size != size)) {
				/*
				 * This is not a retry of the previous one.
				 * Cancel the signal when the transfer completes.
				 */
				spin_lock(&service->state->bulk_waiter_spinlock);
				bulk->userdata = NULL;
				spin_unlock(&service->state->bulk_waiter_spinlock);
			}
		}
	} else {
		waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
		if (!waiter)
			return -ENOMEM;
	}

	ret = vchiq_bulk_xfer_blocking_interruptible(instance, handle, data, NULL, size,
						     &waiter->bulk_waiter, dir);
	if ((ret != -EAGAIN) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* Cancel the signal when the transfer completes. */
			spin_lock(&service->state->bulk_waiter_spinlock);
			bulk->userdata = NULL;
			spin_unlock(&service->state->bulk_waiter_spinlock);
		}
		kfree(waiter);
	} else {
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		dev_dbg(instance->state->dev, "arm: saved bulk_waiter %pK for pid %d\n",
			waiter, current->pid);
	}

	return ret;
}

static int
add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
	       struct vchiq_header *header, struct user_service *user_service,
	       void *bulk_userdata)
{
	struct vchiq_completion_data_kernel *completion;
	struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(instance->state->dev);
	int insert;

	DEBUG_INITIALISE(mgmt->state.local);

	insert = instance->completion_insert;
	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		dev_dbg(instance->state->dev, "core: completion queue full\n");
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (wait_for_completion_interruptible(&instance->remove_event)) {
			dev_dbg(instance->state->dev, "arm: service_callback interrupted\n");
			return -EAGAIN;
		} else if (instance->closing) {
			dev_dbg(instance->state->dev, "arm: service_callback closing\n");
			return 0;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

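	/*
	 * completion_insert and completion_remove are free-running counters;
	 * masking with (MAX_COMPLETIONS - 1) below maps them onto the ring,
	 * which relies on MAX_COMPLETIONS being a power of two.
	 */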
	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/*
		 * Take an extra reference, to be held until
		 * this CLOSED notification is delivered.
		 */
		vchiq_service_get(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/*
	 * A write barrier is needed here to ensure that the entire completion
	 * record is written out before the insert point.
	 */
	wmb();

	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos = insert;

	insert++;
	instance->completion_insert = insert;

	complete(&instance->insert_event);

	return 0;
}

static int
service_single_message(struct vchiq_instance *instance,
		       enum vchiq_reason reason,
		       struct vchiq_service *service, void *bulk_userdata)
{
	struct user_service *user_service;

	user_service = (struct user_service *)service->base.userdata;

	dev_dbg(service->state->dev, "arm: msg queue full\n");
	/*
	 * If there is no MESSAGE_AVAILABLE in the completion
	 * queue, add one
	 */
	if ((user_service->message_available_pos -
	     instance->completion_remove) < 0) {
		int ret;

		dev_dbg(instance->state->dev,
			"arm: Inserting extra MESSAGE_AVAILABLE\n");
		ret = add_completion(instance, reason, NULL, user_service,
				     bulk_userdata);
		if (ret)
			return ret;
	}

	if (wait_for_completion_interruptible(&user_service->remove_event)) {
		dev_dbg(instance->state->dev, "arm: interrupted\n");
		return -EAGAIN;
	} else if (instance->closing) {
		dev_dbg(instance->state->dev, "arm: closing\n");
		return -EINVAL;
	}

	return 0;
}

int
service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
		 struct vchiq_header *header, unsigned int handle, void *bulk_userdata)
{
	/*
	 * How do we ensure the callback goes to the right client?
	 * The service_user data points to a user_service record
	 * containing the original callback and the user state structure, which
	 * contains a circular buffer for completion records.
	 */
	struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(instance->state->dev);
	struct user_service *user_service;
	struct vchiq_service *service;
	bool skip_completion = false;

	DEBUG_INITIALISE(mgmt->state.local);

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	rcu_read_lock();
	service = handle_to_service(instance, handle);
	if (WARN_ON(!service)) {
		rcu_read_unlock();
		return 0;
	}

	user_service = (struct user_service *)service->base.userdata;

	if (instance->closing) {
		rcu_read_unlock();
		return 0;
	}

	/*
	 * Because we hop between different synchronization mechanisms,
	 * taking an extra reference results in a simpler implementation.
	 */
	vchiq_service_get(service);
	rcu_read_unlock();

	dev_dbg(service->state->dev,
		"arm: service %p(%d,%p), reason %d, header %p, instance %p, bulk_userdata %p\n",
		user_service, service->localport, user_service->userdata,
		reason, header, instance, bulk_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&service->state->msg_queue_spinlock);
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			int ret;

			spin_unlock(&service->state->msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);

			ret = service_single_message(instance, reason,
						     service, bulk_userdata);
			if (ret) {
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				vchiq_service_put(service);
				return ret;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&service->state->msg_queue_spinlock);
		}

		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;

		/*
		 * If there is a thread waiting in DEQUEUE_MESSAGE, or if
		 * there is a MESSAGE_AVAILABLE in the completion queue then
		 * bypass the completion queue.
		 */
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			user_service->dequeue_pending = 0;
			skip_completion = true;
		}

		spin_unlock(&service->state->msg_queue_spinlock);
		complete(&user_service->insert_event);

		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	vchiq_service_put(service);

	if (skip_completion)
		return 0;

	return add_completion(instance, reason, header, user_service,
		bulk_userdata);
}

void vchiq_dump_platform_instances(struct vchiq_state *state, struct seq_file *f)
{
	int i;

	if (!vchiq_remote_initialised(state))
		return;

	/*
	 * There is no list of instances, so instead scan all services,
	 * marking those that have been dumped.
	 */

	rcu_read_lock();
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;

		service = rcu_dereference(state->services[i]);
		if (!service || service->base.callback != service_callback)
			continue;

		instance = service->instance;
		if (instance)
			instance->mark = 0;
	}
	rcu_read_unlock();

	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;

		rcu_read_lock();
		service = rcu_dereference(state->services[i]);
		if (!service || service->base.callback != service_callback) {
			rcu_read_unlock();
			continue;
		}

		instance = service->instance;
		if (!instance || instance->mark) {
			rcu_read_unlock();
			continue;
		}
		rcu_read_unlock();

		seq_printf(f, "Instance %pK: pid %d,%s completions %d/%d\n",
			   instance, instance->pid,
			   instance->connected ? " connected, " :
			   "",
			   instance->completion_insert -
			   instance->completion_remove,
			   MAX_COMPLETIONS);
		instance->mark = 1;
	}
}

void vchiq_dump_platform_service_state(struct seq_file *f,
				       struct vchiq_service *service)
{
	struct user_service *user_service =
			(struct user_service *)service->base.userdata;

	seq_printf(f, "  instance %pK", service->instance);

	if ((service->base.callback == service_callback) && user_service->is_vchi) {
		seq_printf(f, ", %d/%d messages",
			   user_service->msg_insert - user_service->msg_remove,
			   MSG_QUEUE_SIZE);

		if (user_service->dequeue_pending)
			seq_puts(f, " (dequeue pending)");
	}

	seq_puts(f, "\n");
}

/*
 * Autosuspend related functionality
 */
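
/*
 * The firmware can ask the ARM side to hold or drop a "use" on VideoCore;
 * vchiq_on_remote_use()/vchiq_on_remote_release() below only bump atomic
 * counters and complete ka_evt.  The keepalive thread then replays those
 * requests as vchiq_use_service()/vchiq_release_service() calls on a
 * dedicated "KEEP" service, so the blocking work happens in thread context.
 */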

static int
vchiq_keepalive_vchiq_callback(struct vchiq_instance *instance,
			       enum vchiq_reason reason,
			       struct vchiq_header *header,
			       unsigned int service_user, void *bulk_user)
{
	dev_err(instance->state->dev, "suspend: %s: callback reason %d\n",
		__func__, reason);
	return 0;
}

static int
vchiq_keepalive_thread_func(void *v)
{
	struct vchiq_state *state = (struct vchiq_state *)v;
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	struct vchiq_instance *instance;
	unsigned int ka_handle;
	int ret;

	struct vchiq_service_params_kernel params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
		.callback    = vchiq_keepalive_vchiq_callback,
		.version     = KEEPALIVE_VER,
		.version_min = KEEPALIVE_VER_MIN
	};

	ret = vchiq_initialise(state, &instance);
	if (ret) {
		dev_err(state->dev, "suspend: %s: vchiq_initialise failed %d\n", __func__, ret);
		goto exit;
	}

	ret = vchiq_connect(instance);
	if (ret) {
		dev_err(state->dev, "suspend: %s: vchiq_connect failed %d\n", __func__, ret);
		goto shutdown;
	}

	ret = vchiq_add_service(instance, &params, &ka_handle);
	if (ret) {
		dev_err(state->dev, "suspend: %s: vchiq_add_service failed %d\n",
			__func__, ret);
		goto shutdown;
	}

	while (!kthread_should_stop()) {
		long rc = 0, uc = 0;

		if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
			dev_dbg(state->dev, "suspend: %s: interrupted\n", __func__);
			flush_signals(current);
			continue;
		}

		/*
		 * Read and clear the counters. Do release_count then
		 * use_count to prevent getting more releases than uses.
		 */
		rc = atomic_xchg(&arm_state->ka_release_count, 0);
		uc = atomic_xchg(&arm_state->ka_use_count, 0);

		/*
		 * Call use/release service the requisite number of times.
		 * Process use before release so use counts don't go negative.
		 */
		while (uc--) {
			atomic_inc(&arm_state->ka_use_ack_count);
			ret = vchiq_use_service(instance, ka_handle);
			if (ret) {
				dev_err(state->dev, "suspend: %s: vchiq_use_service error %d\n",
					__func__, ret);
			}
		}
		while (rc--) {
			ret = vchiq_release_service(instance, ka_handle);
			if (ret) {
				dev_err(state->dev, "suspend: %s: vchiq_release_service error %d\n",
					__func__, ret);
			}
		}
	}

shutdown:
	vchiq_shutdown(instance);
exit:
	return 0;
}

int
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
		   enum USE_TYPE_E use_type)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	int ret = 0;
	char entity[64];
	int *entity_uc;
	int local_uc;

	if (!arm_state) {
		ret = -EINVAL;
		goto out;
	}

	if (use_type == USE_TYPE_VCHIQ) {
		snprintf(entity, sizeof(entity), "VCHIQ:   ");
		entity_uc = &arm_state->peer_use_count;
	} else if (service) {
		snprintf(entity, sizeof(entity), "%p4cc:%03d",
			 &service->base.fourcc,
			 service->client_id);
		entity_uc = &service->service_use_count;
	} else {
		dev_err(state->dev, "suspend: %s: null service ptr\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	write_lock_bh(&arm_state->susp_res_lock);
	local_uc = ++arm_state->videocore_use_count;
	++(*entity_uc);

	dev_dbg(state->dev, "suspend: %s count %d, state count %d\n",
		entity, *entity_uc, local_uc);

	write_unlock_bh(&arm_state->susp_res_lock);

	if (!ret) {
		int ret = 0;
		long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);

		while (ack_cnt && !ret) {
			/* Send the use notify to videocore */
			ret = vchiq_send_remote_use_active(state);
			if (!ret)
				ack_cnt--;
			else
				atomic_add(ack_cnt, &arm_state->ka_use_ack_count);
		}
	}

out:
	dev_dbg(state->dev, "suspend: exit %d\n", ret);
	return ret;
}

int
vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	int ret = 0;
	char entity[64];
	int *entity_uc;

	if (!arm_state) {
		ret = -EINVAL;
		goto out;
	}

	if (service) {
		snprintf(entity, sizeof(entity), "%p4cc:%03d",
			 &service->base.fourcc,
			 service->client_id);
		entity_uc = &service->service_use_count;
	} else {
		snprintf(entity, sizeof(entity), "PEER:   ");
		entity_uc = &arm_state->peer_use_count;
	}

	write_lock_bh(&arm_state->susp_res_lock);
	if (!arm_state->videocore_use_count || !(*entity_uc)) {
		WARN_ON(!arm_state->videocore_use_count);
		WARN_ON(!(*entity_uc));
		ret = -EINVAL;
		goto unlock;
	}
	--arm_state->videocore_use_count;
	--(*entity_uc);

	dev_dbg(state->dev, "suspend: %s count %d, state count %d\n",
		entity, *entity_uc, arm_state->videocore_use_count);

unlock:
	write_unlock_bh(&arm_state->susp_res_lock);

out:
	dev_dbg(state->dev, "suspend: exit %d\n", ret);
	return ret;
}

void
vchiq_on_remote_use(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	atomic_inc(&arm_state->ka_use_count);
	complete(&arm_state->ka_evt);
}

void
vchiq_on_remote_release(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	atomic_inc(&arm_state->ka_release_count);
	complete(&arm_state->ka_evt);
}

int
vchiq_use_service_internal(struct vchiq_service *service)
{
	return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
}

int
vchiq_release_service_internal(struct vchiq_service *service)
{
	return vchiq_release_internal(service->state, service);
}

struct vchiq_debugfs_node *
vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
{
	return &instance->debugfs_node;
}

int
vchiq_instance_get_use_count(struct vchiq_instance *instance)
{
	struct vchiq_service *service;
	int use_count = 0, i;

	i = 0;
	rcu_read_lock();
	while ((service = __next_service_by_instance(instance->state,
						     instance, &i)))
		use_count += service->service_use_count;
	rcu_read_unlock();
	return use_count;
}

int
vchiq_instance_get_pid(struct vchiq_instance *instance)
{
	return instance->pid;
}

int
vchiq_instance_get_trace(struct vchiq_instance *instance)
{
	return instance->trace;
}

void
vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
{
	struct vchiq_service *service;
	int i;

	i = 0;
	rcu_read_lock();
	while ((service = __next_service_by_instance(instance->state,
						     instance, &i)))
		service->trace = trace;
	rcu_read_unlock();
	instance->trace = (trace != 0);
}

int
vchiq_use_service(struct vchiq_instance *instance, unsigned int handle)
{
	int ret = -EINVAL;
	struct vchiq_service *service = find_service_by_handle(instance, handle);

	if (service) {
		ret = vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
		vchiq_service_put(service);
	}
	return ret;
}
EXPORT_SYMBOL(vchiq_use_service);

int
vchiq_release_service(struct vchiq_instance *instance, unsigned int handle)
{
	int ret = -EINVAL;
	struct vchiq_service *service = find_service_by_handle(instance, handle);

	if (service) {
		ret = vchiq_release_internal(service->state, service);
		vchiq_service_put(service);
	}
	return ret;
}
EXPORT_SYMBOL(vchiq_release_service);

struct service_data_struct {
	int fourcc;
	int clientid;
	int use_count;
};

void
vchiq_dump_service_use_state(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	struct service_data_struct *service_data;
	int i, found = 0;
	/*
	 * If there are more than 64 services, only dump ones with
	 * non-zero counts.
	 */
	int only_nonzero = 0;
	static const char *nz = "<-- preventing suspend";

	int peer_count;
	int vc_use_count;
	int active_services;

	if (!arm_state)
		return;

	service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
				     GFP_KERNEL);
	if (!service_data)
		return;

	read_lock_bh(&arm_state->susp_res_lock);
	peer_count = arm_state->peer_use_count;
	vc_use_count = arm_state->videocore_use_count;
	active_services = state->unused_service;
	if (active_services > MAX_SERVICES)
		only_nonzero = 1;

	rcu_read_lock();
	for (i = 0; i < active_services; i++) {
		struct vchiq_service *service_ptr =
			rcu_dereference(state->services[i]);

		if (!service_ptr)
			continue;

		if (only_nonzero && !service_ptr->service_use_count)
			continue;

		if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
			continue;

		service_data[found].fourcc = service_ptr->base.fourcc;
		service_data[found].clientid = service_ptr->client_id;
		service_data[found].use_count = service_ptr->service_use_count;
		found++;
		if (found >= MAX_SERVICES)
			break;
	}
	rcu_read_unlock();

	read_unlock_bh(&arm_state->susp_res_lock);

	if (only_nonzero)
		dev_warn(state->dev,
			 "suspend: Too many active services (%d). Only dumping up to first %d services with non-zero use-count\n",
			 active_services, found);

	for (i = 0; i < found; i++) {
		dev_warn(state->dev,
			 "suspend: %p4cc:%d service count %d %s\n",
			 &service_data[i].fourcc,
			 service_data[i].clientid, service_data[i].use_count,
			 service_data[i].use_count ? nz : "");
	}
	dev_warn(state->dev, "suspend: VCHIQ use count %d\n", peer_count);
	dev_warn(state->dev, "suspend: Overall vchiq instance use count %d\n", vc_use_count);

	kfree(service_data);
}

int
vchiq_check_service(struct vchiq_service *service)
{
	struct vchiq_arm_state *arm_state;
	int ret = -EINVAL;

	if (!service || !service->state)
		goto out;

	arm_state = vchiq_platform_get_arm_state(service->state);

	read_lock_bh(&arm_state->susp_res_lock);
	if (service->service_use_count)
		ret = 0;
	read_unlock_bh(&arm_state->susp_res_lock);

	if (ret) {
		dev_err(service->state->dev,
			"suspend: %s:  %p4cc:%d service count %d, state count %d\n",
			__func__, &service->base.fourcc, service->client_id,
			service->service_use_count, arm_state->videocore_use_count);
		vchiq_dump_service_use_state(service->state);
	}
out:
	return ret;
}

void vchiq_platform_conn_state_changed(struct vchiq_state *state,
				       enum vchiq_connstate oldstate,
				       enum vchiq_connstate newstate)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	char threadname[16];

	dev_dbg(state->dev, "suspend: %d: %s->%s\n",
		state->id, get_conn_state_name(oldstate), get_conn_state_name(newstate));
	if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
		return;

	write_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->first_connect) {
		write_unlock_bh(&arm_state->susp_res_lock);
		return;
	}

	arm_state->first_connect = 1;
	write_unlock_bh(&arm_state->susp_res_lock);
	snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
		 state->id);
	arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
					      (void *)state,
					      threadname);
	if (IS_ERR(arm_state->ka_thread)) {
		dev_err(state->dev, "suspend: Couldn't create thread %s\n",
			threadname);
	} else {
		wake_up_process(arm_state->ka_thread);
	}
}

static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_info },
	{ .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_info },
	{},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);

static int vchiq_probe(struct platform_device *pdev)
{
	struct device_node *fw_node;
	const struct vchiq_platform_info *info;
	struct vchiq_drv_mgmt *mgmt;
	int ret;

	info = of_device_get_match_data(&pdev->dev);
	if (!info)
		return -EINVAL;

	fw_node = of_find_compatible_node(NULL, NULL,
					  "raspberrypi,bcm2835-firmware");
	if (!fw_node) {
		dev_err(&pdev->dev, "Missing firmware node\n");
		return -ENOENT;
	}

	mgmt = kzalloc(sizeof(*mgmt), GFP_KERNEL);
	if (!mgmt)
		return -ENOMEM;

	mgmt->fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
	of_node_put(fw_node);
	if (!mgmt->fw) {
		/* Don't leak the management structure on probe deferral */
		kfree(mgmt);
		return -EPROBE_DEFER;
	}

	mgmt->info = info;
	platform_set_drvdata(pdev, mgmt);

	ret = vchiq_platform_init(pdev, &mgmt->state);
	if (ret)
		goto failed_platform_init;

	vchiq_debugfs_init(&mgmt->state);

	dev_dbg(&pdev->dev, "arm: platform initialised - version %d (min %d)\n",
		VCHIQ_VERSION, VCHIQ_VERSION_MIN);

	/*
	 * Simply exit on error since the function handles cleanup in
	 * cases of failure.
	 */
	ret = vchiq_register_chrdev(&pdev->dev);
	if (ret) {
		dev_err(&pdev->dev, "arm: Failed to initialize vchiq cdev\n");
		goto error_exit;
	}

	bcm2835_audio = vchiq_device_register(&pdev->dev, "bcm2835-audio");
	bcm2835_camera = vchiq_device_register(&pdev->dev, "bcm2835-camera");

	return 0;

failed_platform_init:
	dev_err(&pdev->dev, "arm: Could not initialize vchiq platform\n");
error_exit:
	return ret;
}

static void vchiq_remove(struct platform_device *pdev)
{
	struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(&pdev->dev);
	struct vchiq_arm_state *arm_state;

	vchiq_device_unregister(bcm2835_audio);
	vchiq_device_unregister(bcm2835_camera);
	vchiq_debugfs_deinit();
	vchiq_deregister_chrdev();

	kthread_stop(mgmt->state.sync_thread);
	kthread_stop(mgmt->state.recycle_thread);
	kthread_stop(mgmt->state.slot_handler_thread);

	arm_state = vchiq_platform_get_arm_state(&mgmt->state);
	kthread_stop(arm_state->ka_thread);

	kfree(mgmt);
}

static struct platform_driver vchiq_driver = {
	.driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	},
	.probe = vchiq_probe,
	.remove_new = vchiq_remove,
};

static int __init vchiq_driver_init(void)
{
	int ret;

	ret = bus_register(&vchiq_bus_type);
	if (ret) {
		pr_err("Failed to register %s\n", vchiq_bus_type.name);
		return ret;
	}

	ret = platform_driver_register(&vchiq_driver);
	if (ret) {
		pr_err("Failed to register vchiq driver\n");
		bus_unregister(&vchiq_bus_type);
	}

	return ret;
}
module_init(vchiq_driver_init);

static void __exit vchiq_driver_exit(void)
{
	bus_unregister(&vchiq_bus_type);
	platform_driver_unregister(&vchiq_driver);
}
module_exit(vchiq_driver_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");