1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
4  * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5  */
6 
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/sched/signal.h>
10 #include <linux/types.h>
11 #include <linux/errno.h>
12 #include <linux/cdev.h>
13 #include <linux/fs.h>
14 #include <linux/device.h>
15 #include <linux/mm.h>
16 #include <linux/highmem.h>
17 #include <linux/pagemap.h>
18 #include <linux/bug.h>
19 #include <linux/completion.h>
20 #include <linux/list.h>
21 #include <linux/of.h>
22 #include <linux/platform_device.h>
23 #include <linux/compat.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/rcupdate.h>
26 #include <linux/delay.h>
27 #include <linux/slab.h>
28 #include <linux/interrupt.h>
29 #include <linux/io.h>
30 #include <linux/uaccess.h>
31 #include <soc/bcm2835/raspberrypi-firmware.h>
32 
33 #include "vchiq_core.h"
34 #include "vchiq_ioctl.h"
35 #include "vchiq_arm.h"
36 #include "vchiq_debugfs.h"
37 #include "vchiq_connected.h"
38 #include "vchiq_pagelist.h"
39 
40 #define DEVICE_NAME "vchiq"
41 
42 #define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
43 
44 #define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
45 
46 #define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
47 #define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX  1
48 
49 #define BELL0	0x00
50 #define BELL2	0x08
51 
52 #define ARM_DS_ACTIVE	BIT(2)
53 
54 /* Override the default prefix, which would be vchiq_arm (from the filename) */
55 #undef MODULE_PARAM_PREFIX
56 #define MODULE_PARAM_PREFIX DEVICE_NAME "."
57 
58 #define KEEPALIVE_VER 1
59 #define KEEPALIVE_VER_MIN KEEPALIVE_VER
60 
61 /* Run time control of log level, based on KERN_XXX level. */
62 int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
63 int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
64 
65 DEFINE_SPINLOCK(msg_queue_spinlock);
66 struct vchiq_state g_state;
67 
68 static struct platform_device *bcm2835_camera;
69 static struct platform_device *bcm2835_audio;
70 
71 static struct vchiq_drvdata bcm2835_drvdata = {
72 	.cache_line_size = 32,
73 };
74 
75 static struct vchiq_drvdata bcm2836_drvdata = {
76 	.cache_line_size = 64,
77 };
78 
/*
 * Per-vchiq_state platform data for the 2835 family.
 * 'inited' is set by vchiq_platform_init_state() once 'arm_state' has been
 * initialised, and checked in vchiq_platform_get_arm_state().
 */
struct vchiq_2835_state {
	int inited;
	struct vchiq_arm_state arm_state;
};
83 
/*
 * Book-keeping for one bulk transfer's pagelist. It is carved out of the
 * tail of the same coherent allocation as the pagelist itself (see
 * create_pagelist()), so freeing the allocation frees this struct too.
 */
struct vchiq_pagelist_info {
	struct pagelist *pagelist;		/* the VPU-visible page list */
	size_t pagelist_buffer_size;		/* size of the whole coherent alloc */
	dma_addr_t dma_addr;			/* bus address of the pagelist */
	enum dma_data_direction dma_dir;	/* DMA_TO_DEVICE (write) or DMA_FROM_DEVICE (read) */
	unsigned int num_pages;
	unsigned int pages_need_release;	/* set when pages were pinned via pin_user_pages_fast() */
	struct page **pages;
	struct scatterlist *scatterlist;
	unsigned int scatterlist_mapped;	/* set while mapped via dma_map_sg() */
};
95 
96 static void __iomem *g_regs;
97 /* This value is the size of the L2 cache lines as understood by the
98  * VPU firmware, which determines the required alignment of the
99  * offsets/sizes in pagelists.
100  *
101  * Modern VPU firmware looks for a DT "cache-line-size" property in
102  * the VCHIQ node and will overwrite it with the actual L2 cache size,
103  * which the kernel must then respect.  That property was rejected
104  * upstream, so we have to use the VPU firmware's compatibility value
105  * of 32.
106  */
107 static unsigned int g_cache_line_size = 32;
108 static unsigned int g_fragments_size;
109 static char *g_fragments_base;
110 static char *g_free_fragments;
111 static struct semaphore g_free_fragments_sema;
112 static struct device *g_dev;
113 
114 static DEFINE_SEMAPHORE(g_free_fragments_mutex);
115 
116 static enum vchiq_status
117 vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
118 	unsigned int size, enum vchiq_bulk_dir dir);
119 
120 static irqreturn_t
vchiq_doorbell_irq(int irq,void * dev_id)121 vchiq_doorbell_irq(int irq, void *dev_id)
122 {
123 	struct vchiq_state *state = dev_id;
124 	irqreturn_t ret = IRQ_NONE;
125 	unsigned int status;
126 
127 	/* Read (and clear) the doorbell */
128 	status = readl(g_regs + BELL0);
129 
130 	if (status & ARM_DS_ACTIVE) {  /* Was the doorbell rung? */
131 		remote_event_pollall(state);
132 		ret = IRQ_HANDLED;
133 	}
134 
135 	return ret;
136 }
137 
/*
 * Undo everything create_pagelist() set up, in reverse order: DMA-unmap
 * the scatterlist (if mapped), unpin user pages (if pinned), then free
 * the coherent buffer - which also frees 'pagelistinfo' itself, since it
 * lives inside that allocation.
 */
static void
cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
{
	if (pagelistinfo->scatterlist_mapped) {
		dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
			     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	}

	if (pagelistinfo->pages_need_release)
		unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages);

	/* Frees pagelistinfo too - do not touch it after this call */
	dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
			  pagelistinfo->pagelist, pagelistinfo->dma_addr);
}
152 
153 /* There is a potential problem with partial cache lines (pages?)
154  * at the ends of the block when reading. If the CPU accessed anything in
155  * the same line (page?) then it may have pulled old data into the cache,
156  * obscuring the new data underneath. We can solve this by transferring the
157  * partial cache lines separately, and allowing the ARM to copy into the
158  * cached area.
159  */
160 
/*
 * Build a pagelist (plus its book-keeping vchiq_pagelist_info) describing
 * either a kernel buffer 'buf' or a user buffer 'ubuf' of 'count' bytes
 * ('buf' takes precedence when both are given), for a PAGELIST_READ or
 * PAGELIST_WRITE transfer.
 *
 * Returns the pagelistinfo on success, or NULL on failure (size overflow,
 * allocation failure, page pinning failure, DMA mapping failure, or an
 * interrupted wait for a free fragment).
 */
static struct vchiq_pagelist_info *
create_pagelist(char *buf, char __user *ubuf,
		size_t count, unsigned short type)
{
	struct pagelist *pagelist;
	struct vchiq_pagelist_info *pagelistinfo;
	struct page **pages;
	u32 *addrs;
	unsigned int num_pages, offset, i, k;
	int actual_pages;
	size_t pagelist_size;
	struct scatterlist *scatterlist, *sg;
	int dma_buffers;
	dma_addr_t dma_buffers;

	if (count >= INT_MAX - PAGE_SIZE)
		return NULL;

	/* Offset of the data within its first page */
	if (buf)
		offset = (uintptr_t)buf & (PAGE_SIZE - 1);
	else
		offset = (uintptr_t)ubuf & (PAGE_SIZE - 1);
	num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);

	/* Reject page counts that would overflow pagelist_size below */
	if ((size_t)num_pages > (SIZE_MAX - sizeof(struct pagelist) -
			 sizeof(struct vchiq_pagelist_info)) /
			(sizeof(u32) + sizeof(pages[0]) +
			 sizeof(struct scatterlist)))
		return NULL;

	/*
	 * One allocation holds, in order: the pagelist (header + addrs[]),
	 * the page pointer array, the scatterlist, and the pagelistinfo.
	 */
	pagelist_size = sizeof(struct pagelist) +
			(num_pages * sizeof(u32)) +
			(num_pages * sizeof(pages[0]) +
			(num_pages * sizeof(struct scatterlist))) +
			sizeof(struct vchiq_pagelist_info);

	/* Allocate enough storage to hold the page pointers and the page
	 * list
	 */
	pagelist = dma_alloc_coherent(g_dev, pagelist_size, &dma_addr,
				      GFP_KERNEL);

	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);

	if (!pagelist)
		return NULL;

	/* Carve the single allocation into its sub-regions (see above) */
	addrs		= pagelist->addrs;
	pages		= (struct page **)(addrs + num_pages);
	scatterlist	= (struct scatterlist *)(pages + num_pages);
	pagelistinfo	= (struct vchiq_pagelist_info *)
			  (scatterlist + num_pages);

	pagelist->length = count;
	pagelist->type = type;
	pagelist->offset = offset;

	/* Populate the fields of the pagelistinfo structure */
	pagelistinfo->pagelist = pagelist;
	pagelistinfo->pagelist_buffer_size = pagelist_size;
	pagelistinfo->dma_addr = dma_addr;
	pagelistinfo->dma_dir =  (type == PAGELIST_WRITE) ?
				  DMA_TO_DEVICE : DMA_FROM_DEVICE;
	pagelistinfo->num_pages = num_pages;
	pagelistinfo->pages_need_release = 0;
	pagelistinfo->pages = pages;
	pagelistinfo->scatterlist = scatterlist;
	pagelistinfo->scatterlist_mapped = 0;

	if (buf) {
		/*
		 * Kernel buffer: looked up with vmalloc_to_page(), so it is
		 * assumed to be vmalloc'd - TODO confirm against callers.
		 */
		unsigned long length = count;
		unsigned int off = offset;

		for (actual_pages = 0; actual_pages < num_pages;
		     actual_pages++) {
			struct page *pg =
				vmalloc_to_page((buf +
						 (actual_pages * PAGE_SIZE)));
			size_t bytes = PAGE_SIZE - off;

			if (!pg) {
				cleanup_pagelistinfo(pagelistinfo);
				return NULL;
			}

			if (bytes > length)
				bytes = length;
			pages[actual_pages] = pg;
			length -= bytes;
			off = 0;
		}
		/* do not try and release vmalloc pages */
	} else {
		/*
		 * User buffer: pin the pages so they cannot move or be
		 * freed while the transfer is in flight.
		 */
		actual_pages = pin_user_pages_fast(
					  (unsigned long)ubuf & PAGE_MASK,
					  num_pages,
					  type == PAGELIST_READ,
					  pages);

		if (actual_pages != num_pages) {
			vchiq_log_info(vchiq_arm_log_level,
				       "%s - only %d/%d pages locked",
				       __func__, actual_pages, num_pages);

			/* This is probably due to the process being killed */
			if (actual_pages > 0)
				unpin_user_pages(pages, actual_pages);
			cleanup_pagelistinfo(pagelistinfo);
			return NULL;
		}
		 /* release user pages */
		pagelistinfo->pages_need_release = 1;
	}

	/*
	 * Initialize the scatterlist so that the magic cookie
	 *  is filled if debugging is enabled
	 */
	sg_init_table(scatterlist, num_pages);
	/* Now set the pages for each scatterlist */
	for (i = 0; i < num_pages; i++)	{
		unsigned int len = PAGE_SIZE - offset;

		if (len > count)
			len = count;
		sg_set_page(scatterlist + i, pages[i], len, offset);
		offset = 0;	/* only the first page has a non-zero offset */
		count -= len;
	}

	dma_buffers = dma_map_sg(g_dev,
				 scatterlist,
				 num_pages,
				 pagelistinfo->dma_dir);

	if (dma_buffers == 0) {
		cleanup_pagelistinfo(pagelistinfo);
		return NULL;
	}

	pagelistinfo->scatterlist_mapped = 1;

	/* Combine adjacent blocks for performance */
	k = 0;
	for_each_sg(scatterlist, sg, dma_buffers, i) {
		u32 len = sg_dma_len(sg);
		u32 addr = sg_dma_address(sg);

		/* Note: addrs is the address + page_count - 1
		 * The firmware expects blocks after the first to be page-
		 * aligned and a multiple of the page size
		 */
		WARN_ON(len == 0);
		WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
		WARN_ON(i && (addr & ~PAGE_MASK));
		/*
		 * Merge into the previous addrs[] entry when this segment
		 * starts exactly where the previous one ends.
		 */
		if (k > 0 &&
		    ((addrs[k - 1] & PAGE_MASK) +
		     (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT))
		    == (addr & PAGE_MASK))
			addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
		else
			addrs[k++] = (addr & PAGE_MASK) |
				(((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
	}

	/* Partial cache lines (fragments) require special measures */
	if ((type == PAGELIST_READ) &&
		((pagelist->offset & (g_cache_line_size - 1)) ||
		((pagelist->offset + pagelist->length) &
		(g_cache_line_size - 1)))) {
		char *fragments;

		/* May block until another transfer returns a fragment */
		if (down_interruptible(&g_free_fragments_sema)) {
			cleanup_pagelistinfo(pagelistinfo);
			return NULL;
		}

		WARN_ON(!g_free_fragments);

		/* Pop the head of the free-fragment list */
		down(&g_free_fragments_mutex);
		fragments = g_free_fragments;
		WARN_ON(!fragments);
		g_free_fragments = *(char **) g_free_fragments;
		up(&g_free_fragments_mutex);
		/* Encode the fragment's index into the pagelist type */
		pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
			(fragments - g_fragments_base) / g_fragments_size;
	}

	return pagelistinfo;
}
351 
/*
 * Release a pagelist after its transfer has completed (or failed).
 * 'actual' is the number of bytes actually transferred; a negative value
 * skips the fragment copy-back. For reads that used a fragment buffer,
 * the partial-cache-line head and tail bytes are copied back into the
 * first/last pages, the fragment is returned to the free list, and the
 * pages are dirtied before everything is torn down.
 */
static void
free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
	      int actual)
{
	struct pagelist *pagelist = pagelistinfo->pagelist;
	struct page **pages = pagelistinfo->pages;
	unsigned int num_pages = pagelistinfo->num_pages;

	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK, %d",
			__func__, pagelistinfo->pagelist, actual);

	/*
	 * NOTE: dma_unmap_sg must be called before the
	 * cpu can touch any of the data/pages.
	 */
	dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
		     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	pagelistinfo->scatterlist_mapped = 0;

	/* Deal with any partial cache lines (fragments) */
	if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
		/* create_pagelist() encoded the fragment index in 'type' */
		char *fragments = g_fragments_base +
			(pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
			g_fragments_size;
		int head_bytes, tail_bytes;

		head_bytes = (g_cache_line_size - pagelist->offset) &
			(g_cache_line_size - 1);
		tail_bytes = (pagelist->offset + actual) &
			(g_cache_line_size - 1);

		/* Copy the head fragment into the first page */
		if ((actual >= 0) && (head_bytes != 0)) {
			if (head_bytes > actual)
				head_bytes = actual;

			memcpy((char *)kmap(pages[0]) +
				pagelist->offset,
				fragments,
				head_bytes);
			kunmap(pages[0]);
		}
		/* Copy the tail fragment into the last page */
		if ((actual >= 0) && (head_bytes < actual) &&
			(tail_bytes != 0)) {
			memcpy((char *)kmap(pages[num_pages - 1]) +
				((pagelist->offset + actual) &
				(PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
				fragments + g_cache_line_size,
				tail_bytes);
			kunmap(pages[num_pages - 1]);
		}

		/* Push the fragment back onto the free list */
		down(&g_free_fragments_mutex);
		*(char **)fragments = g_free_fragments;
		g_free_fragments = fragments;
		up(&g_free_fragments_mutex);
		up(&g_free_fragments_sema);
	}

	/* Need to mark all the pages dirty. */
	if (pagelist->type != PAGELIST_WRITE &&
	    pagelistinfo->pages_need_release) {
		unsigned int i;

		for (i = 0; i < num_pages; i++)
			set_page_dirty(pages[i]);
	}

	cleanup_pagelistinfo(pagelistinfo);
}
421 
vchiq_platform_init(struct platform_device * pdev,struct vchiq_state * state)422 int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
423 {
424 	struct device *dev = &pdev->dev;
425 	struct vchiq_drvdata *drvdata = platform_get_drvdata(pdev);
426 	struct rpi_firmware *fw = drvdata->fw;
427 	struct vchiq_slot_zero *vchiq_slot_zero;
428 	void *slot_mem;
429 	dma_addr_t slot_phys;
430 	u32 channelbase;
431 	int slot_mem_size, frag_mem_size;
432 	int err, irq, i;
433 
434 	/*
435 	 * VCHI messages between the CPU and firmware use
436 	 * 32-bit bus addresses.
437 	 */
438 	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
439 
440 	if (err < 0)
441 		return err;
442 
443 	g_cache_line_size = drvdata->cache_line_size;
444 	g_fragments_size = 2 * g_cache_line_size;
445 
446 	/* Allocate space for the channels in coherent memory */
447 	slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
448 	frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);
449 
450 	slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
451 				       &slot_phys, GFP_KERNEL);
452 	if (!slot_mem) {
453 		dev_err(dev, "could not allocate DMA memory\n");
454 		return -ENOMEM;
455 	}
456 
457 	WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);
458 
459 	vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
460 	if (!vchiq_slot_zero)
461 		return -EINVAL;
462 
463 	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
464 		(int)slot_phys + slot_mem_size;
465 	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
466 		MAX_FRAGMENTS;
467 
468 	g_fragments_base = (char *)slot_mem + slot_mem_size;
469 
470 	g_free_fragments = g_fragments_base;
471 	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
472 		*(char **)&g_fragments_base[i*g_fragments_size] =
473 			&g_fragments_base[(i + 1)*g_fragments_size];
474 	}
475 	*(char **)&g_fragments_base[i * g_fragments_size] = NULL;
476 	sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
477 
478 	err = vchiq_init_state(state, vchiq_slot_zero);
479 	if (err)
480 		return err;
481 
482 	g_regs = devm_platform_ioremap_resource(pdev, 0);
483 	if (IS_ERR(g_regs))
484 		return PTR_ERR(g_regs);
485 
486 	irq = platform_get_irq(pdev, 0);
487 	if (irq <= 0)
488 		return irq;
489 
490 	err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
491 			       "VCHIQ doorbell", state);
492 	if (err) {
493 		dev_err(dev, "failed to register irq=%d\n", irq);
494 		return err;
495 	}
496 
497 	/* Send the base address of the slots to VideoCore */
498 	channelbase = slot_phys;
499 	err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
500 				    &channelbase, sizeof(channelbase));
501 	if (err || channelbase) {
502 		dev_err(dev, "failed to set channelbase\n");
503 		return err ? : -ENXIO;
504 	}
505 
506 	g_dev = dev;
507 	vchiq_log_info(vchiq_arm_log_level,
508 		"vchiq_init - done (slots %pK, phys %pad)",
509 		vchiq_slot_zero, &slot_phys);
510 
511 	vchiq_call_connected_callbacks();
512 
513 	return 0;
514 }
515 
516 int
vchiq_platform_init_state(struct vchiq_state * state)517 vchiq_platform_init_state(struct vchiq_state *state)
518 {
519 	struct vchiq_2835_state *platform_state;
520 
521 	state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
522 	if (!state->platform_state)
523 		return -ENOMEM;
524 
525 	platform_state = (struct vchiq_2835_state *)state->platform_state;
526 
527 	platform_state->inited = 1;
528 	vchiq_arm_init_state(state, &platform_state->arm_state);
529 
530 	return 0;
531 }
532 
533 struct vchiq_arm_state*
vchiq_platform_get_arm_state(struct vchiq_state * state)534 vchiq_platform_get_arm_state(struct vchiq_state *state)
535 {
536 	struct vchiq_2835_state *platform_state;
537 
538 	platform_state   = (struct vchiq_2835_state *)state->platform_state;
539 
540 	WARN_ON_ONCE(!platform_state->inited);
541 
542 	return &platform_state->arm_state;
543 }
544 
/*
 * Signal a remote (VPU-side) event and, if the remote side has armed the
 * event, ring its doorbell via the BELL2 register.
 *
 * The wmb() ensures all prior writes to shared memory are visible before
 * 'fired' is set; the dsb(sy) ensures the 'fired' write has completed
 * before the doorbell register write can be observed.
 */
void
remote_event_signal(struct remote_event *event)
{
	wmb();

	event->fired = 1;

	dsb(sy);         /* data barrier operation */

	if (event->armed)
		writel(0, g_regs + BELL2); /* trigger vc interrupt */
}
557 
558 int
vchiq_prepare_bulk_data(struct vchiq_bulk * bulk,void * offset,void __user * uoffset,int size,int dir)559 vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset,
560 			void __user *uoffset, int size, int dir)
561 {
562 	struct vchiq_pagelist_info *pagelistinfo;
563 
564 	pagelistinfo = create_pagelist(offset, uoffset, size,
565 				       (dir == VCHIQ_BULK_RECEIVE)
566 				       ? PAGELIST_READ
567 				       : PAGELIST_WRITE);
568 
569 	if (!pagelistinfo)
570 		return -ENOMEM;
571 
572 	bulk->data = pagelistinfo->dma_addr;
573 
574 	/*
575 	 * Store the pagelistinfo address in remote_data,
576 	 * which isn't used by the slave.
577 	 */
578 	bulk->remote_data = pagelistinfo;
579 
580 	return 0;
581 }
582 
583 void
vchiq_complete_bulk(struct vchiq_bulk * bulk)584 vchiq_complete_bulk(struct vchiq_bulk *bulk)
585 {
586 	if (bulk && bulk->remote_data && bulk->actual)
587 		free_pagelist((struct vchiq_pagelist_info *)bulk->remote_data,
588 			      bulk->actual);
589 }
590 
/* Emit a one-line platform description into the dump context. */
int vchiq_dump_platform_state(void *dump_context)
{
	char buf[80];
	int len = snprintf(buf, sizeof(buf),
			   "  Platform: 2835 (VC master)");

	/* +1 to include the terminating NUL */
	return vchiq_dump(dump_context, buf, len + 1);
}
600 
601 #define VCHIQ_INIT_RETRIES 10
vchiq_initialise(struct vchiq_instance ** instance_out)602 int vchiq_initialise(struct vchiq_instance **instance_out)
603 {
604 	struct vchiq_state *state;
605 	struct vchiq_instance *instance = NULL;
606 	int i, ret;
607 
608 	/*
609 	 * VideoCore may not be ready due to boot up timing.
610 	 * It may never be ready if kernel and firmware are mismatched,so don't
611 	 * block forever.
612 	 */
613 	for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
614 		state = vchiq_get_state();
615 		if (state)
616 			break;
617 		usleep_range(500, 600);
618 	}
619 	if (i == VCHIQ_INIT_RETRIES) {
620 		vchiq_log_error(vchiq_core_log_level,
621 			"%s: videocore not initialized\n", __func__);
622 		ret = -ENOTCONN;
623 		goto failed;
624 	} else if (i > 0) {
625 		vchiq_log_warning(vchiq_core_log_level,
626 			"%s: videocore initialized after %d retries\n",
627 			__func__, i);
628 	}
629 
630 	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
631 	if (!instance) {
632 		vchiq_log_error(vchiq_core_log_level,
633 			"%s: error allocating vchiq instance\n", __func__);
634 		ret = -ENOMEM;
635 		goto failed;
636 	}
637 
638 	instance->connected = 0;
639 	instance->state = state;
640 	mutex_init(&instance->bulk_waiter_list_mutex);
641 	INIT_LIST_HEAD(&instance->bulk_waiter_list);
642 
643 	*instance_out = instance;
644 
645 	ret = 0;
646 
647 failed:
648 	vchiq_log_trace(vchiq_core_log_level,
649 		"%s(%p): returning %d", __func__, instance, ret);
650 
651 	return ret;
652 }
653 EXPORT_SYMBOL(vchiq_initialise);
654 
free_bulk_waiter(struct vchiq_instance * instance)655 void free_bulk_waiter(struct vchiq_instance *instance)
656 {
657 	struct bulk_waiter_node *waiter, *next;
658 
659 	list_for_each_entry_safe(waiter, next,
660 				 &instance->bulk_waiter_list, list) {
661 		list_del(&waiter->list);
662 		vchiq_log_info(vchiq_arm_log_level,
663 				"bulk_waiter - cleaned up %pK for pid %d",
664 				waiter, waiter->pid);
665 		kfree(waiter);
666 	}
667 }
668 
vchiq_shutdown(struct vchiq_instance * instance)669 enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance)
670 {
671 	enum vchiq_status status = VCHIQ_SUCCESS;
672 	struct vchiq_state *state = instance->state;
673 
674 	if (mutex_lock_killable(&state->mutex))
675 		return VCHIQ_RETRY;
676 
677 	/* Remove all services */
678 	vchiq_shutdown_internal(state, instance);
679 
680 	mutex_unlock(&state->mutex);
681 
682 	vchiq_log_trace(vchiq_core_log_level,
683 		"%s(%p): returning %d", __func__, instance, status);
684 
685 	free_bulk_waiter(instance);
686 	kfree(instance);
687 
688 	return status;
689 }
690 EXPORT_SYMBOL(vchiq_shutdown);
691 
vchiq_is_connected(struct vchiq_instance * instance)692 static int vchiq_is_connected(struct vchiq_instance *instance)
693 {
694 	return instance->connected;
695 }
696 
vchiq_connect(struct vchiq_instance * instance)697 enum vchiq_status vchiq_connect(struct vchiq_instance *instance)
698 {
699 	enum vchiq_status status;
700 	struct vchiq_state *state = instance->state;
701 
702 	if (mutex_lock_killable(&state->mutex)) {
703 		vchiq_log_trace(vchiq_core_log_level,
704 			"%s: call to mutex_lock failed", __func__);
705 		status = VCHIQ_RETRY;
706 		goto failed;
707 	}
708 	status = vchiq_connect_internal(state, instance);
709 
710 	if (status == VCHIQ_SUCCESS)
711 		instance->connected = 1;
712 
713 	mutex_unlock(&state->mutex);
714 
715 failed:
716 	vchiq_log_trace(vchiq_core_log_level,
717 		"%s(%p): returning %d", __func__, instance, status);
718 
719 	return status;
720 }
721 EXPORT_SYMBOL(vchiq_connect);
722 
723 static enum vchiq_status
vchiq_add_service(struct vchiq_instance * instance,const struct vchiq_service_params_kernel * params,unsigned int * phandle)724 vchiq_add_service(struct vchiq_instance *instance,
725 		  const struct vchiq_service_params_kernel *params,
726 		  unsigned int *phandle)
727 {
728 	enum vchiq_status status;
729 	struct vchiq_state *state = instance->state;
730 	struct vchiq_service *service = NULL;
731 	int srvstate;
732 
733 	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
734 
735 	srvstate = vchiq_is_connected(instance)
736 		? VCHIQ_SRVSTATE_LISTENING
737 		: VCHIQ_SRVSTATE_HIDDEN;
738 
739 	service = vchiq_add_service_internal(
740 		state,
741 		params,
742 		srvstate,
743 		instance,
744 		NULL);
745 
746 	if (service) {
747 		*phandle = service->handle;
748 		status = VCHIQ_SUCCESS;
749 	} else {
750 		status = VCHIQ_ERROR;
751 	}
752 
753 	vchiq_log_trace(vchiq_core_log_level,
754 		"%s(%p): returning %d", __func__, instance, status);
755 
756 	return status;
757 }
758 
759 enum vchiq_status
vchiq_open_service(struct vchiq_instance * instance,const struct vchiq_service_params_kernel * params,unsigned int * phandle)760 vchiq_open_service(struct vchiq_instance *instance,
761 		   const struct vchiq_service_params_kernel *params,
762 		   unsigned int *phandle)
763 {
764 	enum vchiq_status   status = VCHIQ_ERROR;
765 	struct vchiq_state   *state = instance->state;
766 	struct vchiq_service *service = NULL;
767 
768 	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
769 
770 	if (!vchiq_is_connected(instance))
771 		goto failed;
772 
773 	service = vchiq_add_service_internal(state,
774 		params,
775 		VCHIQ_SRVSTATE_OPENING,
776 		instance,
777 		NULL);
778 
779 	if (service) {
780 		*phandle = service->handle;
781 		status = vchiq_open_service_internal(service, current->pid);
782 		if (status != VCHIQ_SUCCESS) {
783 			vchiq_remove_service(service->handle);
784 			*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
785 		}
786 	}
787 
788 failed:
789 	vchiq_log_trace(vchiq_core_log_level,
790 		"%s(%p): returning %d", __func__, instance, status);
791 
792 	return status;
793 }
794 EXPORT_SYMBOL(vchiq_open_service);
795 
796 enum vchiq_status
vchiq_bulk_transmit(unsigned int handle,const void * data,unsigned int size,void * userdata,enum vchiq_bulk_mode mode)797 vchiq_bulk_transmit(unsigned int handle, const void *data, unsigned int size,
798 		    void *userdata, enum vchiq_bulk_mode mode)
799 {
800 	enum vchiq_status status;
801 
802 	while (1) {
803 		switch (mode) {
804 		case VCHIQ_BULK_MODE_NOCALLBACK:
805 		case VCHIQ_BULK_MODE_CALLBACK:
806 			status = vchiq_bulk_transfer(handle,
807 						     (void *)data, NULL,
808 						     size, userdata, mode,
809 						     VCHIQ_BULK_TRANSMIT);
810 			break;
811 		case VCHIQ_BULK_MODE_BLOCKING:
812 			status = vchiq_blocking_bulk_transfer(handle,
813 				(void *)data, size, VCHIQ_BULK_TRANSMIT);
814 			break;
815 		default:
816 			return VCHIQ_ERROR;
817 		}
818 
819 		/*
820 		 * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
821 		 * to implement a retry mechanism since this function is
822 		 * supposed to block until queued
823 		 */
824 		if (status != VCHIQ_RETRY)
825 			break;
826 
827 		msleep(1);
828 	}
829 
830 	return status;
831 }
832 EXPORT_SYMBOL(vchiq_bulk_transmit);
833 
vchiq_bulk_receive(unsigned int handle,void * data,unsigned int size,void * userdata,enum vchiq_bulk_mode mode)834 enum vchiq_status vchiq_bulk_receive(unsigned int handle, void *data,
835 				     unsigned int size, void *userdata,
836 				     enum vchiq_bulk_mode mode)
837 {
838 	enum vchiq_status status;
839 
840 	while (1) {
841 		switch (mode) {
842 		case VCHIQ_BULK_MODE_NOCALLBACK:
843 		case VCHIQ_BULK_MODE_CALLBACK:
844 			status = vchiq_bulk_transfer(handle, data, NULL,
845 						     size, userdata,
846 						     mode, VCHIQ_BULK_RECEIVE);
847 			break;
848 		case VCHIQ_BULK_MODE_BLOCKING:
849 			status = vchiq_blocking_bulk_transfer(handle,
850 				(void *)data, size, VCHIQ_BULK_RECEIVE);
851 			break;
852 		default:
853 			return VCHIQ_ERROR;
854 		}
855 
856 		/*
857 		 * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
858 		 * to implement a retry mechanism since this function is
859 		 * supposed to block until queued
860 		 */
861 		if (status != VCHIQ_RETRY)
862 			break;
863 
864 		msleep(1);
865 	}
866 
867 	return status;
868 }
869 EXPORT_SYMBOL(vchiq_bulk_receive);
870 
/*
 * Perform a bulk transfer in VCHIQ_BULK_MODE_BLOCKING on behalf of the
 * current thread. A per-pid bulk_waiter_node is parked on the instance's
 * list whenever the core returns VCHIQ_RETRY, so a subsequent call from
 * the same thread can resume waiting on the same outstanding transfer
 * instead of starting a new one.
 */
static enum vchiq_status
vchiq_blocking_bulk_transfer(unsigned int handle, void *data, unsigned int size,
			     enum vchiq_bulk_dir dir)
{
	struct vchiq_instance *instance;
	struct vchiq_service *service;
	enum vchiq_status status;
	struct bulk_waiter_node *waiter = NULL;
	bool found = false;

	service = find_service_by_handle(handle);
	if (!service)
		return VCHIQ_ERROR;

	instance = service->instance;

	/* The service was only needed to reach its instance */
	vchiq_service_put(service);

	/* Look for a waiter left behind by an earlier VCHIQ_RETRY */
	mutex_lock(&instance->bulk_waiter_list_mutex);
	list_for_each_entry(waiter, &instance->bulk_waiter_list, list) {
		if (waiter->pid == current->pid) {
			list_del(&waiter->list);
			found = true;
			break;
		}
	}
	mutex_unlock(&instance->bulk_waiter_list_mutex);

	if (found) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* This thread has an outstanding bulk transfer. */
			/* FIXME: why compare a dma address to a pointer? */
			if ((bulk->data != (dma_addr_t)(uintptr_t)data) ||
				(bulk->size != size)) {
				/*
				 * This is not a retry of the previous one.
				 * Cancel the signal when the transfer completes.
				 */
				spin_lock(&bulk_waiter_spinlock);
				bulk->userdata = NULL;
				spin_unlock(&bulk_waiter_spinlock);
			}
		}
	} else {
		/* First attempt by this thread - allocate a fresh waiter */
		waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
		if (!waiter) {
			vchiq_log_error(vchiq_core_log_level,
				"%s - out of memory", __func__);
			return VCHIQ_ERROR;
		}
	}

	status = vchiq_bulk_transfer(handle, data, NULL, size,
				     &waiter->bulk_waiter,
				     VCHIQ_BULK_MODE_BLOCKING, dir);
	if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
		!waiter->bulk_waiter.bulk) {
		/* Done (or unrecoverable): detach and free the waiter */
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* Cancel the signal when the transfer completes. */
			spin_lock(&bulk_waiter_spinlock);
			bulk->userdata = NULL;
			spin_unlock(&bulk_waiter_spinlock);
		}
		kfree(waiter);
	} else {
		/* VCHIQ_RETRY: park the waiter for this thread's next call */
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		vchiq_log_info(vchiq_arm_log_level,
				"saved bulk_waiter %pK for pid %d",
				waiter, current->pid);
	}

	return status;
}
951 
/*
 * Queue a completion record for delivery to the user-space client.
 * Waits (interruptibly) while the completion ring is full; returns
 * VCHIQ_RETRY if that wait is interrupted, VCHIQ_SUCCESS otherwise
 * (including when the instance is closing, in which case the record
 * is silently dropped).
 */
static enum vchiq_status
add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
	       struct vchiq_header *header, struct user_service *user_service,
	       void *bulk_userdata)
{
	struct vchiq_completion_data_kernel *completion;
	int insert;

	DEBUG_INITIALISE(g_state.local)

	insert = instance->completion_insert;
	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		vchiq_log_trace(vchiq_arm_log_level,
			"%s - completion queue full", __func__);
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (wait_for_completion_interruptible(
					&instance->remove_event)) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback interrupted");
			return VCHIQ_RETRY;
		} else if (instance->closing) {
			vchiq_log_info(vchiq_arm_log_level,
				"service_callback closing");
			return VCHIQ_SUCCESS;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	/* Mask wraps the ring index (assumes MAX_COMPLETIONS is a power of
	 * two - TODO confirm where it is defined)
	 */
	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/*
		 * Take an extra reference, to be held until
		 * this CLOSED notification is delivered.
		 */
		vchiq_service_get(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/*
	 * A write barrier is needed here to ensure that the entire completion
	 * record is written out before the insert point.
	 */
	wmb();

	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos = insert;

	insert++;
	instance->completion_insert = insert;

	complete(&instance->insert_event);

	return VCHIQ_SUCCESS;
}
1016 
enum vchiq_status
service_callback(enum vchiq_reason reason, struct vchiq_header *header,
		 unsigned int handle, void *bulk_userdata)
{
	/*
	 * How do we ensure the callback goes to the right client?
	 * The service_user data points to a user_service record
	 * containing the original callback and the user state structure, which
	 * contains a circular buffer for completion records.
	 */
	struct user_service *user_service;
	struct vchiq_service *service;
	struct vchiq_instance *instance;
	bool skip_completion = false;

	DEBUG_INITIALISE(g_state.local)

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	service = handle_to_service(handle);
	if (WARN_ON(!service))
		return VCHIQ_SUCCESS;

	user_service = (struct user_service *)service->base.userdata;
	instance = user_service->instance;

	/* Instance gone or shutting down - drop the event silently. */
	if (!instance || instance->closing)
		return VCHIQ_SUCCESS;

	vchiq_log_trace(vchiq_arm_log_level,
		"%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
		__func__, (unsigned long)user_service,
		service->localport, user_service->userdata,
		reason, (unsigned long)header,
		(unsigned long)instance, (unsigned long)bulk_userdata);

	/*
	 * For VCHI services, message headers are queued in a per-service
	 * ring (msg_queue) and the completion queue only needs a single
	 * MESSAGE_AVAILABLE marker, rather than one entry per message.
	 */
	if (header && user_service->is_vchi) {
		spin_lock(&msg_queue_spinlock);
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			/* Message ring full - must wait for the client. */
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
			vchiq_log_trace(vchiq_arm_log_level,
				"service_callback - msg queue full");
			/*
			 * If there is no MESSAGE_AVAILABLE in the completion
			 * queue, add one
			 */
			if ((user_service->message_available_pos -
				instance->completion_remove) < 0) {
				enum vchiq_status status;

				vchiq_log_info(vchiq_arm_log_level,
					"Inserting extra MESSAGE_AVAILABLE");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				status = add_completion(instance, reason,
					NULL, user_service, bulk_userdata);
				if (status != VCHIQ_SUCCESS) {
					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
					return status;
				}
			}

			/* Block until the client dequeues a message. */
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			if (wait_for_completion_interruptible(
						&user_service->remove_event)) {
				vchiq_log_info(vchiq_arm_log_level,
					"%s interrupted", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_RETRY;
			} else if (instance->closing) {
				vchiq_log_info(vchiq_arm_log_level,
					"%s closing", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_ERROR;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&msg_queue_spinlock);
		}

		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;

		/*
		 * If there is a thread waiting in DEQUEUE_MESSAGE, or if
		 * there is a MESSAGE_AVAILABLE in the completion queue then
		 * bypass the completion queue.
		 */
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			user_service->dequeue_pending = 0;
			skip_completion = true;
		}

		spin_unlock(&msg_queue_spinlock);
		complete(&user_service->insert_event);

		/* The header is now owned by the message queue, not the
		 * completion record added below.
		 */
		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	if (skip_completion)
		return VCHIQ_SUCCESS;

	return add_completion(instance, reason, header, user_service,
		bulk_userdata);
}
1127 
vchiq_dump(void * dump_context,const char * str,int len)1128 int vchiq_dump(void *dump_context, const char *str, int len)
1129 {
1130 	struct dump_context *context = (struct dump_context *)dump_context;
1131 	int copy_bytes;
1132 
1133 	if (context->actual >= context->space)
1134 		return 0;
1135 
1136 	if (context->offset > 0) {
1137 		int skip_bytes = min_t(int, len, context->offset);
1138 
1139 		str += skip_bytes;
1140 		len -= skip_bytes;
1141 		context->offset -= skip_bytes;
1142 		if (context->offset > 0)
1143 			return 0;
1144 	}
1145 	copy_bytes = min_t(int, len, context->space - context->actual);
1146 	if (copy_bytes == 0)
1147 		return 0;
1148 	if (copy_to_user(context->buf + context->actual, str,
1149 			 copy_bytes))
1150 		return -EFAULT;
1151 	context->actual += copy_bytes;
1152 	len -= copy_bytes;
1153 
1154 	/*
1155 	 * If the terminating NUL is included in the length, then it
1156 	 * marks the end of a line and should be replaced with a
1157 	 * carriage return.
1158 	 */
1159 	if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
1160 		char cr = '\n';
1161 
1162 		if (copy_to_user(context->buf + context->actual - 1,
1163 				 &cr, 1))
1164 			return -EFAULT;
1165 	}
1166 	return 0;
1167 }
1168 
/*
 * Dump a one-line summary of every vchiq_instance to the user buffer.
 * Returns 0 on success or a negative errno from vchiq_dump().
 */
int vchiq_dump_platform_instances(void *dump_context)
{
	struct vchiq_state *state = vchiq_get_state();
	char buf[80];
	int len;
	int i;

	/*
	 * There is no list of instances, so instead scan all services,
	 * marking those that have been dumped.
	 */

	/* First pass: clear the 'mark' flag on every reachable instance. */
	rcu_read_lock();
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;

		service = rcu_dereference(state->services[i]);
		/* Only userspace services (service_callback) have instances. */
		if (!service || service->base.callback != service_callback)
			continue;

		instance = service->instance;
		if (instance)
			instance->mark = 0;
	}
	rcu_read_unlock();

	/* Second pass: dump each instance exactly once. */
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;
		int err;

		rcu_read_lock();
		service = rcu_dereference(state->services[i]);
		if (!service || service->base.callback != service_callback) {
			rcu_read_unlock();
			continue;
		}

		instance = service->instance;
		if (!instance || instance->mark) {
			rcu_read_unlock();
			continue;
		}
		/*
		 * NOTE(review): 'instance' is dereferenced below after
		 * rcu_read_unlock(); this presumes the instance outlives the
		 * RCU read section - confirm against instance lifetime rules.
		 */
		rcu_read_unlock();

		len = snprintf(buf, sizeof(buf),
			       "Instance %pK: pid %d,%s completions %d/%d",
			       instance, instance->pid,
			       instance->connected ? " connected, " :
			       "",
			       instance->completion_insert -
			       instance->completion_remove,
			       MAX_COMPLETIONS);
		err = vchiq_dump(dump_context, buf, len + 1);
		if (err)
			return err;
		instance->mark = 1;
	}
	return 0;
}
1230 
vchiq_dump_platform_service_state(void * dump_context,struct vchiq_service * service)1231 int vchiq_dump_platform_service_state(void *dump_context,
1232 				      struct vchiq_service *service)
1233 {
1234 	struct user_service *user_service =
1235 			(struct user_service *)service->base.userdata;
1236 	char buf[80];
1237 	int len;
1238 
1239 	len = scnprintf(buf, sizeof(buf), "  instance %pK", service->instance);
1240 
1241 	if ((service->base.callback == service_callback) &&
1242 		user_service->is_vchi) {
1243 		len += scnprintf(buf + len, sizeof(buf) - len,
1244 			", %d/%d messages",
1245 			user_service->msg_insert - user_service->msg_remove,
1246 			MSG_QUEUE_SIZE);
1247 
1248 		if (user_service->dequeue_pending)
1249 			len += scnprintf(buf + len, sizeof(buf) - len,
1250 				" (dequeue pending)");
1251 	}
1252 
1253 	return vchiq_dump(dump_context, buf, len + 1);
1254 }
1255 
1256 struct vchiq_state *
vchiq_get_state(void)1257 vchiq_get_state(void)
1258 {
1259 
1260 	if (!g_state.remote)
1261 		pr_err("%s: g_state.remote == NULL\n", __func__);
1262 	else if (g_state.remote->initialised != 1)
1263 		pr_notice("%s: g_state.remote->initialised != 1 (%d)\n",
1264 			  __func__, g_state.remote->initialised);
1265 
1266 	return (g_state.remote &&
1267 		(g_state.remote->initialised == 1)) ? &g_state : NULL;
1268 }
1269 
1270 /*
1271  * Autosuspend related functionality
1272  */
1273 
1274 static enum vchiq_status
vchiq_keepalive_vchiq_callback(enum vchiq_reason reason,struct vchiq_header * header,unsigned int service_user,void * bulk_user)1275 vchiq_keepalive_vchiq_callback(enum vchiq_reason reason,
1276 			       struct vchiq_header *header,
1277 			       unsigned int service_user, void *bulk_user)
1278 {
1279 	vchiq_log_error(vchiq_susp_log_level,
1280 		"%s callback reason %d", __func__, reason);
1281 	return 0;
1282 }
1283 
/*
 * Keepalive kernel thread.  Opens a private vchiq connection plus a
 * "KEEP" service, then loops forever translating remote use/release
 * notifications (counted by vchiq_on_remote_use/release) into
 * vchiq_use_service()/vchiq_release_service() calls on that service.
 */
static int
vchiq_keepalive_thread_func(void *v)
{
	struct vchiq_state *state = (struct vchiq_state *)v;
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	enum vchiq_status status;
	struct vchiq_instance *instance;
	unsigned int ka_handle;
	int ret;

	struct vchiq_service_params_kernel params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
		.callback    = vchiq_keepalive_vchiq_callback,
		.version     = KEEPALIVE_VER,
		.version_min = KEEPALIVE_VER_MIN
	};

	ret = vchiq_initialise(&instance);
	if (ret) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_initialise failed %d", __func__, ret);
		goto exit;
	}

	status = vchiq_connect(instance);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_connect failed %d", __func__, status);
		goto shutdown;
	}

	status = vchiq_add_service(instance, &params, &ka_handle);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level,
			"%s vchiq_open_service failed %d", __func__, status);
		goto shutdown;
	}

	while (1) {
		long rc = 0, uc = 0;

		/* Sleep until vchiq_on_remote_use/release signals ka_evt. */
		if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
			vchiq_log_error(vchiq_susp_log_level,
				"%s interrupted", __func__);
			flush_signals(current);
			continue;
		}

		/*
		 * read and clear counters.  Do release_count then use_count to
		 * prevent getting more releases than uses
		 */
		rc = atomic_xchg(&arm_state->ka_release_count, 0);
		uc = atomic_xchg(&arm_state->ka_use_count, 0);

		/*
		 * Call use/release service the requisite number of times.
		 * Process use before release so use counts don't go negative
		 */
		while (uc--) {
			atomic_inc(&arm_state->ka_use_ack_count);
			status = vchiq_use_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s vchiq_use_service error %d",
					__func__, status);
			}
		}
		while (rc--) {
			status = vchiq_release_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
					"%s vchiq_release_service error %d",
					__func__, status);
			}
		}
	}

shutdown:
	vchiq_shutdown(instance);
exit:
	return 0;
}
1368 
1369 void
vchiq_arm_init_state(struct vchiq_state * state,struct vchiq_arm_state * arm_state)1370 vchiq_arm_init_state(struct vchiq_state *state,
1371 		     struct vchiq_arm_state *arm_state)
1372 {
1373 	if (arm_state) {
1374 		rwlock_init(&arm_state->susp_res_lock);
1375 
1376 		init_completion(&arm_state->ka_evt);
1377 		atomic_set(&arm_state->ka_use_count, 0);
1378 		atomic_set(&arm_state->ka_use_ack_count, 0);
1379 		atomic_set(&arm_state->ka_release_count, 0);
1380 
1381 		arm_state->state = state;
1382 		arm_state->first_connect = 0;
1383 
1384 	}
1385 }
1386 
1387 int
vchiq_use_internal(struct vchiq_state * state,struct vchiq_service * service,enum USE_TYPE_E use_type)1388 vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
1389 		   enum USE_TYPE_E use_type)
1390 {
1391 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1392 	int ret = 0;
1393 	char entity[16];
1394 	int *entity_uc;
1395 	int local_uc;
1396 
1397 	if (!arm_state) {
1398 		ret = -EINVAL;
1399 		goto out;
1400 	}
1401 
1402 	if (use_type == USE_TYPE_VCHIQ) {
1403 		sprintf(entity, "VCHIQ:   ");
1404 		entity_uc = &arm_state->peer_use_count;
1405 	} else if (service) {
1406 		sprintf(entity, "%c%c%c%c:%03d",
1407 			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1408 			service->client_id);
1409 		entity_uc = &service->service_use_count;
1410 	} else {
1411 		vchiq_log_error(vchiq_susp_log_level, "%s null service ptr", __func__);
1412 		ret = -EINVAL;
1413 		goto out;
1414 	}
1415 
1416 	write_lock_bh(&arm_state->susp_res_lock);
1417 	local_uc = ++arm_state->videocore_use_count;
1418 	++(*entity_uc);
1419 
1420 	vchiq_log_trace(vchiq_susp_log_level,
1421 		"%s %s count %d, state count %d",
1422 		__func__, entity, *entity_uc, local_uc);
1423 
1424 	write_unlock_bh(&arm_state->susp_res_lock);
1425 
1426 	if (!ret) {
1427 		enum vchiq_status status = VCHIQ_SUCCESS;
1428 		long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
1429 
1430 		while (ack_cnt && (status == VCHIQ_SUCCESS)) {
1431 			/* Send the use notify to videocore */
1432 			status = vchiq_send_remote_use_active(state);
1433 			if (status == VCHIQ_SUCCESS)
1434 				ack_cnt--;
1435 			else
1436 				atomic_add(ack_cnt,
1437 					&arm_state->ka_use_ack_count);
1438 		}
1439 	}
1440 
1441 out:
1442 	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
1443 	return ret;
1444 }
1445 
1446 int
vchiq_release_internal(struct vchiq_state * state,struct vchiq_service * service)1447 vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
1448 {
1449 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1450 	int ret = 0;
1451 	char entity[16];
1452 	int *entity_uc;
1453 
1454 	if (!arm_state) {
1455 		ret = -EINVAL;
1456 		goto out;
1457 	}
1458 
1459 	if (service) {
1460 		sprintf(entity, "%c%c%c%c:%03d",
1461 			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1462 			service->client_id);
1463 		entity_uc = &service->service_use_count;
1464 	} else {
1465 		sprintf(entity, "PEER:   ");
1466 		entity_uc = &arm_state->peer_use_count;
1467 	}
1468 
1469 	write_lock_bh(&arm_state->susp_res_lock);
1470 	if (!arm_state->videocore_use_count || !(*entity_uc)) {
1471 		/* Don't use BUG_ON - don't allow user thread to crash kernel */
1472 		WARN_ON(!arm_state->videocore_use_count);
1473 		WARN_ON(!(*entity_uc));
1474 		ret = -EINVAL;
1475 		goto unlock;
1476 	}
1477 	--arm_state->videocore_use_count;
1478 	--(*entity_uc);
1479 
1480 	vchiq_log_trace(vchiq_susp_log_level,
1481 		"%s %s count %d, state count %d",
1482 		__func__, entity, *entity_uc,
1483 		arm_state->videocore_use_count);
1484 
1485 unlock:
1486 	write_unlock_bh(&arm_state->susp_res_lock);
1487 
1488 out:
1489 	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
1490 	return ret;
1491 }
1492 
1493 void
vchiq_on_remote_use(struct vchiq_state * state)1494 vchiq_on_remote_use(struct vchiq_state *state)
1495 {
1496 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1497 
1498 	atomic_inc(&arm_state->ka_use_count);
1499 	complete(&arm_state->ka_evt);
1500 }
1501 
1502 void
vchiq_on_remote_release(struct vchiq_state * state)1503 vchiq_on_remote_release(struct vchiq_state *state)
1504 {
1505 	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1506 
1507 	atomic_inc(&arm_state->ka_release_count);
1508 	complete(&arm_state->ka_evt);
1509 }
1510 
/* Take a VideoCore use-count on behalf of @service. */
int
vchiq_use_service_internal(struct vchiq_service *service)
{
	return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
}
1516 
/* Drop a VideoCore use-count held on behalf of @service. */
int
vchiq_release_service_internal(struct vchiq_service *service)
{
	return vchiq_release_internal(service->state, service);
}
1522 
/* Accessor for the instance's embedded debugfs node. */
struct vchiq_debugfs_node *
vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
{
	return &instance->debugfs_node;
}
1528 
1529 int
vchiq_instance_get_use_count(struct vchiq_instance * instance)1530 vchiq_instance_get_use_count(struct vchiq_instance *instance)
1531 {
1532 	struct vchiq_service *service;
1533 	int use_count = 0, i;
1534 
1535 	i = 0;
1536 	rcu_read_lock();
1537 	while ((service = __next_service_by_instance(instance->state,
1538 						     instance, &i)))
1539 		use_count += service->service_use_count;
1540 	rcu_read_unlock();
1541 	return use_count;
1542 }
1543 
/* Accessor for the pid of the process that opened this instance. */
int
vchiq_instance_get_pid(struct vchiq_instance *instance)
{
	return instance->pid;
}
1549 
/* Accessor for the instance's message-tracing flag. */
int
vchiq_instance_get_trace(struct vchiq_instance *instance)
{
	return instance->trace;
}
1555 
1556 void
vchiq_instance_set_trace(struct vchiq_instance * instance,int trace)1557 vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
1558 {
1559 	struct vchiq_service *service;
1560 	int i;
1561 
1562 	i = 0;
1563 	rcu_read_lock();
1564 	while ((service = __next_service_by_instance(instance->state,
1565 						     instance, &i)))
1566 		service->trace = trace;
1567 	rcu_read_unlock();
1568 	instance->trace = (trace != 0);
1569 }
1570 
1571 enum vchiq_status
vchiq_use_service(unsigned int handle)1572 vchiq_use_service(unsigned int handle)
1573 {
1574 	enum vchiq_status ret = VCHIQ_ERROR;
1575 	struct vchiq_service *service = find_service_by_handle(handle);
1576 
1577 	if (service) {
1578 		ret = vchiq_use_internal(service->state, service,
1579 				USE_TYPE_SERVICE);
1580 		vchiq_service_put(service);
1581 	}
1582 	return ret;
1583 }
1584 EXPORT_SYMBOL(vchiq_use_service);
1585 
1586 enum vchiq_status
vchiq_release_service(unsigned int handle)1587 vchiq_release_service(unsigned int handle)
1588 {
1589 	enum vchiq_status ret = VCHIQ_ERROR;
1590 	struct vchiq_service *service = find_service_by_handle(handle);
1591 
1592 	if (service) {
1593 		ret = vchiq_release_internal(service->state, service);
1594 		vchiq_service_put(service);
1595 	}
1596 	return ret;
1597 }
1598 EXPORT_SYMBOL(vchiq_release_service);
1599 
/* Snapshot of one service's identity and use-count, taken under the
 * suspend/resume lock so it can be logged after the lock is dropped.
 */
struct service_data_struct {
	int fourcc;	/* service fourcc identifier */
	int clientid;	/* client id of the service */
	int use_count;	/* service_use_count at snapshot time */
};
1605 
/*
 * Log (at warning level) the use-count of every active service plus the
 * peer and overall VideoCore use-counts.  Counts are snapshotted under
 * the suspend/resume read lock, then logged after it is dropped.
 */
void
vchiq_dump_service_use_state(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	struct service_data_struct *service_data;
	int i, found = 0;
	/*
	 * If there's more than 64 services, only dump ones with
	 * non-zero counts
	 */
	int only_nonzero = 0;
	static const char *nz = "<-- preventing suspend";

	int peer_count;
	int vc_use_count;
	int active_services;

	if (!arm_state)
		return;

	service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
				     GFP_KERNEL);
	if (!service_data)
		return;

	read_lock_bh(&arm_state->susp_res_lock);
	peer_count = arm_state->peer_use_count;
	vc_use_count = arm_state->videocore_use_count;
	active_services = state->unused_service;
	if (active_services > MAX_SERVICES)
		only_nonzero = 1;

	/* Snapshot service identities/counts; log later without the lock. */
	rcu_read_lock();
	for (i = 0; i < active_services; i++) {
		struct vchiq_service *service_ptr =
			rcu_dereference(state->services[i]);

		if (!service_ptr)
			continue;

		if (only_nonzero && !service_ptr->service_use_count)
			continue;

		if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
			continue;

		service_data[found].fourcc = service_ptr->base.fourcc;
		service_data[found].clientid = service_ptr->client_id;
		service_data[found].use_count = service_ptr->service_use_count;
		found++;
		if (found >= MAX_SERVICES)
			break;
	}
	rcu_read_unlock();

	read_unlock_bh(&arm_state->susp_res_lock);

	if (only_nonzero)
		vchiq_log_warning(vchiq_susp_log_level, "Too many active "
			"services (%d).  Only dumping up to first %d services "
			"with non-zero use-count", active_services, found);

	for (i = 0; i < found; i++) {
		vchiq_log_warning(vchiq_susp_log_level,
			"----- %c%c%c%c:%d service count %d %s",
			VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
			service_data[i].clientid,
			service_data[i].use_count,
			service_data[i].use_count ? nz : "");
	}
	vchiq_log_warning(vchiq_susp_log_level,
		"----- VCHIQ use count count %d", peer_count);
	vchiq_log_warning(vchiq_susp_log_level,
		"--- Overall vchiq instance use count %d", vc_use_count);

	kfree(service_data);
}
1683 
1684 enum vchiq_status
vchiq_check_service(struct vchiq_service * service)1685 vchiq_check_service(struct vchiq_service *service)
1686 {
1687 	struct vchiq_arm_state *arm_state;
1688 	enum vchiq_status ret = VCHIQ_ERROR;
1689 
1690 	if (!service || !service->state)
1691 		goto out;
1692 
1693 	arm_state = vchiq_platform_get_arm_state(service->state);
1694 
1695 	read_lock_bh(&arm_state->susp_res_lock);
1696 	if (service->service_use_count)
1697 		ret = VCHIQ_SUCCESS;
1698 	read_unlock_bh(&arm_state->susp_res_lock);
1699 
1700 	if (ret == VCHIQ_ERROR) {
1701 		vchiq_log_error(vchiq_susp_log_level,
1702 			"%s ERROR - %c%c%c%c:%d service count %d, state count %d", __func__,
1703 			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1704 			service->client_id, service->service_use_count,
1705 			arm_state->videocore_use_count);
1706 		vchiq_dump_service_use_state(service->state);
1707 	}
1708 out:
1709 	return ret;
1710 }
1711 
/*
 * Connection-state notification hook.  On the first transition to
 * CONNECTED, spawn the keepalive thread exactly once (guarded by
 * first_connect under the suspend/resume write lock).
 */
void vchiq_platform_conn_state_changed(struct vchiq_state *state,
				       enum vchiq_connstate oldstate,
				       enum vchiq_connstate newstate)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	char threadname[16];

	vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
		get_conn_state_name(oldstate), get_conn_state_name(newstate));
	if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
		return;

	/* Only spawn the keepalive thread on the first connect. */
	write_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->first_connect) {
		write_unlock_bh(&arm_state->susp_res_lock);
		return;
	}

	arm_state->first_connect = 1;
	write_unlock_bh(&arm_state->susp_res_lock);
	snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
		 state->id);
	arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
					      (void *)state,
					      threadname);
	if (IS_ERR(arm_state->ka_thread)) {
		vchiq_log_error(vchiq_susp_log_level,
				"vchiq: FATAL: couldn't create thread %s",
				threadname);
	} else {
		wake_up_process(arm_state->ka_thread);
	}
}
1745 
/* Device-tree match table; .data selects per-SoC driver data. */
static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_drvdata },
	{ .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_drvdata },
	{},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);
1752 
1753 static struct platform_device *
vchiq_register_child(struct platform_device * pdev,const char * name)1754 vchiq_register_child(struct platform_device *pdev, const char *name)
1755 {
1756 	struct platform_device_info pdevinfo;
1757 	struct platform_device *child;
1758 
1759 	memset(&pdevinfo, 0, sizeof(pdevinfo));
1760 
1761 	pdevinfo.parent = &pdev->dev;
1762 	pdevinfo.name = name;
1763 	pdevinfo.id = PLATFORM_DEVID_NONE;
1764 	pdevinfo.dma_mask = DMA_BIT_MASK(32);
1765 
1766 	child = platform_device_register_full(&pdevinfo);
1767 	if (IS_ERR(child)) {
1768 		dev_warn(&pdev->dev, "%s not registered\n", name);
1769 		child = NULL;
1770 	}
1771 
1772 	return child;
1773 }
1774 
/*
 * Platform probe: resolve per-SoC driver data and the firmware node,
 * initialise the vchiq platform layer, register the character device,
 * then register the camera and audio child devices.
 */
static int vchiq_probe(struct platform_device *pdev)
{
	struct device_node *fw_node;
	const struct of_device_id *of_id;
	struct vchiq_drvdata *drvdata;
	int err;

	of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
	drvdata = (struct vchiq_drvdata *)of_id->data;
	if (!drvdata)
		return -EINVAL;

	fw_node = of_find_compatible_node(NULL, NULL,
					  "raspberrypi,bcm2835-firmware");
	if (!fw_node) {
		dev_err(&pdev->dev, "Missing firmware node\n");
		return -ENOENT;
	}

	/* Defer probing until the firmware driver is available. */
	drvdata->fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
	of_node_put(fw_node);
	if (!drvdata->fw)
		return -EPROBE_DEFER;

	platform_set_drvdata(pdev, drvdata);

	err = vchiq_platform_init(pdev, &g_state);
	if (err)
		goto failed_platform_init;

	vchiq_debugfs_init();

	vchiq_log_info(vchiq_arm_log_level,
		       "vchiq: platform initialised - version %d (min %d)",
		       VCHIQ_VERSION, VCHIQ_VERSION_MIN);

	/*
	 * Simply exit on error since the function handles cleanup in
	 * cases of failure.
	 */
	err = vchiq_register_chrdev(&pdev->dev);
	if (err) {
		vchiq_log_warning(vchiq_arm_log_level,
				  "Failed to initialize vchiq cdev");
		goto error_exit;
	}

	/* Child registration failures are non-fatal (warned internally). */
	bcm2835_camera = vchiq_register_child(pdev, "bcm2835-camera");
	bcm2835_audio = vchiq_register_child(pdev, "bcm2835_audio");

	return 0;

failed_platform_init:
	vchiq_log_warning(vchiq_arm_log_level, "could not initialize vchiq platform");
error_exit:
	return err;
}
1832 
/*
 * Platform remove: tear down in reverse of probe order - children
 * first, then debugfs, then the character device.
 */
static int vchiq_remove(struct platform_device *pdev)
{
	platform_device_unregister(bcm2835_audio);
	platform_device_unregister(bcm2835_camera);
	vchiq_debugfs_deinit();
	vchiq_deregister_chrdev();

	return 0;
}
1842 
/* Platform driver glue for the bcm2835 vchiq device. */
static struct platform_driver vchiq_driver = {
	.driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	},
	.probe = vchiq_probe,
	.remove = vchiq_remove,
};
1851 
vchiq_driver_init(void)1852 static int __init vchiq_driver_init(void)
1853 {
1854 	int ret;
1855 
1856 	ret = platform_driver_register(&vchiq_driver);
1857 	if (ret)
1858 		pr_err("Failed to register vchiq driver\n");
1859 
1860 	return ret;
1861 }
1862 module_init(vchiq_driver_init);
1863 
/* Module exit point: unregister the platform driver. */
static void __exit vchiq_driver_exit(void)
{
	platform_driver_unregister(&vchiq_driver);
}
module_exit(vchiq_driver_exit);
1869 
/* Module metadata. */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");
1873