// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/compat.h>
#include <linux/dma-mapping.h>
#include <soc/bcm2835/raspberrypi-firmware.h>

#include "vchiq_core.h"
#include "vchiq_ioctl.h"
#include "vchiq_arm.h"
#include "vchiq_debugfs.h"

#define DEVICE_NAME "vchiq"

/* Override the default prefix, which would be vchiq_arm (from the filename) */
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX DEVICE_NAME "."

/* Some per-instance constants */
#define MAX_COMPLETIONS 128
#define MAX_SERVICES 64
#define MAX_ELEMENTS 8
#define MSG_QUEUE_SIZE 128

#define KEEPALIVE_VER 1
#define KEEPALIVE_VER_MIN KEEPALIVE_VER
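
/*
 * MAX_COMPLETIONS and MSG_QUEUE_SIZE must both be powers of two: the
 * completion and message queues below are indexed with free-running
 * counters that are masked with (size - 1) rather than reduced modulo
 * the size, e.g.
 *
 *	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];
 *	header = msg_queue[msg_remove & (MSG_QUEUE_SIZE - 1)];
 *
 * Fullness is detected by comparing (insert - remove) against the size,
 * which stays correct when the counters wrap.
 */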

/* Run time control of log level, based on KERN_XXX level. */
int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
int vchiq_susp_log_level = VCHIQ_LOG_ERROR;

#define SUSPEND_TIMER_TIMEOUT_MS 100
#define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000

#define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
static const char *const suspend_state_names[] = {
	"VC_SUSPEND_FORCE_CANCELED",
	"VC_SUSPEND_REJECTED",
	"VC_SUSPEND_FAILED",
	"VC_SUSPEND_IDLE",
	"VC_SUSPEND_REQUESTED",
	"VC_SUSPEND_IN_PROGRESS",
	"VC_SUSPEND_SUSPENDED"
};
#define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
static const char *const resume_state_names[] = {
	"VC_RESUME_FAILED",
	"VC_RESUME_IDLE",
	"VC_RESUME_REQUESTED",
	"VC_RESUME_IN_PROGRESS",
	"VC_RESUME_RESUMED"
};
/* The number of times we allow force suspend to timeout before actually
** _forcing_ suspend. This is to cater for SW which fails to release vchiq
** correctly - we don't want to prevent ARM suspend indefinitely in this case.
*/
#define FORCE_SUSPEND_FAIL_MAX 8

/* The time in ms allowed for videocore to go idle when force suspend has been
 * requested */
#define FORCE_SUSPEND_TIMEOUT_MS 200

static void suspend_timer_callback(struct timer_list *t);

struct user_service {
	struct vchiq_service *service;
	void *userdata;
	VCHIQ_INSTANCE_T instance;
	char is_vchi;
	char dequeue_pending;
	char close_pending;
	int message_available_pos;
	int msg_insert;
	int msg_remove;
	struct completion insert_event;
	struct completion remove_event;
	struct completion close_event;
	struct vchiq_header *msg_queue[MSG_QUEUE_SIZE];
};

struct bulk_waiter_node {
	struct bulk_waiter bulk_waiter;
	int pid;
	struct list_head list;
};

struct vchiq_instance_struct {
	struct vchiq_state *state;
	struct vchiq_completion_data completions[MAX_COMPLETIONS];
	int completion_insert;
	int completion_remove;
	struct completion insert_event;
	struct completion remove_event;
	struct mutex completion_mutex;

	int connected;
	int closing;
	int pid;
	int mark;
	int use_close_delivered;
	int trace;

	struct list_head bulk_waiter_list;
	struct mutex bulk_waiter_list_mutex;

	struct vchiq_debugfs_node debugfs_node;
};

struct dump_context {
	char __user *buf;
	size_t actual;
	size_t space;
	loff_t offset;
};

static struct cdev vchiq_cdev;
static dev_t vchiq_devid;
static struct vchiq_state g_state;
static struct class *vchiq_class;
static DEFINE_SPINLOCK(msg_queue_spinlock);
static struct platform_device *bcm2835_camera;
static struct platform_device *bcm2835_audio;

static struct vchiq_drvdata bcm2835_drvdata = {
	.cache_line_size = 32,
};

static struct vchiq_drvdata bcm2836_drvdata = {
	.cache_line_size = 64,
};

static const char *const ioctl_names[] = {
	"CONNECT",
	"SHUTDOWN",
	"CREATE_SERVICE",
	"REMOVE_SERVICE",
	"QUEUE_MESSAGE",
	"QUEUE_BULK_TRANSMIT",
	"QUEUE_BULK_RECEIVE",
	"AWAIT_COMPLETION",
	"DEQUEUE_MESSAGE",
	"GET_CLIENT_ID",
	"GET_CONFIG",
	"CLOSE_SERVICE",
	"USE_SERVICE",
	"RELEASE_SERVICE",
	"SET_SERVICE_OPTION",
	"DUMP_PHYS_MEM",
	"LIB_VERSION",
	"CLOSE_DELIVERED"
};

vchiq_static_assert(ARRAY_SIZE(ioctl_names) ==
		    (VCHIQ_IOC_MAX + 1));

static VCHIQ_STATUS_T
vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
	unsigned int size, VCHIQ_BULK_DIR_T dir);

#define VCHIQ_INIT_RETRIES 10
VCHIQ_STATUS_T vchiq_initialise(VCHIQ_INSTANCE_T *instance_out)
{
	VCHIQ_STATUS_T status = VCHIQ_ERROR;
	struct vchiq_state *state;
	VCHIQ_INSTANCE_T instance = NULL;
	int i;

	vchiq_log_trace(vchiq_core_log_level, "%s called", __func__);

	/* VideoCore may not be ready due to boot up timing.
	 * It may never be ready if kernel and firmware are mismatched, so
	 * don't block forever.
	 */
	for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
		state = vchiq_get_state();
		if (state)
			break;
		usleep_range(500, 600);
	}
	if (i == VCHIQ_INIT_RETRIES) {
		vchiq_log_error(vchiq_core_log_level,
				"%s: videocore not initialized\n", __func__);
		goto failed;
	} else if (i > 0) {
		vchiq_log_warning(vchiq_core_log_level,
				  "%s: videocore initialized after %d retries\n",
				  __func__, i);
	}

	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
	if (!instance) {
		vchiq_log_error(vchiq_core_log_level,
				"%s: error allocating vchiq instance\n",
				__func__);
		goto failed;
	}

	instance->connected = 0;
	instance->state = state;
	mutex_init(&instance->bulk_waiter_list_mutex);
	INIT_LIST_HEAD(&instance->bulk_waiter_list);

	*instance_out = instance;

	status = VCHIQ_SUCCESS;

failed:
	vchiq_log_trace(vchiq_core_log_level,
			"%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_initialise);
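
/*
 * A minimal sketch of how a kernel-side client drives the API exported
 * in this file, illustrative only: the fourcc, my_callback and version
 * numbers are placeholders rather than a real VideoCore service.
 *
 *	VCHIQ_INSTANCE_T instance;
 *	VCHIQ_SERVICE_HANDLE_T handle;
 *	struct vchiq_service_params params = {
 *		.fourcc      = VCHIQ_MAKE_FOURCC('d', 'e', 'm', 'o'),
 *		.callback    = my_callback,
 *		.version     = 1,
 *		.version_min = 1,
 *	};
 *
 *	if (vchiq_initialise(&instance) != VCHIQ_SUCCESS)
 *		return -ENXIO;
 *	if (vchiq_connect(instance) != VCHIQ_SUCCESS)
 *		goto err_shutdown;
 *	if (vchiq_open_service(instance, &params, &handle) != VCHIQ_SUCCESS)
 *		goto err_shutdown;
 */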

VCHIQ_STATUS_T vchiq_shutdown(VCHIQ_INSTANCE_T instance)
{
	VCHIQ_STATUS_T status;
	struct vchiq_state *state = instance->state;

	vchiq_log_trace(vchiq_core_log_level,
			"%s(%p) called", __func__, instance);

	if (mutex_lock_killable(&state->mutex))
		return VCHIQ_RETRY;

	/* Remove all services */
	status = vchiq_shutdown_internal(state, instance);

	mutex_unlock(&state->mutex);

	vchiq_log_trace(vchiq_core_log_level,
			"%s(%p): returning %d", __func__, instance, status);

	if (status == VCHIQ_SUCCESS) {
		struct bulk_waiter_node *waiter, *next;

		list_for_each_entry_safe(waiter, next,
					 &instance->bulk_waiter_list, list) {
			list_del(&waiter->list);
			vchiq_log_info(vchiq_arm_log_level,
				       "bulk_waiter - cleaned up %pK for pid %d",
				       waiter, waiter->pid);
			kfree(waiter);
		}
		kfree(instance);
	}

	return status;
}
EXPORT_SYMBOL(vchiq_shutdown);

static int vchiq_is_connected(VCHIQ_INSTANCE_T instance)
{
	return instance->connected;
}

VCHIQ_STATUS_T vchiq_connect(VCHIQ_INSTANCE_T instance)
{
	VCHIQ_STATUS_T status;
	struct vchiq_state *state = instance->state;

	vchiq_log_trace(vchiq_core_log_level,
			"%s(%p) called", __func__, instance);

	if (mutex_lock_killable(&state->mutex)) {
		vchiq_log_trace(vchiq_core_log_level,
				"%s: call to mutex_lock failed", __func__);
		status = VCHIQ_RETRY;
		goto failed;
	}
	status = vchiq_connect_internal(state, instance);

	if (status == VCHIQ_SUCCESS)
		instance->connected = 1;

	mutex_unlock(&state->mutex);

failed:
	vchiq_log_trace(vchiq_core_log_level,
			"%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_connect);

VCHIQ_STATUS_T vchiq_add_service(
	VCHIQ_INSTANCE_T instance,
	const struct vchiq_service_params *params,
	VCHIQ_SERVICE_HANDLE_T *phandle)
{
	VCHIQ_STATUS_T status;
	struct vchiq_state *state = instance->state;
	struct vchiq_service *service = NULL;
	int srvstate;

	vchiq_log_trace(vchiq_core_log_level,
			"%s(%p) called", __func__, instance);

	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;

	srvstate = vchiq_is_connected(instance)
		? VCHIQ_SRVSTATE_LISTENING
		: VCHIQ_SRVSTATE_HIDDEN;

	service = vchiq_add_service_internal(
			state,
			params,
			srvstate,
			instance,
			NULL);

	if (service) {
		*phandle = service->handle;
		status = VCHIQ_SUCCESS;
	} else
		status = VCHIQ_ERROR;

	vchiq_log_trace(vchiq_core_log_level,
			"%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_add_service);

VCHIQ_STATUS_T vchiq_open_service(
	VCHIQ_INSTANCE_T instance,
	const struct vchiq_service_params *params,
	VCHIQ_SERVICE_HANDLE_T *phandle)
{
	VCHIQ_STATUS_T status = VCHIQ_ERROR;
	struct vchiq_state *state = instance->state;
	struct vchiq_service *service = NULL;

	vchiq_log_trace(vchiq_core_log_level,
			"%s(%p) called", __func__, instance);

	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;

	if (!vchiq_is_connected(instance))
		goto failed;

	service = vchiq_add_service_internal(state,
					     params,
					     VCHIQ_SRVSTATE_OPENING,
					     instance,
					     NULL);

	if (service) {
		*phandle = service->handle;
		status = vchiq_open_service_internal(service, current->pid);
		if (status != VCHIQ_SUCCESS) {
			vchiq_remove_service(service->handle);
			*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
		}
	}

failed:
	vchiq_log_trace(vchiq_core_log_level,
			"%s(%p): returning %d", __func__, instance, status);

	return status;
}
EXPORT_SYMBOL(vchiq_open_service);

VCHIQ_STATUS_T
vchiq_bulk_transmit(VCHIQ_SERVICE_HANDLE_T handle, const void *data,
		    unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
{
	VCHIQ_STATUS_T status;

	switch (mode) {
	case VCHIQ_BULK_MODE_NOCALLBACK:
	case VCHIQ_BULK_MODE_CALLBACK:
		status = vchiq_bulk_transfer(handle, (void *)data, size,
					     userdata, mode,
					     VCHIQ_BULK_TRANSMIT);
		break;
	case VCHIQ_BULK_MODE_BLOCKING:
		status = vchiq_blocking_bulk_transfer(handle,
			(void *)data, size, VCHIQ_BULK_TRANSMIT);
		break;
	default:
		return VCHIQ_ERROR;
	}

	return status;
}
EXPORT_SYMBOL(vchiq_bulk_transmit);

VCHIQ_STATUS_T
vchiq_bulk_receive(VCHIQ_SERVICE_HANDLE_T handle, void *data,
		   unsigned int size, void *userdata, VCHIQ_BULK_MODE_T mode)
{
	VCHIQ_STATUS_T status;

	switch (mode) {
	case VCHIQ_BULK_MODE_NOCALLBACK:
	case VCHIQ_BULK_MODE_CALLBACK:
		status = vchiq_bulk_transfer(handle, data, size, userdata,
					     mode, VCHIQ_BULK_RECEIVE);
		break;
	case VCHIQ_BULK_MODE_BLOCKING:
		status = vchiq_blocking_bulk_transfer(handle,
			(void *)data, size, VCHIQ_BULK_RECEIVE);
		break;
	default:
		return VCHIQ_ERROR;
	}

	return status;
}
EXPORT_SYMBOL(vchiq_bulk_receive);

static VCHIQ_STATUS_T
vchiq_blocking_bulk_transfer(VCHIQ_SERVICE_HANDLE_T handle, void *data,
	unsigned int size, VCHIQ_BULK_DIR_T dir)
{
	VCHIQ_INSTANCE_T instance;
	struct vchiq_service *service;
	VCHIQ_STATUS_T status;
	struct bulk_waiter_node *waiter = NULL, *iter;

	service = find_service_by_handle(handle);
	if (!service)
		return VCHIQ_ERROR;

	instance = service->instance;

	unlock_service(service);

	mutex_lock(&instance->bulk_waiter_list_mutex);
	list_for_each_entry(iter, &instance->bulk_waiter_list, list) {
		if (iter->pid == current->pid) {
			/* The list iterator is only a valid node inside the
			 * loop, so record a match before leaving it. */
			list_del(&iter->list);
			waiter = iter;
			break;
		}
	}
	mutex_unlock(&instance->bulk_waiter_list_mutex);

	if (waiter) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* This thread has an outstanding bulk transfer. */
			if ((bulk->data != data) ||
			    (bulk->size != size)) {
				/* This is not a retry of the previous one.
				 * Cancel the signal when the transfer
				 * completes.
				 */
				spin_lock(&bulk_waiter_spinlock);
				bulk->userdata = NULL;
				spin_unlock(&bulk_waiter_spinlock);
			}
		}
	}

	if (!waiter) {
		waiter = kzalloc(sizeof(struct bulk_waiter_node), GFP_KERNEL);
		if (!waiter) {
			vchiq_log_error(vchiq_core_log_level,
					"%s - out of memory", __func__);
			return VCHIQ_ERROR;
		}
	}

	status = vchiq_bulk_transfer(handle, data, size, &waiter->bulk_waiter,
				     VCHIQ_BULK_MODE_BLOCKING, dir);
	if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
	    !waiter->bulk_waiter.bulk) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* Cancel the signal when the transfer
			 * completes.
			 */
			spin_lock(&bulk_waiter_spinlock);
			bulk->userdata = NULL;
			spin_unlock(&bulk_waiter_spinlock);
		}
		kfree(waiter);
	} else {
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		vchiq_log_info(vchiq_arm_log_level,
			       "saved bulk_waiter %pK for pid %d",
			       waiter, current->pid);
	}

	return status;
}
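
/*
 * Note on the blocking path above: when vchiq_bulk_transfer() returns
 * VCHIQ_RETRY because the caller was signalled, the bulk_waiter_node is
 * parked on instance->bulk_waiter_list keyed by the caller's pid. When
 * the thread retries the same transfer (same data pointer and size) the
 * node is found again and the transfer already in flight is picked up
 * rather than re-queued; a retry with different parameters instead
 * detaches the old transfer by clearing bulk->userdata.
 */
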
/****************************************************************************
*
* add_completion
*
***************************************************************************/

static VCHIQ_STATUS_T
add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
	       struct vchiq_header *header, struct user_service *user_service,
	       void *bulk_userdata)
{
	struct vchiq_completion_data *completion;
	int insert;

	DEBUG_INITIALISE(g_state.local)

	insert = instance->completion_insert;
	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		vchiq_log_trace(vchiq_arm_log_level,
				"%s - completion queue full", __func__);
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (wait_for_completion_interruptible(
					&instance->remove_event)) {
			vchiq_log_info(vchiq_arm_log_level,
				       "service_callback interrupted");
			return VCHIQ_RETRY;
		} else if (instance->closing) {
			vchiq_log_info(vchiq_arm_log_level,
				       "service_callback closing");
			return VCHIQ_SUCCESS;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/* Take an extra reference, to be held until
		   this CLOSED notification is delivered. */
		lock_service(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/* A write barrier is needed here to ensure that the entire completion
	   record is written out before the insert point. */
	wmb();

	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos = insert;

	insert++;
	instance->completion_insert = insert;

	complete(&instance->insert_event);

	return VCHIQ_SUCCESS;
}

/****************************************************************************
*
* service_callback
*
***************************************************************************/

static VCHIQ_STATUS_T
service_callback(VCHIQ_REASON_T reason, struct vchiq_header *header,
		 VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
{
	/* How do we ensure the callback goes to the right client?
	** The service userdata points to a user_service record
	** containing the original callback and the user state structure, which
	** contains a circular buffer for completion records.
	*/
	struct user_service *user_service;
	struct vchiq_service *service;
	VCHIQ_INSTANCE_T instance;
	bool skip_completion = false;

	DEBUG_INITIALISE(g_state.local)

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	service = handle_to_service(handle);
	BUG_ON(!service);
	user_service = (struct user_service *)service->base.userdata;
	instance = user_service->instance;

	if (!instance || instance->closing)
		return VCHIQ_SUCCESS;

	vchiq_log_trace(vchiq_arm_log_level,
			"%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
			__func__, (unsigned long)user_service,
			service->localport, user_service->userdata,
			reason, (unsigned long)header,
			(unsigned long)instance, (unsigned long)bulk_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&msg_queue_spinlock);
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
			vchiq_log_trace(vchiq_arm_log_level,
					"service_callback - msg queue full");
			/* If there is no MESSAGE_AVAILABLE in the completion
			** queue, add one
			*/
			if ((user_service->message_available_pos -
				instance->completion_remove) < 0) {
				VCHIQ_STATUS_T status;

				vchiq_log_info(vchiq_arm_log_level,
					       "Inserting extra MESSAGE_AVAILABLE");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				status = add_completion(instance, reason,
					NULL, user_service, bulk_userdata);
				if (status != VCHIQ_SUCCESS) {
					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
					return status;
				}
			}

			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			if (wait_for_completion_interruptible(
						&user_service->remove_event)) {
				vchiq_log_info(vchiq_arm_log_level,
					       "%s interrupted", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_RETRY;
			} else if (instance->closing) {
				vchiq_log_info(vchiq_arm_log_level,
					       "%s closing", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				return VCHIQ_ERROR;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&msg_queue_spinlock);
		}

		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;

		/* If there is a thread waiting in DEQUEUE_MESSAGE, or if
		** there is a MESSAGE_AVAILABLE in the completion queue then
		** bypass the completion queue.
		*/
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			user_service->dequeue_pending = 0;
			skip_completion = true;
		}

		spin_unlock(&msg_queue_spinlock);
		complete(&user_service->insert_event);

		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	if (skip_completion)
		return VCHIQ_SUCCESS;

	return add_completion(instance, reason, header, user_service,
			      bulk_userdata);
}

/****************************************************************************
*
* user_service_free
*
***************************************************************************/
static void
user_service_free(void *userdata)
{
	kfree(userdata);
}

/****************************************************************************
*
* close_delivered
*
***************************************************************************/
static void close_delivered(struct user_service *user_service)
{
	vchiq_log_info(vchiq_arm_log_level,
		       "%s(handle=%x)",
		       __func__, user_service->service->handle);

	if (user_service->close_pending) {
		/* Allow the underlying service to be culled */
		unlock_service(user_service->service);

		/* Wake the user-thread blocked in close_ or remove_service */
		complete(&user_service->close_event);

		user_service->close_pending = 0;
	}
}

struct vchiq_io_copy_callback_context {
	struct vchiq_element *element;
	size_t element_offset;
	unsigned long elements_to_go;
};

static ssize_t vchiq_ioc_copy_element_data(void *context, void *dest,
					   size_t offset, size_t maxsize)
{
	struct vchiq_io_copy_callback_context *cc = context;
	size_t total_bytes_copied = 0;
	size_t bytes_this_round;

	while (total_bytes_copied < maxsize) {
		if (!cc->elements_to_go)
			return total_bytes_copied;

		if (!cc->element->size) {
			cc->elements_to_go--;
			cc->element++;
			cc->element_offset = 0;
			continue;
		}

		bytes_this_round = min(cc->element->size - cc->element_offset,
				       maxsize - total_bytes_copied);

		if (copy_from_user(dest + total_bytes_copied,
				   cc->element->data + cc->element_offset,
				   bytes_this_round))
			return -EFAULT;

		cc->element_offset += bytes_this_round;
		total_bytes_copied += bytes_this_round;

		if (cc->element_offset == cc->element->size) {
			cc->elements_to_go--;
			cc->element++;
			cc->element_offset = 0;
		}
	}

	return maxsize;
}
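
/*
 * Illustrative example (not part of the driver): given two elements
 * { .data = "ab", .size = 2 } and { .data = "cd", .size = 2 }, a first
 * call with maxsize = 3 copies "abc" and a second call with maxsize = 1
 * copies "d". The context records how far through the element array the
 * copy has advanced, so a single message can be gathered across several
 * callback invocations.
 */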

/**************************************************************************
 *
 * vchiq_ioc_queue_message
 *
 **************************************************************************/
static VCHIQ_STATUS_T
vchiq_ioc_queue_message(VCHIQ_SERVICE_HANDLE_T handle,
			struct vchiq_element *elements,
			unsigned long count)
{
	struct vchiq_io_copy_callback_context context;
	unsigned long i;
	size_t total_size = 0;

	context.element = elements;
	context.element_offset = 0;
	context.elements_to_go = count;

	for (i = 0; i < count; i++) {
		if (!elements[i].data && elements[i].size != 0)
			return -EFAULT;

		total_size += elements[i].size;
	}

	return vchiq_queue_message(handle, vchiq_ioc_copy_element_data,
				   &context, total_size);
}

/****************************************************************************
*
* vchiq_ioctl
*
***************************************************************************/
static long
vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	VCHIQ_INSTANCE_T instance = file->private_data;
	VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
	struct vchiq_service *service = NULL;
	long ret = 0;
	int i, rc;

	DEBUG_INITIALISE(g_state.local)

	vchiq_log_trace(vchiq_arm_log_level,
			"%s - instance %pK, cmd %s, arg %lx",
			__func__, instance,
			((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
			 (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
			ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);

	switch (cmd) {
	case VCHIQ_IOC_SHUTDOWN:
		if (!instance->connected)
			break;

		/* Remove all services */
		i = 0;
		while ((service = next_service_by_instance(instance->state,
							   instance, &i)) != NULL) {
			status = vchiq_remove_service(service->handle);
			unlock_service(service);
			if (status != VCHIQ_SUCCESS)
				break;
		}
		service = NULL;

		if (status == VCHIQ_SUCCESS) {
			/* Wake the completion thread and ask it to exit */
			instance->closing = 1;
			complete(&instance->insert_event);
		}

		break;

	case VCHIQ_IOC_CONNECT:
		if (instance->connected) {
			ret = -EINVAL;
			break;
		}
		rc = mutex_lock_killable(&instance->state->mutex);
		if (rc) {
			vchiq_log_error(vchiq_arm_log_level,
					"vchiq: connect: could not lock mutex for state %d: %d",
					instance->state->id, rc);
			ret = -EINTR;
			break;
		}
		status = vchiq_connect_internal(instance->state, instance);
		mutex_unlock(&instance->state->mutex);

		if (status == VCHIQ_SUCCESS)
			instance->connected = 1;
		else
			vchiq_log_error(vchiq_arm_log_level,
					"vchiq: could not connect: %d", status);
		break;

	case VCHIQ_IOC_CREATE_SERVICE: {
		struct vchiq_create_service args;
		struct user_service *user_service = NULL;
		void *userdata;
		int srvstate;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		user_service = kmalloc(sizeof(*user_service), GFP_KERNEL);
		if (!user_service) {
			ret = -ENOMEM;
			break;
		}

		if (args.is_open) {
			if (!instance->connected) {
				ret = -ENOTCONN;
				kfree(user_service);
				break;
			}
			srvstate = VCHIQ_SRVSTATE_OPENING;
		} else {
			srvstate =
				instance->connected ?
				VCHIQ_SRVSTATE_LISTENING :
				VCHIQ_SRVSTATE_HIDDEN;
		}

		userdata = args.params.userdata;
		args.params.callback = service_callback;
		args.params.userdata = user_service;
		service = vchiq_add_service_internal(
				instance->state,
				&args.params, srvstate,
				instance, user_service_free);

		if (service != NULL) {
			user_service->service = service;
			user_service->userdata = userdata;
			user_service->instance = instance;
			user_service->is_vchi = (args.is_vchi != 0);
			user_service->dequeue_pending = 0;
			user_service->close_pending = 0;
			user_service->message_available_pos =
				instance->completion_remove - 1;
			user_service->msg_insert = 0;
			user_service->msg_remove = 0;
			init_completion(&user_service->insert_event);
			init_completion(&user_service->remove_event);
			init_completion(&user_service->close_event);

			if (args.is_open) {
				status = vchiq_open_service_internal
					(service, instance->pid);
				if (status != VCHIQ_SUCCESS) {
					vchiq_remove_service(service->handle);
					service = NULL;
					ret = (status == VCHIQ_RETRY) ?
						-EINTR : -EIO;
					break;
				}
			}

			if (copy_to_user((void __user *)
					 &(((struct vchiq_create_service __user *)
					    arg)->handle),
					 (const void *)&service->handle,
					 sizeof(service->handle))) {
				ret = -EFAULT;
				vchiq_remove_service(service->handle);
			}

			service = NULL;
		} else {
			ret = -EEXIST;
			kfree(user_service);
		}
	} break;

	case VCHIQ_IOC_CLOSE_SERVICE:
	case VCHIQ_IOC_REMOVE_SERVICE: {
		VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;
		struct user_service *user_service;

		service = find_service_for_instance(instance, handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		user_service = service->base.userdata;

		/* close_pending is false on first entry, and when the
		   wait in vchiq_close_service has been interrupted. */
		if (!user_service->close_pending) {
			status = (cmd == VCHIQ_IOC_CLOSE_SERVICE) ?
				 vchiq_close_service(service->handle) :
				 vchiq_remove_service(service->handle);
			if (status != VCHIQ_SUCCESS)
				break;
		}

		/* close_pending is true once the underlying service
		   has been closed until the client library calls the
		   CLOSE_DELIVERED ioctl, signalling close_event. */
		if (user_service->close_pending &&
		    wait_for_completion_interruptible(
			    &user_service->close_event))
			status = VCHIQ_RETRY;
		break;
	}

	case VCHIQ_IOC_USE_SERVICE:
	case VCHIQ_IOC_RELEASE_SERVICE: {
		VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;

		service = find_service_for_instance(instance, handle);
		if (service != NULL) {
			status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
				 vchiq_use_service_internal(service) :
				 vchiq_release_service_internal(service);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
						"%s: cmd %s returned error %d for service %c%c%c%c:%03d",
						__func__,
						(cmd == VCHIQ_IOC_USE_SERVICE) ?
						"VCHIQ_IOC_USE_SERVICE" :
						"VCHIQ_IOC_RELEASE_SERVICE",
						status,
						VCHIQ_FOURCC_AS_4CHARS(
							service->base.fourcc),
						service->client_id);
				ret = -EINVAL;
			}
		} else
			ret = -EINVAL;
	} break;

	case VCHIQ_IOC_QUEUE_MESSAGE: {
		struct vchiq_queue_message args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);

		if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
			/* Copy elements into kernel space */
			struct vchiq_element elements[MAX_ELEMENTS];

			if (copy_from_user(elements, args.elements,
					   args.count * sizeof(struct vchiq_element)) == 0)
				status = vchiq_ioc_queue_message
					(args.handle,
					 elements, args.count);
			else
				ret = -EFAULT;
		} else {
			ret = -EINVAL;
		}
	} break;

	case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
	case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
		struct vchiq_queue_bulk_transfer args;
		struct bulk_waiter_node *waiter = NULL, *iter;

		VCHIQ_BULK_DIR_T dir =
			(cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
			VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
			waiter = kzalloc(sizeof(struct bulk_waiter_node),
					 GFP_KERNEL);
			if (!waiter) {
				ret = -ENOMEM;
				break;
			}

			args.userdata = &waiter->bulk_waiter;
		} else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
			mutex_lock(&instance->bulk_waiter_list_mutex);
			list_for_each_entry(iter, &instance->bulk_waiter_list,
					    list) {
				if (iter->pid == current->pid) {
					/* Only use the iterator if a match is
					 * found; it is not a valid node once
					 * the loop completes. */
					list_del(&iter->list);
					waiter = iter;
					break;
				}
			}
			mutex_unlock(&instance->bulk_waiter_list_mutex);
			if (!waiter) {
				vchiq_log_error(vchiq_arm_log_level,
						"no bulk_waiter found for pid %d",
						current->pid);
				ret = -ESRCH;
				break;
			}
			vchiq_log_info(vchiq_arm_log_level,
				       "found bulk_waiter %pK for pid %d",
				       waiter, current->pid);
			args.userdata = &waiter->bulk_waiter;
		}

		status = vchiq_bulk_transfer(args.handle, args.data, args.size,
					     args.userdata, args.mode, dir);

		if (!waiter)
			break;

		if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
		    !waiter->bulk_waiter.bulk) {
			if (waiter->bulk_waiter.bulk) {
				/* Cancel the signal when the transfer
				** completes. */
				spin_lock(&bulk_waiter_spinlock);
				waiter->bulk_waiter.bulk->userdata = NULL;
				spin_unlock(&bulk_waiter_spinlock);
			}
			kfree(waiter);
		} else {
			const VCHIQ_BULK_MODE_T mode_waiting =
				VCHIQ_BULK_MODE_WAITING;
			waiter->pid = current->pid;
			mutex_lock(&instance->bulk_waiter_list_mutex);
			list_add(&waiter->list, &instance->bulk_waiter_list);
			mutex_unlock(&instance->bulk_waiter_list_mutex);
			vchiq_log_info(vchiq_arm_log_level,
				       "saved bulk_waiter %pK for pid %d",
				       waiter, current->pid);

			if (copy_to_user((void __user *)
					 &(((struct vchiq_queue_bulk_transfer __user *)
					    arg)->mode),
					 (const void *)&mode_waiting,
					 sizeof(mode_waiting)))
				ret = -EFAULT;
		}
	} break;

	case VCHIQ_IOC_AWAIT_COMPLETION: {
		struct vchiq_await_completion args;

		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
		if (!instance->connected) {
			ret = -ENOTCONN;
			break;
		}

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		mutex_lock(&instance->completion_mutex);

		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
		while ((instance->completion_remove ==
			instance->completion_insert)
			&& !instance->closing) {
			int rc;

			DEBUG_TRACE(AWAIT_COMPLETION_LINE);
			mutex_unlock(&instance->completion_mutex);
			rc = wait_for_completion_interruptible(
						&instance->insert_event);
			mutex_lock(&instance->completion_mutex);
			if (rc) {
				DEBUG_TRACE(AWAIT_COMPLETION_LINE);
				vchiq_log_info(vchiq_arm_log_level,
					       "AWAIT_COMPLETION interrupted");
				ret = -EINTR;
				break;
			}
		}
		DEBUG_TRACE(AWAIT_COMPLETION_LINE);

		if (ret == 0) {
			int msgbufcount = args.msgbufcount;
			int remove = instance->completion_remove;

			for (ret = 0; ret < args.count; ret++) {
				struct vchiq_completion_data *completion;
				struct vchiq_service *service;
				struct user_service *user_service;
				struct vchiq_header *header;

				if (remove == instance->completion_insert)
					break;

				completion = &instance->completions[
					remove & (MAX_COMPLETIONS - 1)];

				/*
				 * A read memory barrier is needed to stop
				 * prefetch of a stale completion record
				 */
				rmb();

				service = completion->service_userdata;
				user_service = service->base.userdata;
				completion->service_userdata =
					user_service->userdata;

				header = completion->header;
				if (header) {
					void __user *msgbuf;
					int msglen;

					msglen = header->size +
						 sizeof(struct vchiq_header);
					/* This must be a VCHIQ-style service */
					if (args.msgbufsize < msglen) {
						vchiq_log_error(
							vchiq_arm_log_level,
							"header %pK: msgbufsize %x < msglen %x",
							header, args.msgbufsize,
							msglen);
						WARN(1, "invalid message size\n");
						if (ret == 0)
							ret = -EMSGSIZE;
						break;
					}
					if (msgbufcount <= 0)
						/* Stall here for lack of a
						** buffer for the message. */
						break;
					/* Get the pointer from user space */
					msgbufcount--;
					if (copy_from_user(&msgbuf,
							   (const void __user *)
							   &args.msgbufs[msgbufcount],
							   sizeof(msgbuf))) {
						if (ret == 0)
							ret = -EFAULT;
						break;
					}

					/* Copy the message to user space */
					if (copy_to_user(msgbuf, header,
							 msglen)) {
						if (ret == 0)
							ret = -EFAULT;
						break;
					}

					/* Now it has been copied, the message
					** can be released. */
					vchiq_release_message(service->handle,
							      header);

					/* The completion must point to the
					** msgbuf. */
					completion->header = msgbuf;
				}

				if ((completion->reason ==
					VCHIQ_SERVICE_CLOSED) &&
				    !instance->use_close_delivered)
					unlock_service(service);

				if (copy_to_user((void __user *)(
						(size_t)args.buf + ret *
						sizeof(struct vchiq_completion_data)),
						completion,
						sizeof(struct vchiq_completion_data))) {
					if (ret == 0)
						ret = -EFAULT;
					break;
				}

				/*
				 * Ensure that the above copy has completed
				 * before advancing the remove pointer.
				 */
				mb();
				remove++;
				instance->completion_remove = remove;
			}

			if (msgbufcount != args.msgbufcount) {
				if (copy_to_user((void __user *)
						 &((struct vchiq_await_completion *)arg)
						 ->msgbufcount,
						 &msgbufcount,
						 sizeof(msgbufcount))) {
					ret = -EFAULT;
				}
			}
		}

		if (ret)
			complete(&instance->remove_event);
		mutex_unlock(&instance->completion_mutex);
		DEBUG_TRACE(AWAIT_COMPLETION_LINE);
	} break;

	case VCHIQ_IOC_DEQUEUE_MESSAGE: {
		struct vchiq_dequeue_message args;
		struct user_service *user_service;
		struct vchiq_header *header;

		DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}
		service = find_service_for_instance(instance, args.handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}
		user_service = (struct user_service *)service->base.userdata;
		if (user_service->is_vchi == 0) {
			ret = -EINVAL;
			break;
		}

		spin_lock(&msg_queue_spinlock);
		if (user_service->msg_remove == user_service->msg_insert) {
			if (!args.blocking) {
				spin_unlock(&msg_queue_spinlock);
				DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
				ret = -EWOULDBLOCK;
				break;
			}
			user_service->dequeue_pending = 1;
			do {
				spin_unlock(&msg_queue_spinlock);
				DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
				if (wait_for_completion_interruptible(
							&user_service->insert_event)) {
					vchiq_log_info(vchiq_arm_log_level,
						       "DEQUEUE_MESSAGE interrupted");
					ret = -EINTR;
					break;
				}
				spin_lock(&msg_queue_spinlock);
			} while (user_service->msg_remove ==
				 user_service->msg_insert);

			if (ret)
				break;
		}

		BUG_ON((int)(user_service->msg_insert -
			     user_service->msg_remove) < 0);

		header = user_service->msg_queue[user_service->msg_remove &
						 (MSG_QUEUE_SIZE - 1)];
		user_service->msg_remove++;
		spin_unlock(&msg_queue_spinlock);

		complete(&user_service->remove_event);
		if (header == NULL)
			ret = -ENOTCONN;
		else if (header->size <= args.bufsize) {
			/* Copy to user space if msgbuf is not NULL */
			if ((args.buf == NULL) ||
			    (copy_to_user((void __user *)args.buf,
					  header->data,
					  header->size) == 0)) {
				ret = header->size;
				vchiq_release_message(
					service->handle,
					header);
			} else
				ret = -EFAULT;
		} else {
			vchiq_log_error(vchiq_arm_log_level,
					"header %pK: bufsize %x < size %x",
					header, args.bufsize, header->size);
			WARN(1, "invalid size\n");
			ret = -EMSGSIZE;
		}
		DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
	} break;

	case VCHIQ_IOC_GET_CLIENT_ID: {
		VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;

		ret = vchiq_get_client_id(handle);
	} break;

	case VCHIQ_IOC_GET_CONFIG: {
		struct vchiq_get_config args;
		struct vchiq_config config;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}
		if (args.config_size > sizeof(config)) {
			ret = -EINVAL;
			break;
		}

		vchiq_get_config(&config);
		if (copy_to_user(args.pconfig, &config, args.config_size)) {
			ret = -EFAULT;
			break;
		}
	} break;

	case VCHIQ_IOC_SET_SERVICE_OPTION: {
		struct vchiq_set_service_option args;

		if (copy_from_user(&args, (const void __user *)arg,
				   sizeof(args))) {
			ret = -EFAULT;
			break;
		}

		service = find_service_for_instance(instance, args.handle);
		if (!service) {
			ret = -EINVAL;
			break;
		}

		status = vchiq_set_service_option(
				args.handle, args.option, args.value);
	} break;

	case VCHIQ_IOC_LIB_VERSION: {
		unsigned int lib_version = (unsigned int)arg;

		if (lib_version < VCHIQ_VERSION_MIN)
			ret = -EINVAL;
		else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
			instance->use_close_delivered = 1;
	} break;

	case VCHIQ_IOC_CLOSE_DELIVERED: {
		VCHIQ_SERVICE_HANDLE_T handle = (VCHIQ_SERVICE_HANDLE_T)arg;

		service = find_closed_service_for_instance(instance, handle);
		if (service != NULL) {
			struct user_service *user_service =
				(struct user_service *)service->base.userdata;
			close_delivered(user_service);
		} else
			ret = -EINVAL;
	} break;

	default:
		ret = -ENOTTY;
		break;
	}

	if (service)
		unlock_service(service);

	if (ret == 0) {
		if (status == VCHIQ_ERROR)
			ret = -EIO;
		else if (status == VCHIQ_RETRY)
			ret = -EINTR;
	}

	if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
	    (ret != -EWOULDBLOCK))
		vchiq_log_info(vchiq_arm_log_level,
			       "  ioctl instance %pK, cmd %s -> status %d, %ld",
			       instance,
			       (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
			       ioctl_names[_IOC_NR(cmd)] :
			       "<invalid>",
			       status, ret);
	else
		vchiq_log_trace(vchiq_arm_log_level,
				"  ioctl instance %pK, cmd %s -> status %d, %ld",
				instance,
				(_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
				ioctl_names[_IOC_NR(cmd)] :
				"<invalid>",
				status, ret);

	return ret;
}
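
/*
 * For reference, a user-space sketch of this ioctl interface, assuming
 * the device node is /dev/vchiq and the uapi definitions above are
 * available; the 'demo' fourcc and version numbers are placeholders:
 *
 *	int fd = open("/dev/vchiq", O_RDWR);
 *
 *	ioctl(fd, VCHIQ_IOC_CONNECT, 0);
 *
 *	struct vchiq_create_service args = {
 *		.params = {
 *			.fourcc      = VCHIQ_MAKE_FOURCC('d', 'e', 'm', 'o'),
 *			.version     = 1,
 *			.version_min = 1,
 *		},
 *		.is_open = 1,
 *		.is_vchi = 1,
 *	};
 *
 * After ioctl(fd, VCHIQ_IOC_CREATE_SERVICE, &args) succeeds, args.handle
 * identifies the service for QUEUE_MESSAGE, AWAIT_COMPLETION,
 * CLOSE_SERVICE and the other ioctls handled above.
 */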
1472
1473 #if defined(CONFIG_COMPAT)
1474
1475 struct vchiq_service_params32 {
1476 int fourcc;
1477 compat_uptr_t callback;
1478 compat_uptr_t userdata;
1479 short version; /* Increment for non-trivial changes */
1480 short version_min; /* Update for incompatible changes */
1481 };
1482
1483 struct vchiq_create_service32 {
1484 struct vchiq_service_params32 params;
1485 int is_open;
1486 int is_vchi;
1487 unsigned int handle; /* OUT */
1488 };
1489
1490 #define VCHIQ_IOC_CREATE_SERVICE32 \
1491 _IOWR(VCHIQ_IOC_MAGIC, 2, struct vchiq_create_service32)
1492
1493 static long
vchiq_compat_ioctl_create_service(struct file * file,unsigned int cmd,unsigned long arg)1494 vchiq_compat_ioctl_create_service(
1495 struct file *file,
1496 unsigned int cmd,
1497 unsigned long arg)
1498 {
1499 struct vchiq_create_service __user *args;
1500 struct vchiq_create_service32 __user *ptrargs32 =
1501 (struct vchiq_create_service32 __user *)arg;
1502 struct vchiq_create_service32 args32;
1503 long ret;
1504
1505 args = compat_alloc_user_space(sizeof(*args));
1506 if (!args)
1507 return -EFAULT;
1508
1509 if (copy_from_user(&args32, ptrargs32, sizeof(args32)))
1510 return -EFAULT;
1511
1512 if (put_user(args32.params.fourcc, &args->params.fourcc) ||
1513 put_user(compat_ptr(args32.params.callback),
1514 &args->params.callback) ||
1515 put_user(compat_ptr(args32.params.userdata),
1516 &args->params.userdata) ||
1517 put_user(args32.params.version, &args->params.version) ||
1518 put_user(args32.params.version_min,
1519 &args->params.version_min) ||
1520 put_user(args32.is_open, &args->is_open) ||
1521 put_user(args32.is_vchi, &args->is_vchi) ||
1522 put_user(args32.handle, &args->handle))
1523 return -EFAULT;
1524
1525 ret = vchiq_ioctl(file, VCHIQ_IOC_CREATE_SERVICE, (unsigned long)args);
1526
1527 if (ret < 0)
1528 return ret;
1529
1530 if (get_user(args32.handle, &args->handle))
1531 return -EFAULT;
1532
1533 if (copy_to_user(&ptrargs32->handle,
1534 &args32.handle,
1535 sizeof(args32.handle)))
1536 return -EFAULT;
1537
1538 return 0;
1539 }
1540
1541 struct vchiq_element32 {
1542 compat_uptr_t data;
1543 unsigned int size;
1544 };
1545
1546 struct vchiq_queue_message32 {
1547 unsigned int handle;
1548 unsigned int count;
1549 compat_uptr_t elements;
1550 };
1551
1552 #define VCHIQ_IOC_QUEUE_MESSAGE32 \
1553 _IOW(VCHIQ_IOC_MAGIC, 4, struct vchiq_queue_message32)
1554
1555 static long
vchiq_compat_ioctl_queue_message(struct file * file,unsigned int cmd,unsigned long arg)1556 vchiq_compat_ioctl_queue_message(struct file *file,
1557 unsigned int cmd,
1558 unsigned long arg)
1559 {
1560 struct vchiq_queue_message __user *args;
1561 struct vchiq_element __user *elements;
1562 struct vchiq_queue_message32 args32;
1563 unsigned int count;
1564
1565 if (copy_from_user(&args32,
1566 (struct vchiq_queue_message32 __user *)arg,
1567 sizeof(args32)))
1568 return -EFAULT;
1569
1570 args = compat_alloc_user_space(sizeof(*args) +
1571 (sizeof(*elements) * MAX_ELEMENTS));
1572
1573 if (!args)
1574 return -EFAULT;
1575
1576 if (put_user(args32.handle, &args->handle) ||
1577 put_user(args32.count, &args->count) ||
1578 put_user(compat_ptr(args32.elements), &args->elements))
1579 return -EFAULT;
1580
1581 if (args32.count > MAX_ELEMENTS)
1582 return -EINVAL;
1583
1584 if (args32.elements && args32.count) {
1585 struct vchiq_element32 tempelement32[MAX_ELEMENTS];
1586
1587 elements = (struct vchiq_element __user *)(args + 1);
1588
1589 if (copy_from_user(&tempelement32,
1590 compat_ptr(args32.elements),
1591 sizeof(tempelement32)))
1592 return -EFAULT;
1593
1594 for (count = 0; count < args32.count; count++) {
1595 if (put_user(compat_ptr(tempelement32[count].data),
1596 &elements[count].data) ||
1597 put_user(tempelement32[count].size,
1598 &elements[count].size))
1599 return -EFAULT;
1600 }
1601
1602 if (put_user(elements, &args->elements))
1603 return -EFAULT;
1604 }
1605
1606 return vchiq_ioctl(file, VCHIQ_IOC_QUEUE_MESSAGE, (unsigned long)args);
1607 }
1608
1609 struct vchiq_queue_bulk_transfer32 {
1610 unsigned int handle;
1611 compat_uptr_t data;
1612 unsigned int size;
1613 compat_uptr_t userdata;
1614 VCHIQ_BULK_MODE_T mode;
1615 };
1616
1617 #define VCHIQ_IOC_QUEUE_BULK_TRANSMIT32 \
1618 _IOWR(VCHIQ_IOC_MAGIC, 5, struct vchiq_queue_bulk_transfer32)
1619 #define VCHIQ_IOC_QUEUE_BULK_RECEIVE32 \
1620 _IOWR(VCHIQ_IOC_MAGIC, 6, struct vchiq_queue_bulk_transfer32)
1621
1622 static long
vchiq_compat_ioctl_queue_bulk(struct file * file,unsigned int cmd,unsigned long arg)1623 vchiq_compat_ioctl_queue_bulk(struct file *file,
1624 unsigned int cmd,
1625 unsigned long arg)
1626 {
1627 struct vchiq_queue_bulk_transfer __user *args;
1628 struct vchiq_queue_bulk_transfer32 args32;
1629 struct vchiq_queue_bulk_transfer32 __user *ptrargs32 =
1630 (struct vchiq_queue_bulk_transfer32 __user *)arg;
1631 long ret;
1632
1633 args = compat_alloc_user_space(sizeof(*args));
1634 if (!args)
1635 return -EFAULT;
1636
1637 if (copy_from_user(&args32, ptrargs32, sizeof(args32)))
1638 return -EFAULT;
1639
1640 if (put_user(args32.handle, &args->handle) ||
1641 put_user(compat_ptr(args32.data), &args->data) ||
1642 put_user(args32.size, &args->size) ||
1643 put_user(compat_ptr(args32.userdata), &args->userdata) ||
1644 put_user(args32.mode, &args->mode))
1645 return -EFAULT;
1646
1647 if (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT32)
1648 cmd = VCHIQ_IOC_QUEUE_BULK_TRANSMIT;
1649 else
1650 cmd = VCHIQ_IOC_QUEUE_BULK_RECEIVE;
1651
1652 ret = vchiq_ioctl(file, cmd, (unsigned long)args);
1653
1654 if (ret < 0)
1655 return ret;
1656
1657 if (get_user(args32.mode, &args->mode))
1658 return -EFAULT;
1659
1660 if (copy_to_user(&ptrargs32->mode,
1661 &args32.mode,
1662 sizeof(args32.mode)))
1663 return -EFAULT;
1664
1665 return 0;
1666 }
1667
1668 struct vchiq_completion_data32 {
1669 VCHIQ_REASON_T reason;
1670 compat_uptr_t header;
1671 compat_uptr_t service_userdata;
1672 compat_uptr_t bulk_userdata;
1673 };
1674
1675 struct vchiq_await_completion32 {
1676 unsigned int count;
1677 compat_uptr_t buf;
1678 unsigned int msgbufsize;
1679 unsigned int msgbufcount; /* IN/OUT */
1680 compat_uptr_t msgbufs;
1681 };
1682
1683 #define VCHIQ_IOC_AWAIT_COMPLETION32 \
1684 _IOWR(VCHIQ_IOC_MAGIC, 7, struct vchiq_await_completion32)
1685
1686 static long
vchiq_compat_ioctl_await_completion(struct file * file,unsigned int cmd,unsigned long arg)1687 vchiq_compat_ioctl_await_completion(struct file *file,
1688 unsigned int cmd,
1689 unsigned long arg)
1690 {
1691 struct vchiq_await_completion __user *args;
1692 struct vchiq_completion_data __user *completion;
1693 struct vchiq_completion_data completiontemp;
1694 struct vchiq_await_completion32 args32;
1695 struct vchiq_completion_data32 completion32;
1696 unsigned int __user *msgbufcount32;
1697 unsigned int msgbufcount_native;
1698 compat_uptr_t msgbuf32;
1699 void __user *msgbuf;
1700 void * __user *msgbufptr;
1701 long ret;
1702
1703 args = compat_alloc_user_space(sizeof(*args) +
1704 sizeof(*completion) +
1705 sizeof(*msgbufptr));
1706 if (!args)
1707 return -EFAULT;
1708
1709 completion = (struct vchiq_completion_data __user *)(args + 1);
1710 msgbufptr = (void * __user *)(completion + 1);
1711
1712 if (copy_from_user(&args32,
1713 (struct vchiq_completion_data32 __user *)arg,
1714 sizeof(args32)))
1715 return -EFAULT;
1716
1717 if (put_user(args32.count, &args->count) ||
1718 put_user(compat_ptr(args32.buf), &args->buf) ||
1719 put_user(args32.msgbufsize, &args->msgbufsize) ||
1720 put_user(args32.msgbufcount, &args->msgbufcount) ||
1721 put_user(compat_ptr(args32.msgbufs), &args->msgbufs))
1722 return -EFAULT;
1723
1724 /* These are simple cases, so just fall into the native handler */
1725 if (!args32.count || !args32.buf || !args32.msgbufcount)
1726 return vchiq_ioctl(file,
1727 VCHIQ_IOC_AWAIT_COMPLETION,
1728 (unsigned long)args);
1729
1730 /*
1731 * These are the more complex cases. Typical applications of this
1732 * ioctl will use a very large count, with a very large msgbufcount.
1733 * Since the native ioctl can asynchronously fill in the returned
1734 * buffers and the application can in theory begin processing messages
1735 * even before the ioctl returns, a bit of a trick is used here.
1736 *
1737 * By forcing both count and msgbufcount to be 1, it forces the native
1738 * ioctl to only claim at most 1 message is available. This tricks
1739 * the calling application into thinking only 1 message was actually
1740 * available in the queue so like all good applications it will retry
1741 * waiting until all the required messages are received.
1742 *
1743 * This trick has been tested and proven to work with vchiq_test,
1744 * Minecraft_PI, the "hello pi" examples, and various other
1745 * applications that are included in Raspbian.
1746 */
1747
1748 if (copy_from_user(&msgbuf32,
1749 compat_ptr(args32.msgbufs) +
1750 (sizeof(compat_uptr_t) *
1751 (args32.msgbufcount - 1)),
1752 sizeof(msgbuf32)))
1753 return -EFAULT;
1754
1755 msgbuf = compat_ptr(msgbuf32);
1756
1757 if (copy_to_user(msgbufptr,
1758 &msgbuf,
1759 sizeof(msgbuf)))
1760 return -EFAULT;
1761
1762 if (copy_to_user(&args->msgbufs,
1763 &msgbufptr,
1764 sizeof(msgbufptr)))
1765 return -EFAULT;
1766
1767 if (put_user(1U, &args->count) ||
1768 put_user(completion, &args->buf) ||
1769 put_user(1U, &args->msgbufcount))
1770 return -EFAULT;
1771
1772 ret = vchiq_ioctl(file,
1773 VCHIQ_IOC_AWAIT_COMPLETION,
1774 (unsigned long)args);
1775
1776 /*
1777 * An return value of 0 here means that no messages where available
1778 * in the message queue. In this case the native ioctl does not
1779 * return any data to the application at all. Not even to update
1780 * msgbufcount. This functionality needs to be kept here for
1781 * compatibility.
1782 *
1783 * Of course, < 0 means that an error occurred and no data is being
1784 * returned.
1785 *
1786 * Since count and msgbufcount was forced to 1, that means
1787 * the only other possible return value is 1. Meaning that 1 message
1788 * was available, so that multiple message case does not need to be
1789 * handled here.
1790 */
1791 if (ret <= 0)
1792 return ret;
1793
1794 if (copy_from_user(&completiontemp, completion, sizeof(*completion)))
1795 return -EFAULT;
1796
1797 completion32.reason = completiontemp.reason;
1798 completion32.header = ptr_to_compat(completiontemp.header);
1799 completion32.service_userdata =
1800 ptr_to_compat(completiontemp.service_userdata);
1801 completion32.bulk_userdata =
1802 ptr_to_compat(completiontemp.bulk_userdata);
1803
1804 if (copy_to_user(compat_ptr(args32.buf),
1805 &completion32,
1806 sizeof(completion32)))
1807 return -EFAULT;
1808
1809 if (get_user(msgbufcount_native, &args->msgbufcount))
1810 return -EFAULT;
1811
1812 if (!msgbufcount_native)
1813 args32.msgbufcount--;
1814
1815 msgbufcount32 =
1816 &((struct vchiq_await_completion32 __user *)arg)->msgbufcount;
1817
1818 if (copy_to_user(msgbufcount32,
1819 &args32.msgbufcount,
1820 sizeof(args32.msgbufcount)))
1821 return -EFAULT;
1822
1823 return 1;
1824 }
1825
1826 struct vchiq_dequeue_message32 {
1827 unsigned int handle;
1828 int blocking;
1829 unsigned int bufsize;
1830 compat_uptr_t buf;
1831 };
1832
1833 #define VCHIQ_IOC_DEQUEUE_MESSAGE32 \
1834 _IOWR(VCHIQ_IOC_MAGIC, 8, struct vchiq_dequeue_message32)
1835
1836 static long
vchiq_compat_ioctl_dequeue_message(struct file * file,unsigned int cmd,unsigned long arg)1837 vchiq_compat_ioctl_dequeue_message(struct file *file,
1838 unsigned int cmd,
1839 unsigned long arg)
1840 {
1841 struct vchiq_dequeue_message __user *args;
1842 struct vchiq_dequeue_message32 args32;
1843
1844 args = compat_alloc_user_space(sizeof(*args));
1845 if (!args)
1846 return -EFAULT;
1847
1848 if (copy_from_user(&args32,
1849 (struct vchiq_dequeue_message32 __user *)arg,
1850 sizeof(args32)))
1851 return -EFAULT;
1852
1853 if (put_user(args32.handle, &args->handle) ||
1854 put_user(args32.blocking, &args->blocking) ||
1855 put_user(args32.bufsize, &args->bufsize) ||
1856 put_user(compat_ptr(args32.buf), &args->buf))
1857 return -EFAULT;
1858
1859 return vchiq_ioctl(file, VCHIQ_IOC_DEQUEUE_MESSAGE,
1860 (unsigned long)args);
1861 }
1862
1863 struct vchiq_get_config32 {
1864 unsigned int config_size;
1865 compat_uptr_t pconfig;
1866 };
1867
1868 #define VCHIQ_IOC_GET_CONFIG32 \
1869 _IOWR(VCHIQ_IOC_MAGIC, 10, struct vchiq_get_config32)
1870
1871 static long
vchiq_compat_ioctl_get_config(struct file * file,unsigned int cmd,unsigned long arg)1872 vchiq_compat_ioctl_get_config(struct file *file,
1873 unsigned int cmd,
1874 unsigned long arg)
1875 {
1876 struct vchiq_get_config __user *args;
1877 struct vchiq_get_config32 args32;
1878
1879 args = compat_alloc_user_space(sizeof(*args));
1880 if (!args)
1881 return -EFAULT;
1882
1883 if (copy_from_user(&args32,
1884 (struct vchiq_get_config32 __user *)arg,
1885 sizeof(args32)))
1886 return -EFAULT;
1887
1888 if (put_user(args32.config_size, &args->config_size) ||
1889 put_user(compat_ptr(args32.pconfig), &args->pconfig))
1890 return -EFAULT;
1891
1892 return vchiq_ioctl(file, VCHIQ_IOC_GET_CONFIG, (unsigned long)args);
1893 }
1894
1895 static long
vchiq_compat_ioctl(struct file * file,unsigned int cmd,unsigned long arg)1896 vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1897 {
1898 switch (cmd) {
1899 case VCHIQ_IOC_CREATE_SERVICE32:
1900 return vchiq_compat_ioctl_create_service(file, cmd, arg);
1901 case VCHIQ_IOC_QUEUE_MESSAGE32:
1902 return vchiq_compat_ioctl_queue_message(file, cmd, arg);
1903 case VCHIQ_IOC_QUEUE_BULK_TRANSMIT32:
1904 case VCHIQ_IOC_QUEUE_BULK_RECEIVE32:
1905 return vchiq_compat_ioctl_queue_bulk(file, cmd, arg);
1906 case VCHIQ_IOC_AWAIT_COMPLETION32:
1907 return vchiq_compat_ioctl_await_completion(file, cmd, arg);
1908 case VCHIQ_IOC_DEQUEUE_MESSAGE32:
1909 return vchiq_compat_ioctl_dequeue_message(file, cmd, arg);
1910 case VCHIQ_IOC_GET_CONFIG32:
1911 return vchiq_compat_ioctl_get_config(file, cmd, arg);
1912 default:
1913 return vchiq_ioctl(file, cmd, arg);
1914 }
1915 }
1916
1917 #endif
1918
vchiq_open(struct inode * inode,struct file * file)1919 static int vchiq_open(struct inode *inode, struct file *file)
1920 {
1921 struct vchiq_state *state = vchiq_get_state();
1922 VCHIQ_INSTANCE_T instance;
1923
1924 vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
1925
1926 if (!state) {
1927 vchiq_log_error(vchiq_arm_log_level,
1928 "vchiq has no connection to VideoCore");
1929 return -ENOTCONN;
1930 }
1931
1932 instance = kzalloc(sizeof(*instance), GFP_KERNEL);
1933 if (!instance)
1934 return -ENOMEM;
1935
1936 instance->state = state;
1937 instance->pid = current->tgid;
1938
1939 vchiq_debugfs_add_instance(instance);
1940
1941 init_completion(&instance->insert_event);
1942 init_completion(&instance->remove_event);
1943 mutex_init(&instance->completion_mutex);
1944 mutex_init(&instance->bulk_waiter_list_mutex);
1945 INIT_LIST_HEAD(&instance->bulk_waiter_list);
1946
1947 file->private_data = instance;
1948
1949 return 0;
1950 }
1951
vchiq_release(struct inode * inode,struct file * file)1952 static int vchiq_release(struct inode *inode, struct file *file)
1953 {
1954 VCHIQ_INSTANCE_T instance = file->private_data;
1955 struct vchiq_state *state = vchiq_get_state();
1956 struct vchiq_service *service;
1957 int ret = 0;
1958 int i;
1959
1960 vchiq_log_info(vchiq_arm_log_level, "%s: instance=%pK", __func__,
1961 instance);
1962
1963 if (!state) {
1964 ret = -EPERM;
1965 goto out;
1966 }
1967
1968 /* Ensure videocore is awake to allow termination. */
1969 vchiq_use_internal(instance->state, NULL, USE_TYPE_VCHIQ);
1970
1971 mutex_lock(&instance->completion_mutex);
1972
1973 /* Wake the completion thread and ask it to exit */
1974 instance->closing = 1;
1975 complete(&instance->insert_event);
1976
1977 mutex_unlock(&instance->completion_mutex);
1978
1979 /* Wake the slot handler if the completion queue is full. */
1980 complete(&instance->remove_event);
1981
1982 /* Mark all services for termination... */
1983 i = 0;
1984 while ((service = next_service_by_instance(state, instance, &i))) {
1985 struct user_service *user_service = service->base.userdata;
1986
1987 /* Wake the slot handler if the msg queue is full. */
1988 complete(&user_service->remove_event);
1989
1990 vchiq_terminate_service_internal(service);
1991 unlock_service(service);
1992 }
1993
1994 /* ...and wait for them to die */
1995 i = 0;
1996 while ((service = next_service_by_instance(state, instance, &i))) {
1997 struct user_service *user_service = service->base.userdata;
1998
1999 wait_for_completion(&service->remove_event);
2000
2001 BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
2002
2003 spin_lock(&msg_queue_spinlock);
2004
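/* Drain any messages still queued for this service. MSG_QUEUE_SIZE is
 * a power of two, so masking the free-running remove index yields the
 * ring slot. The spinlock is dropped around vchiq_release_message(),
 * which may take other locks. */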
2005 while (user_service->msg_remove != user_service->msg_insert) {
2006 struct vchiq_header *header;
2007 int m = user_service->msg_remove & (MSG_QUEUE_SIZE - 1);
2008
2009 header = user_service->msg_queue[m];
2010 user_service->msg_remove++;
2011 spin_unlock(&msg_queue_spinlock);
2012
2013 if (header)
2014 vchiq_release_message(service->handle, header);
2015 spin_lock(&msg_queue_spinlock);
2016 }
2017
2018 spin_unlock(&msg_queue_spinlock);
2019
2020 unlock_service(service);
2021 }
2022
2023 /* Release any closed services */
2024 while (instance->completion_remove !=
2025 instance->completion_insert) {
2026 struct vchiq_completion_data *completion;
2027 struct vchiq_service *service;
2028
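/* MAX_COMPLETIONS is a power of two, so masking the free-running
 * remove counter yields the ring-buffer index. */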
2029 completion = &instance->completions[
2030 instance->completion_remove & (MAX_COMPLETIONS - 1)];
2031 service = completion->service_userdata;
2032 if (completion->reason == VCHIQ_SERVICE_CLOSED) {
2033 struct user_service *user_service =
2034 service->base.userdata;
2035
2036 /* Wake any blocked user-thread */
2037 if (instance->use_close_delivered)
2038 complete(&user_service->close_event);
2039 unlock_service(service);
2040 }
2041 instance->completion_remove++;
2042 }
2043
2044 /* Release the PEER service count. */
2045 vchiq_release_internal(instance->state, NULL);
2046
2047 {
2048 struct bulk_waiter_node *waiter, *next;
2049
2050 list_for_each_entry_safe(waiter, next,
2051 &instance->bulk_waiter_list, list) {
2052 list_del(&waiter->list);
2053 vchiq_log_info(vchiq_arm_log_level,
2054 "bulk_waiter - cleaned up %pK for pid %d",
2055 waiter, waiter->pid);
2056 kfree(waiter);
2057 }
2058 }
2059
2060 vchiq_debugfs_remove_instance(instance);
2061
2062 kfree(instance);
2063 file->private_data = NULL;
2064
2065 out:
2066 return ret;
2067 }
2068
2069 /****************************************************************************
2070 *
2071 * vchiq_dump
2072 *
2073 ***************************************************************************/
2074
2075 void
2076 vchiq_dump(void *dump_context, const char *str, int len)
2077 {
2078 struct dump_context *context = (struct dump_context *)dump_context;
2079
2080 if (context->actual < context->space) {
2081 int copy_bytes;
2082
2083 if (context->offset > 0) {
2084 int skip_bytes = min(len, (int)context->offset);
2085
2086 str += skip_bytes;
2087 len -= skip_bytes;
2088 context->offset -= skip_bytes;
2089 if (context->offset > 0)
2090 return;
2091 }
2092 copy_bytes = min(len, (int)(context->space - context->actual));
2093 if (copy_bytes == 0)
2094 return;
2095 if (copy_to_user(context->buf + context->actual, str,
2096 copy_bytes)) {
2097 context->actual = -EFAULT;
return;
}
2098 context->actual += copy_bytes;
2099 len -= copy_bytes;
2100
2101 /* If the terminating NUL is included in the length, then it
2102 ** marks the end of a line and should be replaced with a
2103 ** newline. */
2104 if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
2105 char cr = '\n';
2106
2107 if (copy_to_user(context->buf + context->actual - 1,
2108 &cr, 1))
2109 context->actual = -EFAULT;
2110 }
2111 }
2112 }
2113
2114 /****************************************************************************
2115 *
2116 * vchiq_dump_platform_instances
2117 *
2118 ***************************************************************************/
2119
2120 void
2121 vchiq_dump_platform_instances(void *dump_context)
2122 {
2123 struct vchiq_state *state = vchiq_get_state();
2124 char buf[80];
2125 int len;
2126 int i;
2127
2128 /* There is no list of instances, so instead scan all services,
2129 marking those that have been dumped. */
2130
2131 for (i = 0; i < state->unused_service; i++) {
2132 struct vchiq_service *service = state->services[i];
2133 VCHIQ_INSTANCE_T instance;
2134
2135 if (service && (service->base.callback == service_callback)) {
2136 instance = service->instance;
2137 if (instance)
2138 instance->mark = 0;
2139 }
2140 }
2141
2142 for (i = 0; i < state->unused_service; i++) {
2143 struct vchiq_service *service = state->services[i];
2144 VCHIQ_INSTANCE_T instance;
2145
2146 if (service && (service->base.callback == service_callback)) {
2147 instance = service->instance;
2148 if (instance && !instance->mark) {
2149 len = snprintf(buf, sizeof(buf),
2150 "Instance %pK: pid %d,%s completions %d/%d",
2151 instance, instance->pid,
2152 instance->connected ? " connected, " :
2153 "",
2154 instance->completion_insert -
2155 instance->completion_remove,
2156 MAX_COMPLETIONS);
2157
2158 vchiq_dump(dump_context, buf, len + 1);
2159
2160 instance->mark = 1;
2161 }
2162 }
2163 }
2164 }
2165
2166 /****************************************************************************
2167 *
2168 * vchiq_dump_platform_service_state
2169 *
2170 ***************************************************************************/
2171
2172 void
2173 vchiq_dump_platform_service_state(void *dump_context,
2174 struct vchiq_service *service)
2175 {
2176 struct user_service *user_service =
2177 (struct user_service *)service->base.userdata;
2178 char buf[80];
2179 int len;
2180
2181 len = snprintf(buf, sizeof(buf), " instance %pK", service->instance);
2182
2183 if ((service->base.callback == service_callback) &&
2184 user_service->is_vchi) {
2185 len += snprintf(buf + len, sizeof(buf) - len,
2186 ", %d/%d messages",
2187 user_service->msg_insert - user_service->msg_remove,
2188 MSG_QUEUE_SIZE);
2189
2190 if (user_service->dequeue_pending)
2191 len += snprintf(buf + len, sizeof(buf) - len,
2192 " (dequeue pending)");
2193 }
2194
2195 vchiq_dump(dump_context, buf, len + 1);
2196 }
2197
2198 /****************************************************************************
2199 *
2200 * vchiq_read
2201 *
2202 ***************************************************************************/
2203
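/* Reading from /dev/vchiq (e.g. with cat) produces a textual dump of
 * the vchiq state. context.offset is seeded from *ppos so the dump can
 * be consumed across multiple read() calls. */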
2204 static ssize_t
2205 vchiq_read(struct file *file, char __user *buf,
2206 size_t count, loff_t *ppos)
2207 {
2208 struct dump_context context;
2209
2210 context.buf = buf;
2211 context.actual = 0;
2212 context.space = count;
2213 context.offset = *ppos;
2214
2215 vchiq_dump_state(&context, &g_state);
2216
2217 *ppos += context.actual;
2218
2219 return context.actual;
2220 }
2221
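/* Returns the global state once the remote (VideoCore) end has marked
 * itself initialised, or NULL before that point. */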
2222 struct vchiq_state *
2223 vchiq_get_state(void)
2224 {
2226 if (g_state.remote == NULL)
2227 printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
2228 else if (g_state.remote->initialised != 1)
2229 printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
2230 __func__, g_state.remote->initialised);
2231
2232 return ((g_state.remote != NULL) &&
2233 (g_state.remote->initialised == 1)) ? &g_state : NULL;
2234 }
2235
2236 static const struct file_operations
2237 vchiq_fops = {
2238 .owner = THIS_MODULE,
2239 .unlocked_ioctl = vchiq_ioctl,
2240 #if defined(CONFIG_COMPAT)
2241 .compat_ioctl = vchiq_compat_ioctl,
2242 #endif
2243 .open = vchiq_open,
2244 .release = vchiq_release,
2245 .read = vchiq_read
2246 };
2247
2248 /*
2249 * Autosuspend related functionality
2250 */
2251
2252 int
2253 vchiq_videocore_wanted(struct vchiq_state *state)
2254 {
2255 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2256
2257 if (!arm_state)
2258 /* autosuspend not supported - always return wanted */
2259 return 1;
2260 else if (arm_state->blocked_count)
2261 return 1;
2262 else if (!arm_state->videocore_use_count) {
2263 /* usage count zero - check for override unless we're forcing */
2264 if (arm_state->resume_blocked)
2265 return 0;
2266 else
2267 return vchiq_platform_videocore_wanted(state);
2268 } else
2269 /* non-zero usage count - videocore still required */
2270 return 1;
2271 }
2272
2273 static VCHIQ_STATUS_T
2274 vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
2275 struct vchiq_header *header,
2276 VCHIQ_SERVICE_HANDLE_T service_user,
2277 void *bulk_user)
2278 {
2279 vchiq_log_error(vchiq_susp_log_level,
2280 "%s callback reason %d", __func__, reason);
2281 return VCHIQ_SUCCESS;
2282 }
2283
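/*
 * The keepalive thread opens a local "KEEP" service and replays the
 * use/release notifications accumulated in ka_use_count and
 * ka_release_count, so VideoCore's use of the ARM side feeds into the
 * suspend/resume accounting below.
 */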
2284 static int
2285 vchiq_keepalive_thread_func(void *v)
2286 {
2287 struct vchiq_state *state = (struct vchiq_state *)v;
2288 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2289
2290 VCHIQ_STATUS_T status;
2291 VCHIQ_INSTANCE_T instance;
2292 VCHIQ_SERVICE_HANDLE_T ka_handle;
2293
2294 struct vchiq_service_params params = {
2295 .fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
2296 .callback = vchiq_keepalive_vchiq_callback,
2297 .version = KEEPALIVE_VER,
2298 .version_min = KEEPALIVE_VER_MIN
2299 };
2300
2301 status = vchiq_initialise(&instance);
2302 if (status != VCHIQ_SUCCESS) {
2303 vchiq_log_error(vchiq_susp_log_level,
2304 "%s vchiq_initialise failed %d", __func__, status);
2305 goto exit;
2306 }
2307
2308 status = vchiq_connect(instance);
2309 if (status != VCHIQ_SUCCESS) {
2310 vchiq_log_error(vchiq_susp_log_level,
2311 "%s vchiq_connect failed %d", __func__, status);
2312 goto shutdown;
2313 }
2314
2315 status = vchiq_add_service(instance, &params, &ka_handle);
2316 if (status != VCHIQ_SUCCESS) {
2317 vchiq_log_error(vchiq_susp_log_level,
2318 "%s vchiq_open_service failed %d", __func__, status);
2319 goto shutdown;
2320 }
2321
2322 while (1) {
2323 long rc = 0, uc = 0;
2324
2325 if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
2326 vchiq_log_error(vchiq_susp_log_level,
2327 "%s interrupted", __func__);
2328 flush_signals(current);
2329 continue;
2330 }
2331
2332 /* read and clear counters. Do release_count then use_count to
2333 * prevent getting more releases than uses */
2334 rc = atomic_xchg(&arm_state->ka_release_count, 0);
2335 uc = atomic_xchg(&arm_state->ka_use_count, 0);
2336
2337 /* Call use/release service the requisite number of times.
2338 * Process use before release so use counts don't go negative */
2339 while (uc--) {
2340 atomic_inc(&arm_state->ka_use_ack_count);
2341 status = vchiq_use_service(ka_handle);
2342 if (status != VCHIQ_SUCCESS) {
2343 vchiq_log_error(vchiq_susp_log_level,
2344 "%s vchiq_use_service error %d",
2345 __func__, status);
2346 }
2347 }
2348 while (rc--) {
2349 status = vchiq_release_service(ka_handle);
2350 if (status != VCHIQ_SUCCESS) {
2351 vchiq_log_error(vchiq_susp_log_level,
2352 "%s vchiq_release_service error %d",
2353 __func__, status);
2354 }
2355 }
2356 }
2357
2358 shutdown:
2359 vchiq_shutdown(instance);
2360 exit:
2361 return 0;
2362 }
2363
2364 VCHIQ_STATUS_T
2365 vchiq_arm_init_state(struct vchiq_state *state,
2366 struct vchiq_arm_state *arm_state)
2367 {
2368 if (arm_state) {
2369 rwlock_init(&arm_state->susp_res_lock);
2370
2371 init_completion(&arm_state->ka_evt);
2372 atomic_set(&arm_state->ka_use_count, 0);
2373 atomic_set(&arm_state->ka_use_ack_count, 0);
2374 atomic_set(&arm_state->ka_release_count, 0);
2375
2376 init_completion(&arm_state->vc_suspend_complete);
2377
2378 init_completion(&arm_state->vc_resume_complete);
2379 /* Initialise to 'done' state. We only want to block on resume
2380 * completion while videocore is suspended. */
2381 set_resume_state(arm_state, VC_RESUME_RESUMED);
2382
2383 init_completion(&arm_state->resume_blocker);
2384 /* Initialise to 'done' state. We only want to block on this
2385 * completion while resume is blocked */
2386 complete_all(&arm_state->resume_blocker);
2387
2388 init_completion(&arm_state->blocked_blocker);
2389 /* Initialise to 'done' state. We only want to block on this
2390 * completion while things are waiting on the resume blocker */
2391 complete_all(&arm_state->blocked_blocker);
2392
2393 arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
2394 arm_state->suspend_timer_running = 0;
2395 arm_state->state = state;
2396 timer_setup(&arm_state->suspend_timer, suspend_timer_callback,
2397 0);
2398
2399 arm_state->first_connect = 0;
2400
2401 }
2402 return VCHIQ_SUCCESS;
2403 }
2404
2405 /*
2406 ** Functions to modify the state variables:
2407 ** set_suspend_state
2408 ** set_resume_state
2409 **
2410 ** There are more state variables than we might like, so ensure they remain in
2411 ** step. Suspend and resume state are maintained separately, since most of
2412 ** these state machines can operate independently. However, there are a few
2413 ** states where state transitions in one state machine cause a reset to the
2414 ** other state machine. In addition, there are some completion events which
2415 ** need to occur on state machine reset and end-state(s), so these are also
2416 ** dealt with in these functions.
2417 **
2418 ** In all states we set the state variable according to the input, but in some
2419 ** cases we perform additional steps outlined below:
2420 **
2421 ** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
2422 ** The suspend completion is completed after any suspend
2423 ** attempt. When we reset the state machine we also reset
2424 ** the completion. This reset occurs when videocore is
2425 ** resumed, and also if we initiate suspend after a suspend
2426 ** failure.
2427 **
2428 ** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
2429 ** suspend - ie from this point on we must try to suspend
2430 ** before resuming can occur. We therefore also reset the
2431 ** resume state machine to VC_RESUME_IDLE in this state.
2432 **
2433 ** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
2434 ** complete_all on the suspend completion to notify
2435 ** anything waiting for suspend to happen.
2436 **
2437 ** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
2438 ** initiate resume, so no need to alter resume state.
2439 ** We call complete_all on the suspend completion to notify
2440 ** of suspend rejection.
2441 **
2442 ** VC_SUSPEND_FAILED - We failed to initiate videocore suspend. We notify the
2443 ** suspend completion and reset the resume state machine.
2444 **
2445 ** VC_RESUME_IDLE - Initialise the resume completion at the same time. The
2446 ** resume completion is in its 'done' state whenever
2447 ** videocore is running. Therefore, the VC_RESUME_IDLE
2448 ** state implies that videocore is suspended.
2449 ** Hence, any thread which needs to wait until videocore is
2450 ** running can wait on this completion - it will only block
2451 ** if videocore is suspended.
2452 **
2453 ** VC_RESUME_RESUMED - Resume has completed successfully. Videocore is running.
2454 ** Call complete_all on the resume completion to unblock
2455 ** any threads waiting for resume. Also reset the suspend
2456 ** state machine to its idle state.
2457 **
2458 ** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
2459 */
2460
2461 void
2462 set_suspend_state(struct vchiq_arm_state *arm_state,
2463 enum vc_suspend_status new_state)
2464 {
2465 /* set the state in all cases */
2466 arm_state->vc_suspend_state = new_state;
2467
2468 /* state specific additional actions */
2469 switch (new_state) {
2470 case VC_SUSPEND_FORCE_CANCELED:
2471 complete_all(&arm_state->vc_suspend_complete);
2472 break;
2473 case VC_SUSPEND_REJECTED:
2474 complete_all(&arm_state->vc_suspend_complete);
2475 break;
2476 case VC_SUSPEND_FAILED:
2477 complete_all(&arm_state->vc_suspend_complete);
2478 arm_state->vc_resume_state = VC_RESUME_RESUMED;
2479 complete_all(&arm_state->vc_resume_complete);
2480 break;
2481 case VC_SUSPEND_IDLE:
2482 reinit_completion(&arm_state->vc_suspend_complete);
2483 break;
2484 case VC_SUSPEND_REQUESTED:
2485 break;
2486 case VC_SUSPEND_IN_PROGRESS:
2487 set_resume_state(arm_state, VC_RESUME_IDLE);
2488 break;
2489 case VC_SUSPEND_SUSPENDED:
2490 complete_all(&arm_state->vc_suspend_complete);
2491 break;
2492 default:
2493 BUG();
2494 break;
2495 }
2496 }
2497
2498 void
2499 set_resume_state(struct vchiq_arm_state *arm_state,
2500 enum vc_resume_status new_state)
2501 {
2502 /* set the state in all cases */
2503 arm_state->vc_resume_state = new_state;
2504
2505 /* state specific additional actions */
2506 switch (new_state) {
2507 case VC_RESUME_FAILED:
2508 break;
2509 case VC_RESUME_IDLE:
2510 reinit_completion(&arm_state->vc_resume_complete);
2511 break;
2512 case VC_RESUME_REQUESTED:
2513 break;
2514 case VC_RESUME_IN_PROGRESS:
2515 break;
2516 case VC_RESUME_RESUMED:
2517 complete_all(&arm_state->vc_resume_complete);
2518 set_suspend_state(arm_state, VC_SUSPEND_IDLE);
2519 break;
2520 default:
2521 BUG();
2522 break;
2523 }
2524 }
2525
2526 /* should be called with the write lock held */
2527 static inline void
2528 start_suspend_timer(struct vchiq_arm_state *arm_state)
2529 {
2530 del_timer(&arm_state->suspend_timer);
2531 arm_state->suspend_timer.expires = jiffies +
2532 msecs_to_jiffies(arm_state->suspend_timer_timeout);
2533 add_timer(&arm_state->suspend_timer);
2534 arm_state->suspend_timer_running = 1;
2535 }
2536
2537 /* should be called with the write lock held */
2538 static inline void
2539 stop_suspend_timer(struct vchiq_arm_state *arm_state)
2540 {
2541 if (arm_state->suspend_timer_running) {
2542 del_timer(&arm_state->suspend_timer);
2543 arm_state->suspend_timer_running = 0;
2544 }
2545 }
2546
2547 static inline int
2548 need_resume(struct vchiq_state *state)
2549 {
2550 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2551
2552 return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
2553 (arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
2554 vchiq_videocore_wanted(state);
2555 }
2556
2557 static inline void
2558 unblock_resume(struct vchiq_arm_state *arm_state)
2559 {
2560 complete_all(&arm_state->resume_blocker);
2561 arm_state->resume_blocked = 0;
2562 }
2563
2564 /* Initiate suspend via slot handler. Should be called with the write lock
2565 * held */
2566 VCHIQ_STATUS_T
2567 vchiq_arm_vcsuspend(struct vchiq_state *state)
2568 {
2569 VCHIQ_STATUS_T status = VCHIQ_ERROR;
2570 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2571
2572 if (!arm_state)
2573 goto out;
2574
2575 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2576 status = VCHIQ_SUCCESS;
2577
2578 switch (arm_state->vc_suspend_state) {
2579 case VC_SUSPEND_REQUESTED:
2580 vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
2581 "requested", __func__);
2582 break;
2583 case VC_SUSPEND_IN_PROGRESS:
2584 vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
2585 "progress", __func__);
2586 break;
2587
2588 default:
2589 /* We don't expect to be in other states, so log but continue
2590 * anyway */
2591 vchiq_log_error(vchiq_susp_log_level,
2592 "%s unexpected suspend state %s", __func__,
2593 suspend_state_names[arm_state->vc_suspend_state +
2594 VC_SUSPEND_NUM_OFFSET]);
2595 /* fall through */
2596 case VC_SUSPEND_REJECTED:
2597 case VC_SUSPEND_FAILED:
2598 /* Ensure any idle state actions have been run */
2599 set_suspend_state(arm_state, VC_SUSPEND_IDLE);
2600 /* fall through */
2601 case VC_SUSPEND_IDLE:
2602 vchiq_log_info(vchiq_susp_log_level,
2603 "%s: suspending", __func__);
2604 set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
2605 /* kick the slot handler thread to initiate suspend */
2606 request_poll(state, NULL, 0);
2607 break;
2608 }
2609
2610 out:
2611 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
2612 return status;
2613 }
2614
2615 void
2616 vchiq_platform_check_suspend(struct vchiq_state *state)
2617 {
2618 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2619 int susp = 0;
2620
2621 if (!arm_state)
2622 goto out;
2623
2624 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2625
2626 write_lock_bh(&arm_state->susp_res_lock);
2627 if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
2628 arm_state->vc_resume_state == VC_RESUME_RESUMED) {
2629 set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
2630 susp = 1;
2631 }
2632 write_unlock_bh(&arm_state->susp_res_lock);
2633
2634 if (susp)
2635 vchiq_platform_suspend(state);
2636
2637 out:
2638 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2640 }
2641
2642 void
2643 vchiq_check_suspend(struct vchiq_state *state)
2644 {
2645 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2646
2647 if (!arm_state)
2648 goto out;
2649
2650 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2651
2652 write_lock_bh(&arm_state->susp_res_lock);
2653 if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
2654 arm_state->first_connect &&
2655 !vchiq_videocore_wanted(state)) {
2656 vchiq_arm_vcsuspend(state);
2657 }
2658 write_unlock_bh(&arm_state->susp_res_lock);
2659
2660 out:
2661 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2662 }
2663
2664 /* This function should be called with the write lock held */
2665 int
2666 vchiq_check_resume(struct vchiq_state *state)
2667 {
2668 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2669 int resume = 0;
2670
2671 if (!arm_state)
2672 goto out;
2673
2674 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2675
2676 if (need_resume(state)) {
2677 set_resume_state(arm_state, VC_RESUME_REQUESTED);
2678 request_poll(state, NULL, 0);
2679 resume = 1;
2680 }
2681
2682 out:
2683 vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
2684 return resume;
2685 }
2686
2687 VCHIQ_STATUS_T
2688 vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
2689 enum USE_TYPE_E use_type)
2690 {
2691 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2692 VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
2693 char entity[16];
2694 int *entity_uc;
2695 int local_uc, local_entity_uc;
2696
2697 if (!arm_state)
2698 goto out;
2699
2700 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2701
2702 if (use_type == USE_TYPE_VCHIQ) {
2703 sprintf(entity, "VCHIQ: ");
2704 entity_uc = &arm_state->peer_use_count;
2705 } else if (service) {
2706 sprintf(entity, "%c%c%c%c:%03d",
2707 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2708 service->client_id);
2709 entity_uc = &service->service_use_count;
2710 } else {
2711 vchiq_log_error(vchiq_susp_log_level, "%s null service "
2712 "ptr", __func__);
2713 ret = VCHIQ_ERROR;
2714 goto out;
2715 }
2716
2717 write_lock_bh(&arm_state->susp_res_lock);
2718 while (arm_state->resume_blocked) {
2719 /* If we call 'use' while force suspend is waiting for suspend,
2720 * then we're about to block the thread which the force is
2721 * waiting to complete, so we're bound to just time out. In this
2722 * case, set the suspend state such that the wait will be
2723 * canceled, so we can complete as quickly as possible. */
2724 if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
2725 VC_SUSPEND_IDLE) {
2726 set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
2727 break;
2728 }
2729 /* If suspend is already in progress then we need to block */
2730 if (!try_wait_for_completion(&arm_state->resume_blocker)) {
2731 /* Indicate that there are threads waiting on the resume
2732 * blocker. These need to be allowed to complete before
2733 * a _second_ call to force suspend can complete,
2734 * otherwise low priority threads might never actually
2735 * continue */
2736 arm_state->blocked_count++;
2737 write_unlock_bh(&arm_state->susp_res_lock);
2738 vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
2739 "blocked - waiting...", __func__, entity);
2740 if (wait_for_completion_killable(
2741 &arm_state->resume_blocker)) {
2742 vchiq_log_error(vchiq_susp_log_level, "%s %s "
2743 "wait for resume blocker interrupted",
2744 __func__, entity);
2745 ret = VCHIQ_ERROR;
2746 write_lock_bh(&arm_state->susp_res_lock);
2747 arm_state->blocked_count--;
2748 write_unlock_bh(&arm_state->susp_res_lock);
2749 goto out;
2750 }
2751 vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
2752 "unblocked", __func__, entity);
2753 write_lock_bh(&arm_state->susp_res_lock);
2754 if (--arm_state->blocked_count == 0)
2755 complete_all(&arm_state->blocked_blocker);
2756 }
2757 }
2758
2759 stop_suspend_timer(arm_state);
2760
2761 local_uc = ++arm_state->videocore_use_count;
2762 local_entity_uc = ++(*entity_uc);
2763
2764 /* If there's a pending request which hasn't yet been serviced then
2765 * just clear it. If we're past VC_SUSPEND_REQUESTED state then
2766 * vc_resume_complete will block until we either resume or fail to
2767 * suspend */
2768 if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
2769 set_suspend_state(arm_state, VC_SUSPEND_IDLE);
2770
2771 if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
2772 set_resume_state(arm_state, VC_RESUME_REQUESTED);
2773 vchiq_log_info(vchiq_susp_log_level,
2774 "%s %s count %d, state count %d",
2775 __func__, entity, local_entity_uc, local_uc);
2776 request_poll(state, NULL, 0);
2777 } else
2778 vchiq_log_trace(vchiq_susp_log_level,
2779 "%s %s count %d, state count %d",
2780 __func__, entity, *entity_uc, local_uc);
2781
2782 write_unlock_bh(&arm_state->susp_res_lock);
2783
2784 /* Completion is in a done state when we're not suspended, so this won't
2785 * block for the non-suspended case. */
2786 if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
2787 vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
2788 __func__, entity);
2789 if (wait_for_completion_killable(
2790 &arm_state->vc_resume_complete)) {
2791 vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
2792 "resume interrupted", __func__, entity);
2793 ret = VCHIQ_ERROR;
2794 goto out;
2795 }
2796 vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
2797 entity);
2798 }
2799
2800 if (ret == VCHIQ_SUCCESS) {
2801 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
2802 long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
2803
2804 while (ack_cnt && (status == VCHIQ_SUCCESS)) {
2805 /* Send the use notify to videocore */
2806 status = vchiq_send_remote_use_active(state);
2807 if (status == VCHIQ_SUCCESS)
2808 ack_cnt--;
2809 else
2810 atomic_add(ack_cnt,
2811 &arm_state->ka_use_ack_count);
2812 }
2813 }
2814
2815 out:
2816 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2817 return ret;
2818 }
2819
2820 VCHIQ_STATUS_T
2821 vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
2822 {
2823 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2824 VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
2825 char entity[16];
2826 int *entity_uc;
2827
2828 if (!arm_state)
2829 goto out;
2830
2831 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2832
2833 if (service) {
2834 sprintf(entity, "%c%c%c%c:%03d",
2835 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
2836 service->client_id);
2837 entity_uc = &service->service_use_count;
2838 } else {
2839 sprintf(entity, "PEER: ");
2840 entity_uc = &arm_state->peer_use_count;
2841 }
2842
2843 write_lock_bh(&arm_state->susp_res_lock);
2844 if (!arm_state->videocore_use_count || !(*entity_uc)) {
2845 /* Don't use BUG_ON - don't allow user thread to crash kernel */
2846 WARN_ON(!arm_state->videocore_use_count);
2847 WARN_ON(!(*entity_uc));
2848 ret = VCHIQ_ERROR;
2849 goto unlock;
2850 }
2851 --arm_state->videocore_use_count;
2852 --(*entity_uc);
2853
2854 if (!vchiq_videocore_wanted(state)) {
2855 if (vchiq_platform_use_suspend_timer() &&
2856 !arm_state->resume_blocked) {
2857 /* Only use the timer if we're not trying to force
2858 * suspend (=> resume_blocked) */
2859 start_suspend_timer(arm_state);
2860 } else {
2861 vchiq_log_info(vchiq_susp_log_level,
2862 "%s %s count %d, state count %d - suspending",
2863 __func__, entity, *entity_uc,
2864 arm_state->videocore_use_count);
2865 vchiq_arm_vcsuspend(state);
2866 }
2867 } else
2868 vchiq_log_trace(vchiq_susp_log_level,
2869 "%s %s count %d, state count %d",
2870 __func__, entity, *entity_uc,
2871 arm_state->videocore_use_count);
2872
2873 unlock:
2874 write_unlock_bh(&arm_state->susp_res_lock);
2875
2876 out:
2877 vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
2878 return ret;
2879 }
2880
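/* Called when VideoCore signals a remote use/release of the ARM; the
 * counts are applied asynchronously by the keepalive thread, which is
 * woken via ka_evt. */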
2881 void
2882 vchiq_on_remote_use(struct vchiq_state *state)
2883 {
2884 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2885
2886 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2887 atomic_inc(&arm_state->ka_use_count);
2888 complete(&arm_state->ka_evt);
2889 }
2890
2891 void
2892 vchiq_on_remote_release(struct vchiq_state *state)
2893 {
2894 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
2895
2896 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
2897 atomic_inc(&arm_state->ka_release_count);
2898 complete(&arm_state->ka_evt);
2899 }
2900
2901 VCHIQ_STATUS_T
2902 vchiq_use_service_internal(struct vchiq_service *service)
2903 {
2904 return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
2905 }
2906
2907 VCHIQ_STATUS_T
2908 vchiq_release_service_internal(struct vchiq_service *service)
2909 {
2910 return vchiq_release_internal(service->state, service);
2911 }
2912
2913 struct vchiq_debugfs_node *
2914 vchiq_instance_get_debugfs_node(VCHIQ_INSTANCE_T instance)
2915 {
2916 return &instance->debugfs_node;
2917 }
2918
2919 int
2920 vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
2921 {
2922 struct vchiq_service *service;
2923 int use_count = 0, i;
2924
2925 i = 0;
2926 while ((service = next_service_by_instance(instance->state,
2927 instance, &i)) != NULL) {
2928 use_count += service->service_use_count;
2929 unlock_service(service);
2930 }
2931 return use_count;
2932 }
2933
2934 int
2935 vchiq_instance_get_pid(VCHIQ_INSTANCE_T instance)
2936 {
2937 return instance->pid;
2938 }
2939
2940 int
2941 vchiq_instance_get_trace(VCHIQ_INSTANCE_T instance)
2942 {
2943 return instance->trace;
2944 }
2945
2946 void
2947 vchiq_instance_set_trace(VCHIQ_INSTANCE_T instance, int trace)
2948 {
2949 struct vchiq_service *service;
2950 int i;
2951
2952 i = 0;
2953 while ((service = next_service_by_instance(instance->state,
2954 instance, &i)) != NULL) {
2955 service->trace = trace;
2956 unlock_service(service);
2957 }
2958 instance->trace = (trace != 0);
2959 }
2960
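/* Fires suspend_timer_timeout ms after the last release; if nothing has
 * taken a new use in the meantime, vchiq_check_suspend() initiates
 * suspend. */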
2961 static void suspend_timer_callback(struct timer_list *t)
2962 {
2963 struct vchiq_arm_state *arm_state =
2964 from_timer(arm_state, t, suspend_timer);
2965 struct vchiq_state *state = arm_state->state;
2966
2967 vchiq_log_info(vchiq_susp_log_level,
2968 "%s - suspend timer expired - check suspend", __func__);
2969 vchiq_check_suspend(state);
2970 }
2971
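/*
 * Hypothetical usage sketch (not from this file): a kernel client would
 * typically bracket traffic on a service with use/release so the counts
 * above stay balanced:
 *
 *	if (vchiq_use_service(handle) == VCHIQ_SUCCESS) {
 *		... queue messages on 'handle' ...
 *		vchiq_release_service(handle);
 *	}
 */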
2972 VCHIQ_STATUS_T
2973 vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
2974 {
2975 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2976 struct vchiq_service *service = find_service_by_handle(handle);
2977
2978 if (service) {
2979 ret = vchiq_use_internal(service->state, service,
2980 USE_TYPE_SERVICE);
2981 unlock_service(service);
2982 }
2983 return ret;
2984 }
2985
2986 VCHIQ_STATUS_T
2987 vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
2988 {
2989 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
2990 struct vchiq_service *service = find_service_by_handle(handle);
2991
2992 if (service) {
2993 ret = vchiq_release_internal(service->state, service);
2994 unlock_service(service);
2995 }
2996 return ret;
2997 }
2998
2999 struct service_data_struct {
3000 int fourcc;
3001 int clientid;
3002 int use_count;
3003 };
3004
3005 void
3006 vchiq_dump_service_use_state(struct vchiq_state *state)
3007 {
3008 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
3009 struct service_data_struct *service_data;
3010 int i, found = 0;
3011 /* If there's more than 64 services, only dump ones with
3012 * non-zero counts */
3013 int only_nonzero = 0;
3014 static const char *nz = "<-- preventing suspend";
3015
3016 enum vc_suspend_status vc_suspend_state;
3017 enum vc_resume_status vc_resume_state;
3018 int peer_count;
3019 int vc_use_count;
3020 int active_services;
3021
3022 if (!arm_state)
3023 return;
3024
3025 service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
3026 GFP_KERNEL);
3027 if (!service_data)
3028 return;
3029
3030 read_lock_bh(&arm_state->susp_res_lock);
3031 vc_suspend_state = arm_state->vc_suspend_state;
3032 vc_resume_state = arm_state->vc_resume_state;
3033 peer_count = arm_state->peer_use_count;
3034 vc_use_count = arm_state->videocore_use_count;
3035 active_services = state->unused_service;
3036 if (active_services > MAX_SERVICES)
3037 only_nonzero = 1;
3038
3039 for (i = 0; i < active_services; i++) {
3040 struct vchiq_service *service_ptr = state->services[i];
3041
3042 if (!service_ptr)
3043 continue;
3044
3045 if (only_nonzero && !service_ptr->service_use_count)
3046 continue;
3047
3048 if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
3049 continue;
3050
3051 service_data[found].fourcc = service_ptr->base.fourcc;
3052 service_data[found].clientid = service_ptr->client_id;
3053 service_data[found].use_count = service_ptr->service_use_count;
3054 found++;
3055 if (found >= MAX_SERVICES)
3056 break;
3057 }
3058
3059 read_unlock_bh(&arm_state->susp_res_lock);
3060
3061 vchiq_log_warning(vchiq_susp_log_level,
3062 "-- Videcore suspend state: %s --",
3063 suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
3064 vchiq_log_warning(vchiq_susp_log_level,
3065 "-- Videcore resume state: %s --",
3066 resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
3067
3068 if (only_nonzero)
3069 vchiq_log_warning(vchiq_susp_log_level, "Too many active "
3070 "services (%d). Only dumping up to first %d services "
3071 "with non-zero use-count", active_services, found);
3072
3073 for (i = 0; i < found; i++) {
3074 vchiq_log_warning(vchiq_susp_log_level,
3075 "----- %c%c%c%c:%d service count %d %s",
3076 VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
3077 service_data[i].clientid,
3078 service_data[i].use_count,
3079 service_data[i].use_count ? nz : "");
3080 }
3081 vchiq_log_warning(vchiq_susp_log_level,
3082 "----- VCHIQ use count count %d", peer_count);
3083 vchiq_log_warning(vchiq_susp_log_level,
3084 "--- Overall vchiq instance use count %d", vc_use_count);
3085
3086 kfree(service_data);
3087
3088 vchiq_dump_platform_use_state(state);
3089 }
3090
3091 VCHIQ_STATUS_T
3092 vchiq_check_service(struct vchiq_service *service)
3093 {
3094 struct vchiq_arm_state *arm_state;
3095 VCHIQ_STATUS_T ret = VCHIQ_ERROR;
3096
3097 if (!service || !service->state)
3098 goto out;
3099
3100 vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
3101
3102 arm_state = vchiq_platform_get_arm_state(service->state);
3103
3104 read_lock_bh(&arm_state->susp_res_lock);
3105 if (service->service_use_count)
3106 ret = VCHIQ_SUCCESS;
3107 read_unlock_bh(&arm_state->susp_res_lock);
3108
3109 if (ret == VCHIQ_ERROR) {
3110 vchiq_log_error(vchiq_susp_log_level,
3111 "%s ERROR - %c%c%c%c:%d service count %d, "
3112 "state count %d, videocore suspend state %s", __func__,
3113 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
3114 service->client_id, service->service_use_count,
3115 arm_state->videocore_use_count,
3116 suspend_state_names[arm_state->vc_suspend_state +
3117 VC_SUSPEND_NUM_OFFSET]);
3118 vchiq_dump_service_use_state(service->state);
3119 }
3120 out:
3121 return ret;
3122 }
3123
3124 /* stub functions */
3125 void vchiq_on_remote_use_active(struct vchiq_state *state)
3126 {
3127 (void)state;
3128 }
3129
3130 void vchiq_platform_conn_state_changed(struct vchiq_state *state,
3131 VCHIQ_CONNSTATE_T oldstate,
3132 VCHIQ_CONNSTATE_T newstate)
3133 {
3134 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
3135
3136 vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
3137 get_conn_state_name(oldstate), get_conn_state_name(newstate));
3138 if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
3139 write_lock_bh(&arm_state->susp_res_lock);
3140 if (!arm_state->first_connect) {
3141 char threadname[16];
3142
3143 arm_state->first_connect = 1;
3144 write_unlock_bh(&arm_state->susp_res_lock);
3145 snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
3146 state->id);
3147 arm_state->ka_thread = kthread_create(
3148 &vchiq_keepalive_thread_func,
3149 (void *)state,
3150 threadname);
3151 if (IS_ERR(arm_state->ka_thread)) {
3152 vchiq_log_error(vchiq_susp_log_level,
3153 "vchiq: FATAL: couldn't create thread %s",
3154 threadname);
3155 } else {
3156 wake_up_process(arm_state->ka_thread);
3157 }
3158 } else
3159 write_unlock_bh(&arm_state->susp_res_lock);
3160 }
3161 }
3162
3163 static const struct of_device_id vchiq_of_match[] = {
3164 { .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_drvdata },
3165 { .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_drvdata },
3166 {},
3167 };
3168 MODULE_DEVICE_TABLE(of, vchiq_of_match);
3169
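/* Register a child platform device with a 32-bit DMA mask under the
 * vchiq device; used below to spawn the bcm2835-camera and
 * bcm2835_audio sub-drivers. */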
3170 static struct platform_device *
3171 vchiq_register_child(struct platform_device *pdev, const char *name)
3172 {
3173 struct platform_device_info pdevinfo;
3174 struct platform_device *child;
3175
3176 memset(&pdevinfo, 0, sizeof(pdevinfo));
3177
3178 pdevinfo.parent = &pdev->dev;
3179 pdevinfo.name = name;
3180 pdevinfo.id = PLATFORM_DEVID_NONE;
3181 pdevinfo.dma_mask = DMA_BIT_MASK(32);
3182
3183 child = platform_device_register_full(&pdevinfo);
3184 if (IS_ERR(child)) {
3185 dev_warn(&pdev->dev, "%s not registered\n", name);
3186 child = NULL;
3187 }
3188
3189 return child;
3190 }
3191
3192 static int vchiq_probe(struct platform_device *pdev)
3193 {
3194 struct device_node *fw_node;
3195 const struct of_device_id *of_id;
3196 struct vchiq_drvdata *drvdata;
3197 struct device *vchiq_dev;
3198 int err;
3199
3200 of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
3201 drvdata = (struct vchiq_drvdata *)of_id->data;
3202 if (!drvdata)
3203 return -EINVAL;
3204
3205 fw_node = of_find_compatible_node(NULL, NULL,
3206 "raspberrypi,bcm2835-firmware");
3207 if (!fw_node) {
3208 dev_err(&pdev->dev, "Missing firmware node\n");
3209 return -ENOENT;
3210 }
3211
3212 drvdata->fw = rpi_firmware_get(fw_node);
3213 of_node_put(fw_node);
3214 if (!drvdata->fw)
3215 return -EPROBE_DEFER;
3216
3217 platform_set_drvdata(pdev, drvdata);
3218
3219 err = vchiq_platform_init(pdev, &g_state);
3220 if (err)
3221 goto failed_platform_init;
3222
3223 cdev_init(&vchiq_cdev, &vchiq_fops);
3224 vchiq_cdev.owner = THIS_MODULE;
3225 err = cdev_add(&vchiq_cdev, vchiq_devid, 1);
3226 if (err) {
3227 vchiq_log_error(vchiq_arm_log_level,
3228 "Unable to register device");
3229 goto failed_platform_init;
3230 }
3231
3232 vchiq_dev = device_create(vchiq_class, &pdev->dev, vchiq_devid, NULL,
3233 "vchiq");
3234 if (IS_ERR(vchiq_dev)) {
3235 err = PTR_ERR(vchiq_dev);
3236 goto failed_device_create;
3237 }
3238
3239 vchiq_debugfs_init();
3240
3241 vchiq_log_info(vchiq_arm_log_level,
3242 "vchiq: initialised - version %d (min %d), device %d.%d",
3243 VCHIQ_VERSION, VCHIQ_VERSION_MIN,
3244 MAJOR(vchiq_devid), MINOR(vchiq_devid));
3245
3246 bcm2835_camera = vchiq_register_child(pdev, "bcm2835-camera");
3247 bcm2835_audio = vchiq_register_child(pdev, "bcm2835_audio");
3248
3249 return 0;
3250
3251 failed_device_create:
3252 cdev_del(&vchiq_cdev);
3253 failed_platform_init:
3254 vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
3255 return err;
3256 }
3257
3258 static int vchiq_remove(struct platform_device *pdev)
3259 {
/* Unregister the children in reverse order of creation. */
platform_device_unregister(bcm2835_audio);
3260 platform_device_unregister(bcm2835_camera);
3261 vchiq_debugfs_deinit();
3262 device_destroy(vchiq_class, vchiq_devid);
3263 cdev_del(&vchiq_cdev);
3264
3265 return 0;
3266 }
3267
3268 static struct platform_driver vchiq_driver = {
3269 .driver = {
3270 .name = "bcm2835_vchiq",
3271 .of_match_table = vchiq_of_match,
3272 },
3273 .probe = vchiq_probe,
3274 .remove = vchiq_remove,
3275 };
3276
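/* Set-up order is class -> chrdev region -> platform driver; the error
 * path below unwinds in the reverse order. */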
3277 static int __init vchiq_driver_init(void)
3278 {
3279 int ret;
3280
3281 vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
3282 if (IS_ERR(vchiq_class)) {
3283 pr_err("Failed to create vchiq class\n");
3284 return PTR_ERR(vchiq_class);
3285 }
3286
3287 ret = alloc_chrdev_region(&vchiq_devid, 0, 1, DEVICE_NAME);
3288 if (ret) {
3289 pr_err("Failed to allocate vchiq's chrdev region\n");
3290 goto class_destroy;
3291 }
3292
3293 ret = platform_driver_register(&vchiq_driver);
3294 if (ret) {
3295 pr_err("Failed to register vchiq driver\n");
3296 goto region_unregister;
3297 }
3298
3299 return 0;
3300
3301 region_unregister:
3302 unregister_chrdev_region(vchiq_devid, 1);
3303
3304 class_destroy:
3305 class_destroy(vchiq_class);
3306
3307 return ret;
3308 }
3309 module_init(vchiq_driver_init);
3310
3311 static void __exit vchiq_driver_exit(void)
3312 {
3313 platform_driver_unregister(&vchiq_driver);
3314 unregister_chrdev_region(vchiq_devid, 1);
3315 class_destroy(vchiq_class);
3316 }
3317 module_exit(vchiq_driver_exit);
3318
3319 MODULE_LICENSE("Dual BSD/GPL");
3320 MODULE_DESCRIPTION("Videocore VCHIQ driver");
3321 MODULE_AUTHOR("Broadcom Corporation");
3322