1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Broadcom BM2835 V4L2 driver
4 *
5 * Copyright © 2013 Raspberry Pi (Trading) Ltd.
6 *
7 * Authors: Vincent Sanders <vincent.sanders@collabora.co.uk>
8 * Dave Stevenson <dsteve@broadcom.com>
9 * Simon Mellor <simellor@broadcom.com>
10 * Luke Diamand <luked@broadcom.com>
11 *
12 * V4L2 driver MMAL vchiq interface code
13 */
14
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17 #include <linux/errno.h>
18 #include <linux/kernel.h>
19 #include <linux/mutex.h>
20 #include <linux/mm.h>
21 #include <linux/slab.h>
22 #include <linux/completion.h>
23 #include <linux/vmalloc.h>
24 #include <asm/cacheflush.h>
25 #include <media/videobuf2-vmalloc.h>
26
27 #include "mmal-common.h"
28 #include "mmal-vchiq.h"
29 #include "mmal-msg.h"
30
31 #define USE_VCHIQ_ARM
32 #include "interface/vchi/vchi.h"
33
34 /* maximum number of components supported */
35 #define VCHIQ_MMAL_MAX_COMPONENTS 4
36
37 /*#define FULL_MSG_DUMP 1*/
38
#ifdef DEBUG
/* Human-readable names for MMAL message types, indexed by the numeric
 * value of mmal_msg_header::type.  Used only by DBG_DUMP_MSG() below,
 * so compiled out unless DEBUG is defined.  Order must match the
 * mmal_msg_type enum in mmal-msg.h.
 */
static const char *const msg_type_names[] = {
	"UNKNOWN",
	"QUIT",
	"SERVICE_CLOSED",
	"GET_VERSION",
	"COMPONENT_CREATE",
	"COMPONENT_DESTROY",
	"COMPONENT_ENABLE",
	"COMPONENT_DISABLE",
	"PORT_INFO_GET",
	"PORT_INFO_SET",
	"PORT_ACTION",
	"BUFFER_FROM_HOST",
	"BUFFER_TO_HOST",
	"GET_STATS",
	"PORT_PARAMETER_SET",
	"PORT_PARAMETER_GET",
	"EVENT_TO_HOST",
	"GET_CORE_STATS_FOR_PORT",
	"OPAQUE_ALLOCATOR",
	"CONSUME_MEM",
	"LMK",
	"OPAQUE_ALLOCATOR_DESC",
	"DRM_GET_LHS32",
	"DRM_GET_TIME",
	"BUFFER_FROM_HOST_ZEROLEN",
	"PORT_FLUSH",
	"HOST_LOG",
};
#endif
70
/* Human-readable names for port actions, indexed by the
 * mmal_msg_port_action_type value; used in debug logging when sending
 * MMAL_MSG_TYPE_PORT_ACTION messages.  Order must match the enum in
 * mmal-msg.h.
 */
static const char *const port_action_type_names[] = {
	"UNKNOWN",
	"ENABLE",
	"DISABLE",
	"FLUSH",
	"CONNECT",
	"DISCONNECT",
	"SET_REQUIREMENTS",
};
80
#if defined(DEBUG)
#if defined(FULL_MSG_DUMP)
/* Dump an mmal message to the kernel log: type/length summary plus a
 * hex dump of the header and payload.  Available only when both DEBUG
 * and FULL_MSG_DUMP are defined.
 */
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE) \
	do { \
		pr_debug(TITLE" type:%s(%d) length:%d\n", \
			 msg_type_names[(MSG)->h.type], \
			 (MSG)->h.type, (MSG_LEN)); \
		print_hex_dump(KERN_DEBUG, "<<h: ", DUMP_PREFIX_OFFSET, \
			       16, 4, (MSG), \
			       sizeof(struct mmal_msg_header), 1); \
		print_hex_dump(KERN_DEBUG, "<<p: ", DUMP_PREFIX_OFFSET, \
			       16, 4, \
			       ((u8 *)(MSG)) + sizeof(struct mmal_msg_header),\
			       (MSG_LEN) - sizeof(struct mmal_msg_header), 1); \
	} while (0)
#else
/* Summary-only variant: log just the message type and length.
 *
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement; the previous bare-brace form miscompiled constructs such
 * as "if (x) DBG_DUMP_MSG(...); else ..." because the semicolon after
 * the block terminated the if.
 */
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE) \
	do { \
		pr_debug(TITLE" type:%s(%d) length:%d\n", \
			 msg_type_names[(MSG)->h.type], \
			 (MSG)->h.type, (MSG_LEN)); \
	} while (0)
#endif
#else
/* Debugging disabled: DBG_DUMP_MSG() compiles away to nothing. */
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)
#endif
107
108 struct vchiq_mmal_instance;
109
/* normal message context
 *
 * One per in-flight exchange with the VPU.  The u union is used either
 * for an asynchronous bulk buffer transfer (u.bulk) or for a
 * synchronous request/reply round trip (u.sync) - never both at once.
 */
struct mmal_msg_context {
	/* owning instance, needed to release the idr entry */
	struct vchiq_mmal_instance *instance;

	/* Index in the context_map idr so that we can find the
	 * mmal_msg_context again when servicing the VCHI reply.
	 */
	int handle;

	union {
		struct {
			/* work struct for deferred callback - must come first */
			struct work_struct work;
			/* mmal instance */
			struct vchiq_mmal_instance *instance;
			/* mmal port */
			struct vchiq_mmal_port *port;
			/* actual buffer used to store bulk reply */
			struct mmal_buffer *buffer;
			/* amount of buffer used */
			unsigned long buffer_used;
			/* MMAL buffer flags */
			u32 mmal_flags;
			/* Presentation and Decode timestamps */
			s64 pts;
			s64 dts;

			/* context status: 0 on success, negative errno
			 * otherwise; handed to the port buffer callback
			 */
			int status;

		} bulk; /* bulk data */

		struct {
			/* message handle to release */
			VCHI_HELD_MSG_T msg_handle;
			/* pointer to received message */
			struct mmal_msg *msg;
			/* received message length */
			u32 msg_len;
			/* completion upon reply */
			struct completion cmplt;
		} sync; /* synchronous response */
	} u;

};
154
/* Per-connection state for the MMAL service on VCHI. */
struct vchiq_mmal_instance {
	/* VCHI service handle used for all messaging with the VPU */
	VCHI_SERVICE_HANDLE_T handle;

	/* ensure serialised access to service */
	struct mutex vchiq_mutex;

	/* vmalloc page to receive scratch bulk xfers into */
	void *bulk_scratch;

	/* maps the integer handles carried in message headers back to
	 * their struct mmal_msg_context (see get/lookup/release_msg_context)
	 */
	struct idr context_map;
	/* protects context_map */
	spinlock_t context_map_lock;

	/* component to use next */
	int component_idx;
	struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
};
171
172 static struct mmal_msg_context *
get_msg_context(struct vchiq_mmal_instance * instance)173 get_msg_context(struct vchiq_mmal_instance *instance)
174 {
175 struct mmal_msg_context *msg_context;
176 int handle;
177
178 /* todo: should this be allocated from a pool to avoid kzalloc */
179 msg_context = kzalloc(sizeof(*msg_context), GFP_KERNEL);
180
181 if (!msg_context)
182 return ERR_PTR(-ENOMEM);
183
184 /* Create an ID that will be passed along with our message so
185 * that when we service the VCHI reply, we can look up what
186 * message is being replied to.
187 */
188 spin_lock(&instance->context_map_lock);
189 handle = idr_alloc(&instance->context_map, msg_context,
190 0, 0, GFP_KERNEL);
191 spin_unlock(&instance->context_map_lock);
192
193 if (handle < 0) {
194 kfree(msg_context);
195 return ERR_PTR(handle);
196 }
197
198 msg_context->instance = instance;
199 msg_context->handle = handle;
200
201 return msg_context;
202 }
203
204 static struct mmal_msg_context *
lookup_msg_context(struct vchiq_mmal_instance * instance,int handle)205 lookup_msg_context(struct vchiq_mmal_instance *instance, int handle)
206 {
207 return idr_find(&instance->context_map, handle);
208 }
209
210 static void
release_msg_context(struct mmal_msg_context * msg_context)211 release_msg_context(struct mmal_msg_context *msg_context)
212 {
213 struct vchiq_mmal_instance *instance = msg_context->instance;
214
215 spin_lock(&instance->context_map_lock);
216 idr_remove(&instance->context_map, msg_context->handle);
217 spin_unlock(&instance->context_map_lock);
218 kfree(msg_context);
219 }
220
/* deals with receipt of event to host message
 *
 * Events are not currently acted upon; this just logs what arrived so
 * the traffic is visible when debugging.
 */
static void event_to_host_cb(struct vchiq_mmal_instance *instance,
			     struct mmal_msg *msg, u32 msg_len)
{
	pr_debug("unhandled event\n");
	pr_debug("component:%u port type:%d num:%d cmd:0x%x length:%d\n",
		 msg->u.event_to_host.client_component,
		 msg->u.event_to_host.port_type,
		 msg->u.event_to_host.port_num,
		 msg->u.event_to_host.cmd, msg->u.event_to_host.length);
}
232
233 /* workqueue scheduled callback
234 *
235 * we do this because it is important we do not call any other vchiq
236 * sync calls from witin the message delivery thread
237 */
buffer_work_cb(struct work_struct * work)238 static void buffer_work_cb(struct work_struct *work)
239 {
240 struct mmal_msg_context *msg_context =
241 container_of(work, struct mmal_msg_context, u.bulk.work);
242
243 msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
244 msg_context->u.bulk.port,
245 msg_context->u.bulk.status,
246 msg_context->u.bulk.buffer,
247 msg_context->u.bulk.buffer_used,
248 msg_context->u.bulk.mmal_flags,
249 msg_context->u.bulk.dts,
250 msg_context->u.bulk.pts);
251
252 }
253
/* enqueue a bulk receive for a given message context
 *
 * Records the buffer metadata from the BUFFER_TO_HOST message into the
 * context, then queues a VCHI bulk receive into the context's buffer.
 * On success (return 0) the bulk-complete callback will later schedule
 * the port callback; any non-zero return means no transfer was queued.
 */
static int bulk_receive(struct vchiq_mmal_instance *instance,
			struct mmal_msg *msg,
			struct mmal_msg_context *msg_context)
{
	unsigned long rd_len;
	int ret;

	rd_len = msg->u.buffer_from_host.buffer_header.length;

	if (!msg_context->u.bulk.buffer) {
		pr_err("bulk.buffer not configured - error in buffer_from_host\n");

		/* todo: this is a serious error, we should never have
		 * committed a buffer_to_host operation to the mmal
		 * port without the buffer to back it up (underflow
		 * handling) and there is no obvious way to deal with
		 * this - how is the mmal service going to react when
		 * we fail to do the xfer and reschedule a buffer when
		 * it arrives? perhaps a starved flag to indicate a
		 * waiting bulk receive?
		 */

		return -EINVAL;
	}

	/* ensure we do not overrun the available buffer */
	if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
		rd_len = msg_context->u.bulk.buffer->buffer_size;
		pr_warn("short read as not enough receive buffer space\n");
		/* todo: is this the correct response, what happens to
		 * the rest of the message data?
		 */
	}

	/* store length and metadata so buffer_work_cb() can report them */
	msg_context->u.bulk.buffer_used = rd_len;
	msg_context->u.bulk.mmal_flags =
	    msg->u.buffer_from_host.buffer_header.flags;
	msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
	msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;

	/* queue the bulk submission */
	vchi_service_use(instance->handle);
	ret = vchi_bulk_queue_receive(instance->handle,
				      msg_context->u.bulk.buffer->buffer,
				      /* Actual receive needs to be a multiple
				       * of 4 bytes
				       */
				      (rd_len + 3) & ~3,
				      VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
				      VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
				      msg_context);

	vchi_service_release(instance->handle);

	return ret;
}
312
313 /* enque a dummy bulk receive for a given message context */
dummy_bulk_receive(struct vchiq_mmal_instance * instance,struct mmal_msg_context * msg_context)314 static int dummy_bulk_receive(struct vchiq_mmal_instance *instance,
315 struct mmal_msg_context *msg_context)
316 {
317 int ret;
318
319 /* zero length indicates this was a dummy transfer */
320 msg_context->u.bulk.buffer_used = 0;
321
322 /* queue the bulk submission */
323 vchi_service_use(instance->handle);
324
325 ret = vchi_bulk_queue_receive(instance->handle,
326 instance->bulk_scratch,
327 8,
328 VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
329 VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
330 msg_context);
331
332 vchi_service_release(instance->handle);
333
334 return ret;
335 }
336
337 /* data in message, memcpy from packet into output buffer */
inline_receive(struct vchiq_mmal_instance * instance,struct mmal_msg * msg,struct mmal_msg_context * msg_context)338 static int inline_receive(struct vchiq_mmal_instance *instance,
339 struct mmal_msg *msg,
340 struct mmal_msg_context *msg_context)
341 {
342 memcpy(msg_context->u.bulk.buffer->buffer,
343 msg->u.buffer_from_host.short_data,
344 msg->u.buffer_from_host.payload_in_message);
345
346 msg_context->u.bulk.buffer_used =
347 msg->u.buffer_from_host.payload_in_message;
348
349 return 0;
350 }
351
/* queue the buffer availability with MMAL_MSG_TYPE_BUFFER_FROM_HOST
 *
 * Tells the VPU that the host buffer @buf on @port is available to be
 * filled.  The buffer's msg_context is primed so that the eventual
 * BUFFER_TO_HOST reply / bulk completion can route the data back via
 * buffer_work_cb().  Returns 0 on success or a negative errno.
 */
static int
buffer_from_host(struct vchiq_mmal_instance *instance,
		 struct vchiq_mmal_port *port, struct mmal_buffer *buf)
{
	struct mmal_msg_context *msg_context;
	struct mmal_msg m;
	int ret;

	if (!port->enabled)
		return -EINVAL;

	/* NOTE(review): the first %p here is the VCHI service handle,
	 * not the instance pointer, despite the "instance:" label -
	 * confirm intent before relying on this output.
	 */
	pr_debug("instance:%p buffer:%p\n", instance->handle, buf);

	/* get context */
	if (!buf->msg_context) {
		pr_err("%s: msg_context not allocated, buf %p\n", __func__,
		       buf);
		return -EINVAL;
	}
	msg_context = buf->msg_context;

	/* store bulk message context for when data arrives */
	msg_context->u.bulk.instance = instance;
	msg_context->u.bulk.port = port;
	msg_context->u.bulk.buffer = buf;
	msg_context->u.bulk.buffer_used = 0;

	/* initialise work structure ready to schedule callback */
	INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);

	/* prep the buffer from host message */
	memset(&m, 0xbc, sizeof(m));	/* just to make debug clearer */

	m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST;
	m.h.magic = MMAL_MAGIC;
	m.h.context = msg_context->handle;
	m.h.status = 0;

	/* drvbuf is our private data passed back */
	m.u.buffer_from_host.drvbuf.magic = MMAL_MAGIC;
	m.u.buffer_from_host.drvbuf.component_handle = port->component->handle;
	m.u.buffer_from_host.drvbuf.port_handle = port->handle;
	m.u.buffer_from_host.drvbuf.client_context = msg_context->handle;

	/* buffer header */
	m.u.buffer_from_host.buffer_header.cmd = 0;
	m.u.buffer_from_host.buffer_header.data =
		(u32)(unsigned long)buf->buffer;
	m.u.buffer_from_host.buffer_header.alloc_size = buf->buffer_size;
	m.u.buffer_from_host.buffer_header.length = 0;	/* nothing used yet */
	m.u.buffer_from_host.buffer_header.offset = 0;	/* no offset */
	m.u.buffer_from_host.buffer_header.flags = 0;	/* no flags */
	m.u.buffer_from_host.buffer_header.pts = MMAL_TIME_UNKNOWN;
	m.u.buffer_from_host.buffer_header.dts = MMAL_TIME_UNKNOWN;

	/* clear buffer type specific data */
	memset(&m.u.buffer_from_host.buffer_header_type_specific, 0,
	       sizeof(m.u.buffer_from_host.buffer_header_type_specific));

	/* no payload in message */
	m.u.buffer_from_host.payload_in_message = 0;

	vchi_service_use(instance->handle);

	ret = vchi_queue_kernel_message(instance->handle,
					&m,
					sizeof(struct mmal_msg_header) +
					sizeof(m.u.buffer_from_host));

	vchi_service_release(instance->handle);

	return ret;
}
426
/* deals with receipt of buffer to host message
 *
 * Routes a BUFFER_TO_HOST reply back to the buffer that was submitted
 * with buffer_from_host(), using the client_context handle stashed in
 * drvbuf.  Depending on the reply, either a bulk receive is queued
 * (callback fires on bulk completion) or the port callback work is
 * scheduled directly.
 */
static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
			      struct mmal_msg *msg, u32 msg_len)
{
	struct mmal_msg_context *msg_context;
	u32 handle;

	pr_debug("%s: instance:%p msg:%p msg_len:%d\n",
		 __func__, instance, msg, msg_len);

	/* recover the message context from the drvbuf client_context */
	if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
		handle = msg->u.buffer_from_host.drvbuf.client_context;
		msg_context = lookup_msg_context(instance, handle);

		if (!msg_context) {
			pr_err("drvbuf.client_context(%u) is invalid\n",
			       handle);
			return;
		}
	} else {
		pr_err("MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n");
		return;
	}

	if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
		/* message reception had an error */
		pr_warn("error %d in reply\n", msg->h.status);

		msg_context->u.bulk.status = msg->h.status;

	} else if (msg->u.buffer_from_host.buffer_header.length == 0) {
		/* empty buffer */
		if (msg->u.buffer_from_host.buffer_header.flags &
		    MMAL_BUFFER_HEADER_FLAG_EOS) {
			/* EOS still has a bulk transfer on the wire; a
			 * dummy receive keeps the bulk stream in step
			 */
			msg_context->u.bulk.status =
			    dummy_bulk_receive(instance, msg_context);
			if (msg_context->u.bulk.status == 0)
				return;	/* successful bulk submission, bulk
					 * completion will trigger callback
					 */
		} else {
			/* do callback with empty buffer - not EOS though */
			msg_context->u.bulk.status = 0;
			msg_context->u.bulk.buffer_used = 0;
		}
	} else if (msg->u.buffer_from_host.payload_in_message == 0) {
		/* data is not in message, queue a bulk receive */
		msg_context->u.bulk.status =
		    bulk_receive(instance, msg, msg_context);
		if (msg_context->u.bulk.status == 0)
			return;	/* successful bulk submission, bulk
				 * completion will trigger callback
				 */

		/* failed to submit buffer, this will end badly */
		pr_err("error %d on bulk submission\n",
		       msg_context->u.bulk.status);

	} else if (msg->u.buffer_from_host.payload_in_message <=
		   MMAL_VC_SHORT_DATA) {
		/* data payload within message */
		msg_context->u.bulk.status = inline_receive(instance, msg,
							    msg_context);
	} else {
		pr_err("message with invalid short payload\n");

		/* signal error */
		msg_context->u.bulk.status = -EINVAL;
		msg_context->u.bulk.buffer_used =
		    msg->u.buffer_from_host.payload_in_message;
	}

	/* schedule the port callback */
	schedule_work(&msg_context->u.bulk.work);
}
502
/* Bulk transfer completed successfully: mark the context OK and defer
 * the port callback to the workqueue (no vchiq sync calls allowed from
 * the delivery thread).
 */
static void bulk_receive_cb(struct vchiq_mmal_instance *instance,
			    struct mmal_msg_context *msg_context)
{
	msg_context->u.bulk.status = 0;

	/* schedule the port callback */
	schedule_work(&msg_context->u.bulk.work);
}
511
/* Bulk transfer was aborted: record -EINTR in the context and still
 * schedule the port callback so the buffer owner is informed.
 */
static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
			  struct mmal_msg_context *msg_context)
{
	pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context);

	msg_context->u.bulk.status = -EINTR;

	schedule_work(&msg_context->u.bulk.work);
}
521
/* incoming event service callback
 *
 * Runs in VCHI's delivery context for every event on the MMAL service.
 * Message-available events are dequeued with vchi_msg_hold() and
 * dispatched by type; every path must either release the held message
 * or hand ownership to a waiting synchronous caller (who releases it
 * via the sync.msg_handle).  Bulk completion/abort events carry the
 * message context directly in bulk_ctx.
 */
static void service_callback(void *param,
			     const VCHI_CALLBACK_REASON_T reason,
			     void *bulk_ctx)
{
	struct vchiq_mmal_instance *instance = param;
	int status;
	u32 msg_len;
	struct mmal_msg *msg;
	VCHI_HELD_MSG_T msg_handle;
	struct mmal_msg_context *msg_context;

	if (!instance) {
		pr_err("Message callback passed NULL instance\n");
		return;
	}

	switch (reason) {
	case VCHI_CALLBACK_MSG_AVAILABLE:
		status = vchi_msg_hold(instance->handle, (void **)&msg,
				       &msg_len, VCHI_FLAGS_NONE, &msg_handle);
		if (status) {
			pr_err("Unable to dequeue a message (%d)\n", status);
			break;
		}

		DBG_DUMP_MSG(msg, msg_len, "<<< reply message");

		/* handling is different for buffer messages */
		switch (msg->h.type) {
		case MMAL_MSG_TYPE_BUFFER_FROM_HOST:
			/* echo of our own submission - nothing to do */
			vchi_held_msg_release(&msg_handle);
			break;

		case MMAL_MSG_TYPE_EVENT_TO_HOST:
			event_to_host_cb(instance, msg, msg_len);
			vchi_held_msg_release(&msg_handle);

			break;

		case MMAL_MSG_TYPE_BUFFER_TO_HOST:
			buffer_to_host_cb(instance, msg, msg_len);
			vchi_held_msg_release(&msg_handle);
			break;

		default:
			/* messages dependent on header context to complete */
			if (!msg->h.context) {
				pr_err("received message context was null!\n");
				vchi_held_msg_release(&msg_handle);
				break;
			}

			msg_context = lookup_msg_context(instance,
							 msg->h.context);
			if (!msg_context) {
				pr_err("received invalid message context %u!\n",
				       msg->h.context);
				vchi_held_msg_release(&msg_handle);
				break;
			}

			/* fill in context values; the held message is NOT
			 * released here - ownership passes to the waiter in
			 * send_synchronous_mmal_msg()
			 */
			msg_context->u.sync.msg_handle = msg_handle;
			msg_context->u.sync.msg = msg;
			msg_context->u.sync.msg_len = msg_len;

			/* todo: should this check (completion_done()
			 * == 1) for no one waiting? or do we need a
			 * flag to tell us the completion has been
			 * interrupted so we can free the message and
			 * its context. This probably also solves the
			 * message arriving after interruption todo
			 * below
			 */

			/* complete message so caller knows it happened */
			complete(&msg_context->u.sync.cmplt);
			break;
		}

		break;

	case VCHI_CALLBACK_BULK_RECEIVED:
		bulk_receive_cb(instance, bulk_ctx);
		break;

	case VCHI_CALLBACK_BULK_RECEIVE_ABORTED:
		bulk_abort_cb(instance, bulk_ctx);
		break;

	case VCHI_CALLBACK_SERVICE_CLOSED:
		/* TODO: consider if this requires action if received when
		 * driver is not explicitly closing the service
		 */
		break;

	default:
		pr_err("Received unhandled message reason %d\n", reason);
		break;
	}
}
624
/* Send a message to the VPU and block (up to 3s) for its reply.
 *
 * On success (*msg_out, *msg_handle_out) hold the reply; the caller
 * must release it with vchi_held_msg_release().  Returns 0 on success,
 * -EINVAL for oversized payloads, -ETIME on timeout, or the queueing
 * error.
 */
static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
				     struct mmal_msg *msg,
				     unsigned int payload_len,
				     struct mmal_msg **msg_out,
				     VCHI_HELD_MSG_T *msg_handle_out)
{
	struct mmal_msg_context *msg_context;
	int ret;
	unsigned long timeout;

	/* payload size must not cause message to exceed max size */
	if (payload_len >
	    (MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header))) {
		pr_err("payload length %d exceeds max:%d\n", payload_len,
		       (int)(MMAL_MSG_MAX_SIZE -
			     sizeof(struct mmal_msg_header)));
		return -EINVAL;
	}

	msg_context = get_msg_context(instance);
	if (IS_ERR(msg_context))
		return PTR_ERR(msg_context);

	init_completion(&msg_context->u.sync.cmplt);

	msg->h.magic = MMAL_MAGIC;
	msg->h.context = msg_context->handle;
	msg->h.status = 0;

	DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len),
		     ">>> sync message");

	vchi_service_use(instance->handle);

	ret = vchi_queue_kernel_message(instance->handle,
					msg,
					sizeof(struct mmal_msg_header) +
					payload_len);

	vchi_service_release(instance->handle);

	if (ret) {
		pr_err("error %d queuing message\n", ret);
		release_msg_context(msg_context);
		return ret;
	}

	timeout = wait_for_completion_timeout(&msg_context->u.sync.cmplt,
					      3 * HZ);
	if (timeout == 0) {
		pr_err("timed out waiting for sync completion\n");
		ret = -ETIME;
		/* todo: what happens if the message arrives after aborting
		 *
		 * NOTE(review): freeing the context here races with a late
		 * reply - service_callback() could still look up this
		 * handle.  A proper fix needs refcounting on the context;
		 * confirm against current upstream before changing.
		 */
		release_msg_context(msg_context);
		return ret;
	}

	*msg_out = msg_context->u.sync.msg;
	*msg_handle_out = msg_context->u.sync.msg_handle;
	release_msg_context(msg_context);

	return 0;
}
688
/* Log the full state of a port (buffer requirements, format, and
 * video-specific fields) at pr_debug level.  Purely diagnostic; no
 * side effects on the port.
 */
static void dump_port_info(struct vchiq_mmal_port *port)
{
	pr_debug("port handle:0x%x enabled:%d\n", port->handle, port->enabled);

	pr_debug("buffer minimum num:%d size:%d align:%d\n",
		 port->minimum_buffer.num,
		 port->minimum_buffer.size, port->minimum_buffer.alignment);

	pr_debug("buffer recommended num:%d size:%d align:%d\n",
		 port->recommended_buffer.num,
		 port->recommended_buffer.size,
		 port->recommended_buffer.alignment);

	pr_debug("buffer current values num:%d size:%d align:%d\n",
		 port->current_buffer.num,
		 port->current_buffer.size, port->current_buffer.alignment);

	pr_debug("elementary stream: type:%d encoding:0x%x variant:0x%x\n",
		 port->format.type,
		 port->format.encoding, port->format.encoding_variant);

	pr_debug("		    bitrate:%d flags:0x%x\n",
		 port->format.bitrate, port->format.flags);

	/* video-only detail: resolution, crop, framerate, aspect */
	if (port->format.type == MMAL_ES_TYPE_VIDEO) {
		pr_debug
		    ("es video format: width:%d height:%d colourspace:0x%x\n",
		     port->es.video.width, port->es.video.height,
		     port->es.video.color_space);

		pr_debug("		 : crop xywh %d,%d,%d,%d\n",
			 port->es.video.crop.x,
			 port->es.video.crop.y,
			 port->es.video.crop.width, port->es.video.crop.height);
		pr_debug("		 : framerate %d/%d aspect %d/%d\n",
			 port->es.video.frame_rate.num,
			 port->es.video.frame_rate.den,
			 port->es.video.par.num, port->es.video.par.den);
	}
}
729
/* Serialise the driver-side port state into the wire-format
 * struct mmal_port used inside PORT_INFO_SET messages.
 */
static void port_to_mmal_msg(struct vchiq_mmal_port *port, struct mmal_port *p)
{
	/* todo do readonly fields need setting at all? */
	p->type = port->type;
	p->index = port->index;
	p->index_all = 0;
	p->is_enabled = port->enabled;
	p->buffer_num_min = port->minimum_buffer.num;
	p->buffer_size_min = port->minimum_buffer.size;
	p->buffer_alignment_min = port->minimum_buffer.alignment;
	p->buffer_num_recommended = port->recommended_buffer.num;
	p->buffer_size_recommended = port->recommended_buffer.size;

	/* only three writable fields in a port */
	p->buffer_num = port->current_buffer.num;
	p->buffer_size = port->current_buffer.size;
	/* userdata round-trips our port pointer through the firmware */
	p->userdata = (u32)(unsigned long)port;
}
748
/* Push the driver's current port configuration (buffers + elementary
 * stream format) to the VPU with a PORT_INFO_SET message.
 *
 * Returns 0 on success, a negative errno on transport failure, or the
 * negated firmware status.
 */
static int port_info_set(struct vchiq_mmal_instance *instance,
			 struct vchiq_mmal_port *port)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	VCHI_HELD_MSG_T rmsg_handle;

	pr_debug("setting port info port %p\n", port);
	if (!port)
		return -1;
	dump_port_info(port);

	m.h.type = MMAL_MSG_TYPE_PORT_INFO_SET;

	m.u.port_info_set.component_handle = port->component->handle;
	m.u.port_info_set.port_type = port->type;
	m.u.port_info_set.port_index = port->index;

	port_to_mmal_msg(port, &m.u.port_info_set.port);

	/* elementary stream format setup */
	m.u.port_info_set.format.type = port->format.type;
	m.u.port_info_set.format.encoding = port->format.encoding;
	m.u.port_info_set.format.encoding_variant =
	    port->format.encoding_variant;
	m.u.port_info_set.format.bitrate = port->format.bitrate;
	m.u.port_info_set.format.flags = port->format.flags;

	memcpy(&m.u.port_info_set.es, &port->es,
	       sizeof(union mmal_es_specific_format));

	m.u.port_info_set.format.extradata_size = port->format.extradata_size;
	memcpy(&m.u.port_info_set.extradata, port->format.extradata,
	       port->format.extradata_size);

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(m.u.port_info_set),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_SET) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	/* return operation status
	 *
	 * NOTE(review): status is read via port_info_get_reply even
	 * though this is a SET - the reply union members overlay so the
	 * leading status field aliases; confirm against mmal-msg.h.
	 */
	ret = -rmsg->u.port_info_get_reply.status;

	pr_debug("%s:result:%d component:0x%x port:%d\n", __func__, ret,
		 port->component->handle, port->handle);

release_msg:
	vchi_held_msg_release(&rmsg_handle);

	return ret;
}
808
/* use port info get message to retrieve port information
 *
 * Sends PORT_INFO_GET and copies the reply (handle, buffer
 * requirements, stream format, extradata) into @port.  Returns 0 on
 * success, a negative errno on transport failure, or the negated
 * firmware status.
 */
static int port_info_get(struct vchiq_mmal_instance *instance,
			 struct vchiq_mmal_port *port)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	VCHI_HELD_MSG_T rmsg_handle;

	/* port info time */
	m.h.type = MMAL_MSG_TYPE_PORT_INFO_GET;
	m.u.port_info_get.component_handle = port->component->handle;
	m.u.port_info_get.port_type = port->type;
	m.u.port_info_get.index = port->index;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(m.u.port_info_get),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_GET) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	/* return operation status */
	ret = -rmsg->u.port_info_get_reply.status;
	if (ret != MMAL_MSG_STATUS_SUCCESS)
		goto release_msg;

	if (rmsg->u.port_info_get_reply.port.is_enabled == 0)
		port->enabled = false;
	else
		port->enabled = true;

	/* copy the values out of the message */
	port->handle = rmsg->u.port_info_get_reply.port_handle;

	/* port type and index cached to use on port info set because
	 * it does not use a port handle
	 */
	port->type = rmsg->u.port_info_get_reply.port_type;
	port->index = rmsg->u.port_info_get_reply.port_index;

	port->minimum_buffer.num =
	    rmsg->u.port_info_get_reply.port.buffer_num_min;
	port->minimum_buffer.size =
	    rmsg->u.port_info_get_reply.port.buffer_size_min;
	port->minimum_buffer.alignment =
	    rmsg->u.port_info_get_reply.port.buffer_alignment_min;

	/* NOTE(review): recommended alignment is copied from
	 * buffer_alignment_min and recommended size is never filled in;
	 * looks intentional (no recommended fields on the wire) but
	 * worth confirming.
	 */
	port->recommended_buffer.alignment =
	    rmsg->u.port_info_get_reply.port.buffer_alignment_min;
	port->recommended_buffer.num =
	    rmsg->u.port_info_get_reply.port.buffer_num_recommended;

	port->current_buffer.num = rmsg->u.port_info_get_reply.port.buffer_num;
	port->current_buffer.size =
	    rmsg->u.port_info_get_reply.port.buffer_size;

	/* stream format */
	port->format.type = rmsg->u.port_info_get_reply.format.type;
	port->format.encoding = rmsg->u.port_info_get_reply.format.encoding;
	port->format.encoding_variant =
	    rmsg->u.port_info_get_reply.format.encoding_variant;
	port->format.bitrate = rmsg->u.port_info_get_reply.format.bitrate;
	port->format.flags = rmsg->u.port_info_get_reply.format.flags;

	/* elementary stream format */
	memcpy(&port->es,
	       &rmsg->u.port_info_get_reply.es,
	       sizeof(union mmal_es_specific_format));
	port->format.es = &port->es;

	port->format.extradata_size =
	    rmsg->u.port_info_get_reply.format.extradata_size;
	memcpy(port->format.extradata,
	       rmsg->u.port_info_get_reply.extradata,
	       port->format.extradata_size);

	pr_debug("received port info\n");
	dump_port_info(port);

release_msg:

	pr_debug("%s:result:%d component:0x%x port:%d\n",
		 __func__, ret, port->component->handle, port->handle);

	vchi_held_msg_release(&rmsg_handle);

	return ret;
}
903
904 /* create comonent on vc */
create_component(struct vchiq_mmal_instance * instance,struct vchiq_mmal_component * component,const char * name)905 static int create_component(struct vchiq_mmal_instance *instance,
906 struct vchiq_mmal_component *component,
907 const char *name)
908 {
909 int ret;
910 struct mmal_msg m;
911 struct mmal_msg *rmsg;
912 VCHI_HELD_MSG_T rmsg_handle;
913
914 /* build component create message */
915 m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
916 m.u.component_create.client_component = (u32)(unsigned long)component;
917 strncpy(m.u.component_create.name, name,
918 sizeof(m.u.component_create.name));
919
920 ret = send_synchronous_mmal_msg(instance, &m,
921 sizeof(m.u.component_create),
922 &rmsg, &rmsg_handle);
923 if (ret)
924 return ret;
925
926 if (rmsg->h.type != m.h.type) {
927 /* got an unexpected message type in reply */
928 ret = -EINVAL;
929 goto release_msg;
930 }
931
932 ret = -rmsg->u.component_create_reply.status;
933 if (ret != MMAL_MSG_STATUS_SUCCESS)
934 goto release_msg;
935
936 /* a valid component response received */
937 component->handle = rmsg->u.component_create_reply.component_handle;
938 component->inputs = rmsg->u.component_create_reply.input_num;
939 component->outputs = rmsg->u.component_create_reply.output_num;
940 component->clocks = rmsg->u.component_create_reply.clock_num;
941
942 pr_debug("Component handle:0x%x in:%d out:%d clock:%d\n",
943 component->handle,
944 component->inputs, component->outputs, component->clocks);
945
946 release_msg:
947 vchi_held_msg_release(&rmsg_handle);
948
949 return ret;
950 }
951
952 /* destroys a component on vc */
destroy_component(struct vchiq_mmal_instance * instance,struct vchiq_mmal_component * component)953 static int destroy_component(struct vchiq_mmal_instance *instance,
954 struct vchiq_mmal_component *component)
955 {
956 int ret;
957 struct mmal_msg m;
958 struct mmal_msg *rmsg;
959 VCHI_HELD_MSG_T rmsg_handle;
960
961 m.h.type = MMAL_MSG_TYPE_COMPONENT_DESTROY;
962 m.u.component_destroy.component_handle = component->handle;
963
964 ret = send_synchronous_mmal_msg(instance, &m,
965 sizeof(m.u.component_destroy),
966 &rmsg, &rmsg_handle);
967 if (ret)
968 return ret;
969
970 if (rmsg->h.type != m.h.type) {
971 /* got an unexpected message type in reply */
972 ret = -EINVAL;
973 goto release_msg;
974 }
975
976 ret = -rmsg->u.component_destroy_reply.status;
977
978 release_msg:
979
980 vchi_held_msg_release(&rmsg_handle);
981
982 return ret;
983 }
984
985 /* enable a component on vc */
enable_component(struct vchiq_mmal_instance * instance,struct vchiq_mmal_component * component)986 static int enable_component(struct vchiq_mmal_instance *instance,
987 struct vchiq_mmal_component *component)
988 {
989 int ret;
990 struct mmal_msg m;
991 struct mmal_msg *rmsg;
992 VCHI_HELD_MSG_T rmsg_handle;
993
994 m.h.type = MMAL_MSG_TYPE_COMPONENT_ENABLE;
995 m.u.component_enable.component_handle = component->handle;
996
997 ret = send_synchronous_mmal_msg(instance, &m,
998 sizeof(m.u.component_enable),
999 &rmsg, &rmsg_handle);
1000 if (ret)
1001 return ret;
1002
1003 if (rmsg->h.type != m.h.type) {
1004 /* got an unexpected message type in reply */
1005 ret = -EINVAL;
1006 goto release_msg;
1007 }
1008
1009 ret = -rmsg->u.component_enable_reply.status;
1010
1011 release_msg:
1012 vchi_held_msg_release(&rmsg_handle);
1013
1014 return ret;
1015 }
1016
1017 /* disable a component on vc */
disable_component(struct vchiq_mmal_instance * instance,struct vchiq_mmal_component * component)1018 static int disable_component(struct vchiq_mmal_instance *instance,
1019 struct vchiq_mmal_component *component)
1020 {
1021 int ret;
1022 struct mmal_msg m;
1023 struct mmal_msg *rmsg;
1024 VCHI_HELD_MSG_T rmsg_handle;
1025
1026 m.h.type = MMAL_MSG_TYPE_COMPONENT_DISABLE;
1027 m.u.component_disable.component_handle = component->handle;
1028
1029 ret = send_synchronous_mmal_msg(instance, &m,
1030 sizeof(m.u.component_disable),
1031 &rmsg, &rmsg_handle);
1032 if (ret)
1033 return ret;
1034
1035 if (rmsg->h.type != m.h.type) {
1036 /* got an unexpected message type in reply */
1037 ret = -EINVAL;
1038 goto release_msg;
1039 }
1040
1041 ret = -rmsg->u.component_disable_reply.status;
1042
1043 release_msg:
1044
1045 vchi_held_msg_release(&rmsg_handle);
1046
1047 return ret;
1048 }
1049
1050 /* get version of mmal implementation */
get_version(struct vchiq_mmal_instance * instance,u32 * major_out,u32 * minor_out)1051 static int get_version(struct vchiq_mmal_instance *instance,
1052 u32 *major_out, u32 *minor_out)
1053 {
1054 int ret;
1055 struct mmal_msg m;
1056 struct mmal_msg *rmsg;
1057 VCHI_HELD_MSG_T rmsg_handle;
1058
1059 m.h.type = MMAL_MSG_TYPE_GET_VERSION;
1060
1061 ret = send_synchronous_mmal_msg(instance, &m,
1062 sizeof(m.u.version),
1063 &rmsg, &rmsg_handle);
1064 if (ret)
1065 return ret;
1066
1067 if (rmsg->h.type != m.h.type) {
1068 /* got an unexpected message type in reply */
1069 ret = -EINVAL;
1070 goto release_msg;
1071 }
1072
1073 *major_out = rmsg->u.version.major;
1074 *minor_out = rmsg->u.version.minor;
1075
1076 release_msg:
1077 vchi_held_msg_release(&rmsg_handle);
1078
1079 return ret;
1080 }
1081
1082 /* do a port action with a port as a parameter */
port_action_port(struct vchiq_mmal_instance * instance,struct vchiq_mmal_port * port,enum mmal_msg_port_action_type action_type)1083 static int port_action_port(struct vchiq_mmal_instance *instance,
1084 struct vchiq_mmal_port *port,
1085 enum mmal_msg_port_action_type action_type)
1086 {
1087 int ret;
1088 struct mmal_msg m;
1089 struct mmal_msg *rmsg;
1090 VCHI_HELD_MSG_T rmsg_handle;
1091
1092 m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1093 m.u.port_action_port.component_handle = port->component->handle;
1094 m.u.port_action_port.port_handle = port->handle;
1095 m.u.port_action_port.action = action_type;
1096
1097 port_to_mmal_msg(port, &m.u.port_action_port.port);
1098
1099 ret = send_synchronous_mmal_msg(instance, &m,
1100 sizeof(m.u.port_action_port),
1101 &rmsg, &rmsg_handle);
1102 if (ret)
1103 return ret;
1104
1105 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1106 /* got an unexpected message type in reply */
1107 ret = -EINVAL;
1108 goto release_msg;
1109 }
1110
1111 ret = -rmsg->u.port_action_reply.status;
1112
1113 pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)\n",
1114 __func__,
1115 ret, port->component->handle, port->handle,
1116 port_action_type_names[action_type], action_type);
1117
1118 release_msg:
1119 vchi_held_msg_release(&rmsg_handle);
1120
1121 return ret;
1122 }
1123
1124 /* do a port action with handles as parameters */
port_action_handle(struct vchiq_mmal_instance * instance,struct vchiq_mmal_port * port,enum mmal_msg_port_action_type action_type,u32 connect_component_handle,u32 connect_port_handle)1125 static int port_action_handle(struct vchiq_mmal_instance *instance,
1126 struct vchiq_mmal_port *port,
1127 enum mmal_msg_port_action_type action_type,
1128 u32 connect_component_handle,
1129 u32 connect_port_handle)
1130 {
1131 int ret;
1132 struct mmal_msg m;
1133 struct mmal_msg *rmsg;
1134 VCHI_HELD_MSG_T rmsg_handle;
1135
1136 m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1137
1138 m.u.port_action_handle.component_handle = port->component->handle;
1139 m.u.port_action_handle.port_handle = port->handle;
1140 m.u.port_action_handle.action = action_type;
1141
1142 m.u.port_action_handle.connect_component_handle =
1143 connect_component_handle;
1144 m.u.port_action_handle.connect_port_handle = connect_port_handle;
1145
1146 ret = send_synchronous_mmal_msg(instance, &m,
1147 sizeof(m.u.port_action_handle),
1148 &rmsg, &rmsg_handle);
1149 if (ret)
1150 return ret;
1151
1152 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1153 /* got an unexpected message type in reply */
1154 ret = -EINVAL;
1155 goto release_msg;
1156 }
1157
1158 ret = -rmsg->u.port_action_reply.status;
1159
1160 pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d) connect component:0x%x connect port:%d\n",
1161 __func__,
1162 ret, port->component->handle, port->handle,
1163 port_action_type_names[action_type],
1164 action_type, connect_component_handle, connect_port_handle);
1165
1166 release_msg:
1167 vchi_held_msg_release(&rmsg_handle);
1168
1169 return ret;
1170 }
1171
/* set a port parameter on the VPU.
 *
 * value/value_size is the raw parameter payload; the 2-word (id, size)
 * header required by the wire format is added here.
 */
static int port_parameter_set(struct vchiq_mmal_instance *instance,
			      struct vchiq_mmal_port *port,
			      u32 parameter_id, void *value, u32 value_size)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	VCHI_HELD_MSG_T rmsg_handle;

	/* bound the copy to the space available in the message payload;
	 * an oversized value would otherwise overflow the fixed-size
	 * m.u.port_parameter_set on the stack
	 */
	if (value_size > sizeof(m.u.port_parameter_set.value))
		return -EINVAL;

	m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_SET;

	m.u.port_parameter_set.component_handle = port->component->handle;
	m.u.port_parameter_set.port_handle = port->handle;
	m.u.port_parameter_set.id = parameter_id;
	/* size sent to VC includes the 2-word (id, size) header */
	m.u.port_parameter_set.size = (2 * sizeof(u32)) + value_size;
	memcpy(&m.u.port_parameter_set.value, value, value_size);

	/* 4 * sizeof(u32): component_handle, port_handle, id and size */
	ret = send_synchronous_mmal_msg(instance, &m,
					(4 * sizeof(u32)) + value_size,
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_SET) {
		/* got an unexpected message type in reply */
		ret = -EINVAL;
		goto release_msg;
	}

	ret = -rmsg->u.port_parameter_set_reply.status;

	pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n",
		 __func__,
		 ret, port->component->handle, port->handle, parameter_id);

release_msg:
	vchi_held_msg_release(&rmsg_handle);

	return ret;
}
1212
/* get the current value of a port parameter from the VPU.
 *
 * On entry *value_size is the caller's buffer size; when the parameter
 * is larger than that (or the query failed) only *value_size bytes are
 * copied and *value_size is updated to the parameter's true size so the
 * caller can detect truncation.
 */
static int port_parameter_get(struct vchiq_mmal_instance *instance,
			      struct vchiq_mmal_port *port,
			      u32 parameter_id, void *value, u32 *value_size)
{
	int ret;
	struct mmal_msg m;
	struct mmal_msg *rmsg;
	VCHI_HELD_MSG_T rmsg_handle;

	m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_GET;

	m.u.port_parameter_get.component_handle = port->component->handle;
	m.u.port_parameter_get.port_handle = port->handle;
	m.u.port_parameter_get.id = parameter_id;
	/* size sent to VC includes the 2-word (id, size) header */
	m.u.port_parameter_get.size = (2 * sizeof(u32)) + *value_size;

	ret = send_synchronous_mmal_msg(instance, &m,
					sizeof(struct
					       mmal_msg_port_parameter_get),
					&rmsg, &rmsg_handle);
	if (ret)
		return ret;

	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_GET) {
		/* got an unexpected message type in reply */
		pr_err("Incorrect reply type %d\n", rmsg->h.type);
		ret = -EINVAL;
		goto release_msg;
	}

	ret = -rmsg->u.port_parameter_get_reply.status;
	/* port_parameter_get_reply.size includes the header,
	 * whilst *value_size doesn't.
	 */
	rmsg->u.port_parameter_get_reply.size -= (2 * sizeof(u32));

	if (ret || rmsg->u.port_parameter_get_reply.size > *value_size) {
		/* Copy only as much as we have space for
		 * but report true size of parameter
		 */
		/* NOTE(review): on the error branch this copies *value_size
		 * bytes even when the reply holds fewer — presumably safe
		 * because the reply value field is a fixed-size array at
		 * least as large as any caller buffer; confirm against
		 * mmal-msg.h
		 */
		memcpy(value, &rmsg->u.port_parameter_get_reply.value,
		       *value_size);
		*value_size = rmsg->u.port_parameter_get_reply.size;
	} else
		memcpy(value, &rmsg->u.port_parameter_get_reply.value,
		       rmsg->u.port_parameter_get_reply.size);

	pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__,
		 ret, port->component->handle, port->handle, parameter_id);

release_msg:
	vchi_held_msg_release(&rmsg_handle);

	return ret;
}
1268
/* disables a port and drains buffers from it */
static int port_disable(struct vchiq_mmal_instance *instance,
			struct vchiq_mmal_port *port)
{
	int ret;
	struct list_head *q, *buf_head;
	unsigned long flags = 0;

	if (!port->enabled)
		return 0;

	/* clear the flag before telling the VPU so no new buffers are
	 * submitted while the disable is in flight
	 */
	port->enabled = false;

	ret = port_action_port(instance, port,
			       MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
	if (ret == 0) {
		/*
		 * Drain all queued buffers on port. This should only
		 * apply to buffers that have been queued before the port
		 * has been enabled. If the port has been enabled and buffers
		 * passed, then the buffers should have been removed from this
		 * list, and we should get the relevant callbacks via VCHIQ
		 * to release the buffers.
		 */
		spin_lock_irqsave(&port->slock, flags);

		list_for_each_safe(buf_head, q, &port->buffers) {
			struct mmal_buffer *mmalbuf;

			mmalbuf = list_entry(buf_head, struct mmal_buffer,
					     list);
			list_del(buf_head);
			/* hand each buffer back as an empty (zero-length,
			 * zero-flags, unknown-timestamp) completion
			 */
			if (port->buffer_cb)
				port->buffer_cb(instance,
						port, 0, mmalbuf, 0, 0,
						MMAL_TIME_UNKNOWN,
						MMAL_TIME_UNKNOWN);
		}

		spin_unlock_irqrestore(&port->slock, flags);

		/* refresh the cached port info from the VPU */
		ret = port_info_get(instance, port);
	}

	return ret;
}
1315
/* enable a port
 *
 * Sends the enable action to the VPU and then primes the port with up
 * to current_buffer.num queued buffer headers when a callback handler
 * is registered.
 */
static int port_enable(struct vchiq_mmal_instance *instance,
		       struct vchiq_mmal_port *port)
{
	unsigned int hdr_count;
	struct list_head *q, *buf_head;
	int ret;

	if (port->enabled)
		return 0;

	/* ensure there are enough buffers queued to cover the buffer headers */
	if (port->buffer_cb) {
		hdr_count = 0;
		list_for_each(buf_head, &port->buffers) {
			hdr_count++;
		}
		if (hdr_count < port->current_buffer.num)
			return -ENOSPC;
	}

	ret = port_action_port(instance, port,
			       MMAL_MSG_PORT_ACTION_TYPE_ENABLE);
	if (ret)
		goto done;

	port->enabled = true;

	if (port->buffer_cb) {
		/* send buffer headers to videocore */
		/* hdr_count starts at 1 so the post-increment break below
		 * submits exactly current_buffer.num buffers
		 */
		hdr_count = 1;
		list_for_each_safe(buf_head, q, &port->buffers) {
			struct mmal_buffer *mmalbuf;

			mmalbuf = list_entry(buf_head, struct mmal_buffer,
					     list);
			ret = buffer_from_host(instance, port, mmalbuf);
			if (ret)
				goto done;

			list_del(buf_head);
			hdr_count++;
			if (hdr_count > port->current_buffer.num)
				break;
		}
	}

	/* read back the port info the VPU may have updated on enable */
	ret = port_info_get(instance, port);

done:
	return ret;
}
1368
1369 /* ------------------------------------------------------------------
1370 * Exported API
1371 *------------------------------------------------------------------
1372 */
1373
vchiq_mmal_port_set_format(struct vchiq_mmal_instance * instance,struct vchiq_mmal_port * port)1374 int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance,
1375 struct vchiq_mmal_port *port)
1376 {
1377 int ret;
1378
1379 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1380 return -EINTR;
1381
1382 ret = port_info_set(instance, port);
1383 if (ret)
1384 goto release_unlock;
1385
1386 /* read what has actually been set */
1387 ret = port_info_get(instance, port);
1388
1389 release_unlock:
1390 mutex_unlock(&instance->vchiq_mutex);
1391
1392 return ret;
1393 }
1394
/* locked wrapper around port_parameter_set() */
int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance,
				  struct vchiq_mmal_port *port,
				  u32 parameter, void *value, u32 value_size)
{
	int status;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	status = port_parameter_set(instance, port, parameter,
				    value, value_size);

	mutex_unlock(&instance->vchiq_mutex);

	return status;
}
1410
/* locked wrapper around port_parameter_get() */
int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance,
				  struct vchiq_mmal_port *port,
				  u32 parameter, void *value, u32 *value_size)
{
	int status;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	status = port_parameter_get(instance, port, parameter,
				    value, value_size);

	mutex_unlock(&instance->vchiq_mutex);

	return status;
}
1426
1427 /* enable a port
1428 *
1429 * enables a port and queues buffers for satisfying callbacks if we
1430 * provide a callback handler
1431 */
vchiq_mmal_port_enable(struct vchiq_mmal_instance * instance,struct vchiq_mmal_port * port,vchiq_mmal_buffer_cb buffer_cb)1432 int vchiq_mmal_port_enable(struct vchiq_mmal_instance *instance,
1433 struct vchiq_mmal_port *port,
1434 vchiq_mmal_buffer_cb buffer_cb)
1435 {
1436 int ret;
1437
1438 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1439 return -EINTR;
1440
1441 /* already enabled - noop */
1442 if (port->enabled) {
1443 ret = 0;
1444 goto unlock;
1445 }
1446
1447 port->buffer_cb = buffer_cb;
1448
1449 ret = port_enable(instance, port);
1450
1451 unlock:
1452 mutex_unlock(&instance->vchiq_mutex);
1453
1454 return ret;
1455 }
1456
vchiq_mmal_port_disable(struct vchiq_mmal_instance * instance,struct vchiq_mmal_port * port)1457 int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance,
1458 struct vchiq_mmal_port *port)
1459 {
1460 int ret;
1461
1462 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1463 return -EINTR;
1464
1465 if (!port->enabled) {
1466 mutex_unlock(&instance->vchiq_mutex);
1467 return 0;
1468 }
1469
1470 ret = port_disable(instance, port);
1471
1472 mutex_unlock(&instance->vchiq_mutex);
1473
1474 return ret;
1475 }
1476
/* ports will be connected in a tunneled manner so data buffers
 * are not handled by client.
 *
 * Tears down any existing connection on src first; when dst is NULL
 * that teardown is the whole operation.  Otherwise src's video format
 * is copied to dst, pushed to the VPU, and the two ports are joined.
 */
int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
				   struct vchiq_mmal_port *src,
				   struct vchiq_mmal_port *dst)
{
	int ret;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/* disconnect ports if connected */
	if (src->connected) {
		ret = port_disable(instance, src);
		if (ret) {
			pr_err("failed disabling src port(%d)\n", ret);
			goto release_unlock;
		}

		/* do not need to disable the destination port as they
		 * are connected and it is done automatically
		 */

		ret = port_action_handle(instance, src,
					 MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT,
					 src->connected->component->handle,
					 src->connected->handle);
		if (ret < 0) {
			pr_err("failed disconnecting src port\n");
			goto release_unlock;
		}
		/* the old peer was disabled implicitly by the tunnel
		 * teardown; reflect that in our local state
		 */
		src->connected->enabled = false;
		src->connected = NULL;
	}

	if (!dst) {
		/* do not make new connection */
		ret = 0;
		pr_debug("not making new connection\n");
		goto release_unlock;
	}

	/* copy src port format to dst */
	dst->format.encoding = src->format.encoding;
	dst->es.video.width = src->es.video.width;
	dst->es.video.height = src->es.video.height;
	dst->es.video.crop.x = src->es.video.crop.x;
	dst->es.video.crop.y = src->es.video.crop.y;
	dst->es.video.crop.width = src->es.video.crop.width;
	dst->es.video.crop.height = src->es.video.crop.height;
	dst->es.video.frame_rate.num = src->es.video.frame_rate.num;
	dst->es.video.frame_rate.den = src->es.video.frame_rate.den;

	/* set new format */
	ret = port_info_set(instance, dst);
	if (ret) {
		pr_debug("setting port info failed\n");
		goto release_unlock;
	}

	/* read what has actually been set */
	ret = port_info_get(instance, dst);
	if (ret) {
		pr_debug("read back port info failed\n");
		goto release_unlock;
	}

	/* connect two ports together */
	ret = port_action_handle(instance, src,
				 MMAL_MSG_PORT_ACTION_TYPE_CONNECT,
				 dst->component->handle, dst->handle);
	if (ret < 0) {
		pr_debug("connecting port %d:%d to %d:%d failed\n",
			 src->component->handle, src->handle,
			 dst->component->handle, dst->handle);
		goto release_unlock;
	}
	src->connected = dst;

release_unlock:

	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
1563
/* submit a buffer to a port; buffers sent to a disabled port are parked
 * on the port's list and flushed to the VPU when the port is enabled.
 *
 * NOTE(review): always returns 0 — errors from buffer_from_host other
 * than -EINVAL are silently discarded; presumably intentional
 * (fire-and-forget submission), confirm with callers.
 */
int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
			     struct vchiq_mmal_port *port,
			     struct mmal_buffer *buffer)
{
	unsigned long flags = 0;
	int ret;

	ret = buffer_from_host(instance, port, buffer);
	if (ret == -EINVAL) {
		/* Port is disabled. Queue for when it is enabled. */
		spin_lock_irqsave(&port->slock, flags);
		list_add_tail(&buffer->list, &port->buffers);
		spin_unlock_irqrestore(&port->slock, flags);
	}

	return 0;
}
1581
mmal_vchi_buffer_init(struct vchiq_mmal_instance * instance,struct mmal_buffer * buf)1582 int mmal_vchi_buffer_init(struct vchiq_mmal_instance *instance,
1583 struct mmal_buffer *buf)
1584 {
1585 struct mmal_msg_context *msg_context = get_msg_context(instance);
1586
1587 if (IS_ERR(msg_context))
1588 return (PTR_ERR(msg_context));
1589
1590 buf->msg_context = msg_context;
1591 return 0;
1592 }
1593
mmal_vchi_buffer_cleanup(struct mmal_buffer * buf)1594 int mmal_vchi_buffer_cleanup(struct mmal_buffer *buf)
1595 {
1596 struct mmal_msg_context *msg_context = buf->msg_context;
1597
1598 if (msg_context)
1599 release_msg_context(msg_context);
1600 buf->msg_context = NULL;
1601
1602 return 0;
1603 }
1604
/* Initialise a mmal component and its ports
 *
 * Creates the named component on the VPU, then queries the info of its
 * control, input, output and clock ports.  On any failure after
 * creation the component is destroyed again before returning.
 */
int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
			      const char *name,
			      struct vchiq_mmal_component **component_out)
{
	int ret;
	int idx; /* port index */
	struct vchiq_mmal_component *component;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/* components come from a fixed-size array in the instance */
	if (instance->component_idx == VCHIQ_MMAL_MAX_COMPONENTS) {
		ret = -EINVAL; /* todo is this correct error? */
		goto unlock;
	}

	component = &instance->component[instance->component_idx];

	ret = create_component(instance, component, name);
	if (ret < 0)
		goto unlock;

	/* ports info needs gathering */
	component->control.type = MMAL_PORT_TYPE_CONTROL;
	component->control.index = 0;
	component->control.component = component;
	spin_lock_init(&component->control.slock);
	INIT_LIST_HEAD(&component->control.buffers);
	ret = port_info_get(instance, &component->control);
	if (ret < 0)
		goto release_component;

	/* component->inputs/outputs/clocks were filled in by
	 * create_component() from the VPU's reply
	 */
	for (idx = 0; idx < component->inputs; idx++) {
		component->input[idx].type = MMAL_PORT_TYPE_INPUT;
		component->input[idx].index = idx;
		component->input[idx].component = component;
		spin_lock_init(&component->input[idx].slock);
		INIT_LIST_HEAD(&component->input[idx].buffers);
		ret = port_info_get(instance, &component->input[idx]);
		if (ret < 0)
			goto release_component;
	}

	for (idx = 0; idx < component->outputs; idx++) {
		component->output[idx].type = MMAL_PORT_TYPE_OUTPUT;
		component->output[idx].index = idx;
		component->output[idx].component = component;
		spin_lock_init(&component->output[idx].slock);
		INIT_LIST_HEAD(&component->output[idx].buffers);
		ret = port_info_get(instance, &component->output[idx]);
		if (ret < 0)
			goto release_component;
	}

	for (idx = 0; idx < component->clocks; idx++) {
		component->clock[idx].type = MMAL_PORT_TYPE_CLOCK;
		component->clock[idx].index = idx;
		component->clock[idx].component = component;
		spin_lock_init(&component->clock[idx].slock);
		INIT_LIST_HEAD(&component->clock[idx].buffers);
		ret = port_info_get(instance, &component->clock[idx]);
		if (ret < 0)
			goto release_component;
	}

	/* only claim the array slot once everything succeeded */
	instance->component_idx++;

	*component_out = component;

	mutex_unlock(&instance->vchiq_mutex);

	return 0;

release_component:
	destroy_component(instance, component);
unlock:
	mutex_unlock(&instance->vchiq_mutex);

	return ret;
}
1688
1689 /*
1690 * cause a mmal component to be destroyed
1691 */
vchiq_mmal_component_finalise(struct vchiq_mmal_instance * instance,struct vchiq_mmal_component * component)1692 int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance,
1693 struct vchiq_mmal_component *component)
1694 {
1695 int ret;
1696
1697 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1698 return -EINTR;
1699
1700 if (component->enabled)
1701 ret = disable_component(instance, component);
1702
1703 ret = destroy_component(instance, component);
1704
1705 mutex_unlock(&instance->vchiq_mutex);
1706
1707 return ret;
1708 }
1709
1710 /*
1711 * cause a mmal component to be enabled
1712 */
vchiq_mmal_component_enable(struct vchiq_mmal_instance * instance,struct vchiq_mmal_component * component)1713 int vchiq_mmal_component_enable(struct vchiq_mmal_instance *instance,
1714 struct vchiq_mmal_component *component)
1715 {
1716 int ret;
1717
1718 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1719 return -EINTR;
1720
1721 if (component->enabled) {
1722 mutex_unlock(&instance->vchiq_mutex);
1723 return 0;
1724 }
1725
1726 ret = enable_component(instance, component);
1727 if (ret == 0)
1728 component->enabled = true;
1729
1730 mutex_unlock(&instance->vchiq_mutex);
1731
1732 return ret;
1733 }
1734
1735 /*
1736 * cause a mmal component to be enabled
1737 */
vchiq_mmal_component_disable(struct vchiq_mmal_instance * instance,struct vchiq_mmal_component * component)1738 int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance,
1739 struct vchiq_mmal_component *component)
1740 {
1741 int ret;
1742
1743 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1744 return -EINTR;
1745
1746 if (!component->enabled) {
1747 mutex_unlock(&instance->vchiq_mutex);
1748 return 0;
1749 }
1750
1751 ret = disable_component(instance, component);
1752 if (ret == 0)
1753 component->enabled = false;
1754
1755 mutex_unlock(&instance->vchiq_mutex);
1756
1757 return ret;
1758 }
1759
/* locked wrapper around get_version() */
int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
		       u32 *major_out, u32 *minor_out)
{
	int status;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	status = get_version(instance, major_out, minor_out);

	mutex_unlock(&instance->vchiq_mutex);

	return status;
}
1774
/* tear down an instance: close the VCHI service and free all instance
 * resources.  Returns the close status (0 on success).
 */
int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
{
	int status = 0;

	if (!instance)
		return -EINVAL;

	if (mutex_lock_interruptible(&instance->vchiq_mutex))
		return -EINTR;

	/* NOTE(review): presumably balances the vchi_service_release()
	 * done at the end of vchiq_mmal_init() so the service is "in use"
	 * while being closed — confirm against the VCHI use/release API
	 */
	vchi_service_use(instance->handle);

	status = vchi_service_close(instance->handle);
	if (status != 0)
		pr_err("mmal-vchiq: VCHIQ close failed\n");

	mutex_unlock(&instance->vchiq_mutex);

	/* service is closed; safe to free everything it referenced */
	vfree(instance->bulk_scratch);

	idr_destroy(&instance->context_map);

	kfree(instance);

	return status;
}
1801
vchiq_mmal_init(struct vchiq_mmal_instance ** out_instance)1802 int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
1803 {
1804 int status;
1805 struct vchiq_mmal_instance *instance;
1806 static VCHI_CONNECTION_T *vchi_connection;
1807 static VCHI_INSTANCE_T vchi_instance;
1808 SERVICE_CREATION_T params = {
1809 .version = VCHI_VERSION_EX(VC_MMAL_VER, VC_MMAL_MIN_VER),
1810 .service_id = VC_MMAL_SERVER_NAME,
1811 .connection = vchi_connection,
1812 .rx_fifo_size = 0,
1813 .tx_fifo_size = 0,
1814 .callback = service_callback,
1815 .callback_param = NULL,
1816 .want_unaligned_bulk_rx = 1,
1817 .want_unaligned_bulk_tx = 1,
1818 .want_crc = 0
1819 };
1820
1821 /* compile time checks to ensure structure size as they are
1822 * directly (de)serialised from memory.
1823 */
1824
1825 /* ensure the header structure has packed to the correct size */
1826 BUILD_BUG_ON(sizeof(struct mmal_msg_header) != 24);
1827
1828 /* ensure message structure does not exceed maximum length */
1829 BUILD_BUG_ON(sizeof(struct mmal_msg) > MMAL_MSG_MAX_SIZE);
1830
1831 /* mmal port struct is correct size */
1832 BUILD_BUG_ON(sizeof(struct mmal_port) != 64);
1833
1834 /* create a vchi instance */
1835 status = vchi_initialise(&vchi_instance);
1836 if (status) {
1837 pr_err("Failed to initialise VCHI instance (status=%d)\n",
1838 status);
1839 return -EIO;
1840 }
1841
1842 status = vchi_connect(NULL, 0, vchi_instance);
1843 if (status) {
1844 pr_err("Failed to connect VCHI instance (status=%d)\n", status);
1845 return -EIO;
1846 }
1847
1848 instance = kzalloc(sizeof(*instance), GFP_KERNEL);
1849
1850 if (!instance)
1851 return -ENOMEM;
1852
1853 mutex_init(&instance->vchiq_mutex);
1854
1855 instance->bulk_scratch = vmalloc(PAGE_SIZE);
1856
1857 spin_lock_init(&instance->context_map_lock);
1858 idr_init_base(&instance->context_map, 1);
1859
1860 params.callback_param = instance;
1861
1862 status = vchi_service_open(vchi_instance, ¶ms, &instance->handle);
1863 if (status) {
1864 pr_err("Failed to open VCHI service connection (status=%d)\n",
1865 status);
1866 goto err_close_services;
1867 }
1868
1869 vchi_service_release(instance->handle);
1870
1871 *out_instance = instance;
1872
1873 return 0;
1874
1875 err_close_services:
1876
1877 vchi_service_close(instance->handle);
1878 vfree(instance->bulk_scratch);
1879 kfree(instance);
1880 return -ENODEV;
1881 }
1882