/*
 * videobuf2-core.c - video buffer 2 core framework
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *	   Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * The vb2_thread implementation was based on code from videobuf-dvb.c:
 *	(c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mc.h>

#include <trace/events/vb2.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)				\
	do {							\
		if (debug >= level)				\
			pr_info("%s: " fmt, __func__, ## arg);	\
	} while (0)
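
/*
 * Example (illustrative only): with the module parameter set as debug=2,
 * a trace such as
 *
 *	dprintk(2, "qbuf of buffer %d succeeded\n", vb->index);
 *
 * is printed prefixed with the module and calling function name, while
 * dprintk(3, ...) stays silent at that debug level.
 */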

#ifdef CONFIG_VIDEO_ADV_DEBUG

/*
 * If advanced debugging is on, then count how often each op is called
 * successfully, which can either be per-buffer or per-queue.
 *
 * This makes it easy to check that the 'init' and 'cleanup'
 * (and variations thereof) stay balanced.
 */

#define log_memop(vb, op)					\
	dprintk(2, "call_memop(%p, %d, %s)%s\n",		\
		(vb)->vb2_queue, (vb)->index, #op,		\
		(vb)->vb2_queue->mem_ops->op ? "" : " (nop)")

#define call_memop(vb, op, args...)				\
({								\
	struct vb2_queue *_q = (vb)->vb2_queue;			\
	int err;						\
								\
	log_memop(vb, op);					\
	err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0;	\
	if (!err)						\
		(vb)->cnt_mem_ ## op++;				\
	err;							\
})

#define call_ptr_memop(vb, op, args...)				\
({								\
	struct vb2_queue *_q = (vb)->vb2_queue;			\
	void *ptr;						\
								\
	log_memop(vb, op);					\
	ptr = _q->mem_ops->op ? _q->mem_ops->op(args) : NULL;	\
	if (!IS_ERR_OR_NULL(ptr))				\
		(vb)->cnt_mem_ ## op++;				\
	ptr;							\
})

#define call_void_memop(vb, op, args...)			\
({								\
	struct vb2_queue *_q = (vb)->vb2_queue;			\
								\
	log_memop(vb, op);					\
	if (_q->mem_ops->op)					\
		_q->mem_ops->op(args);				\
	(vb)->cnt_mem_ ## op++;					\
})

#define log_qop(q, op)						\
	dprintk(2, "call_qop(%p, %s)%s\n", q, #op,		\
		(q)->ops->op ? "" : " (nop)")

#define call_qop(q, op, args...)				\
({								\
	int err;						\
								\
	log_qop(q, op);						\
	err = (q)->ops->op ? (q)->ops->op(args) : 0;		\
	if (!err)						\
		(q)->cnt_ ## op++;				\
	err;							\
})

#define call_void_qop(q, op, args...)				\
({								\
	log_qop(q, op);						\
	if ((q)->ops->op)					\
		(q)->ops->op(args);				\
	(q)->cnt_ ## op++;					\
})

#define log_vb_qop(vb, op, args...)				\
	dprintk(2, "call_vb_qop(%p, %d, %s)%s\n",		\
		(vb)->vb2_queue, (vb)->index, #op,		\
		(vb)->vb2_queue->ops->op ? "" : " (nop)")

#define call_vb_qop(vb, op, args...)				\
({								\
	int err;						\
								\
	log_vb_qop(vb, op);					\
	err = (vb)->vb2_queue->ops->op ?			\
		(vb)->vb2_queue->ops->op(args) : 0;		\
	if (!err)						\
		(vb)->cnt_ ## op++;				\
	err;							\
})

#define call_void_vb_qop(vb, op, args...)			\
({								\
	log_vb_qop(vb, op);					\
	if ((vb)->vb2_queue->ops->op)				\
		(vb)->vb2_queue->ops->op(args);			\
	(vb)->cnt_ ## op++;					\
})

#else

#define call_memop(vb, op, args...)				\
	((vb)->vb2_queue->mem_ops->op ?				\
		(vb)->vb2_queue->mem_ops->op(args) : 0)

#define call_ptr_memop(vb, op, args...)				\
	((vb)->vb2_queue->mem_ops->op ?				\
		(vb)->vb2_queue->mem_ops->op(args) : NULL)

#define call_void_memop(vb, op, args...)			\
	do {							\
		if ((vb)->vb2_queue->mem_ops->op)		\
			(vb)->vb2_queue->mem_ops->op(args);	\
	} while (0)

#define call_qop(q, op, args...)				\
	((q)->ops->op ? (q)->ops->op(args) : 0)

#define call_void_qop(q, op, args...)				\
	do {							\
		if ((q)->ops->op)				\
			(q)->ops->op(args);			\
	} while (0)

#define call_vb_qop(vb, op, args...)				\
	((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)

#define call_void_vb_qop(vb, op, args...)			\
	do {							\
		if ((vb)->vb2_queue->ops->op)			\
			(vb)->vb2_queue->ops->op(args);		\
	} while (0)

#endif

#define call_bufop(q, op, args...)				\
({								\
	int ret = 0;						\
								\
	if (q && q->buf_ops && q->buf_ops->op)			\
		ret = q->buf_ops->op(args);			\
	ret;							\
})

#define call_void_bufop(q, op, args...)				\
({								\
	if (q && q->buf_ops && q->buf_ops->op)			\
		q->buf_ops->op(args);				\
})

static void __vb2_queue_cancel(struct vb2_queue *q);
static void __enqueue_in_driver(struct vb2_buffer *vb);

/*
 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
 */
static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	int plane;
	int ret = -ENOMEM;

	/*
	 * Allocate memory for all planes in this buffer
	 * NOTE: mmapped areas should be page aligned
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		unsigned long size = PAGE_ALIGN(vb->planes[plane].length);

		mem_priv = call_ptr_memop(vb, alloc,
				q->alloc_devs[plane] ? : q->dev,
				q->dma_attrs, size, q->dma_dir, q->gfp_flags);
		if (IS_ERR_OR_NULL(mem_priv)) {
			if (mem_priv)
				ret = PTR_ERR(mem_priv);
			goto free;
		}

		/* Associate allocator private data with this plane */
		vb->planes[plane].mem_priv = mem_priv;
	}

	return 0;
free:
	/* Free already allocated memory if one of the allocations failed */
	for (; plane > 0; --plane) {
		call_void_memop(vb, put, vb->planes[plane - 1].mem_priv);
		vb->planes[plane - 1].mem_priv = NULL;
	}

	return ret;
}

/*
 * __vb2_buf_mem_free() - free memory of the given buffer
 */
static void __vb2_buf_mem_free(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		call_void_memop(vb, put, vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
		dprintk(3, "freed plane %d of buffer %d\n", plane, vb->index);
	}
}

/*
 * __vb2_buf_userptr_put() - release userspace memory associated with
 * a USERPTR buffer
 */
static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
	}
}

/*
 * __vb2_plane_dmabuf_put() - release memory associated with
 * a DMABUF shared plane
 */
static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
{
	if (!p->mem_priv)
		return;

	if (p->dbuf_mapped)
		call_void_memop(vb, unmap_dmabuf, p->mem_priv);

	call_void_memop(vb, detach_dmabuf, p->mem_priv);
	dma_buf_put(p->dbuf);
	p->mem_priv = NULL;
	p->dbuf = NULL;
	p->dbuf_mapped = 0;
}

/*
 * __vb2_buf_dmabuf_put() - release memory associated with
 * a DMABUF shared buffer
 */
static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane)
		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
}

/*
 * __setup_offsets() - setup unique offsets ("cookies") for every plane in
 * the buffer.
 */
static void __setup_offsets(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int plane;
	unsigned long off = 0;

	if (vb->index) {
		struct vb2_buffer *prev = q->bufs[vb->index - 1];
		struct vb2_plane *p = &prev->planes[prev->num_planes - 1];

		off = PAGE_ALIGN(p->m.offset + p->length);
	}

	for (plane = 0; plane < vb->num_planes; ++plane) {
		vb->planes[plane].m.offset = off;

		dprintk(3, "buffer %d, plane %d offset 0x%08lx\n",
			vb->index, plane, off);

		off += vb->planes[plane].length;
		off = PAGE_ALIGN(off);
	}
}
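
/*
 * A worked example of the offset scheme (values are illustrative): for two
 * buffers with two planes of 100 KiB and 200 KiB each, on 4 KiB pages, the
 * per-plane "cookies" become:
 *
 *	buffer 0, plane 0: offset 0x00000000 (length 0x19000)
 *	buffer 0, plane 1: offset 0x00019000 (length 0x32000)
 *	buffer 1, plane 0: offset 0x0004b000
 *	buffer 1, plane 1: offset 0x00064000
 *
 * Each offset is page-aligned and unique across the queue, so userspace can
 * pass it as the mmap() offset to select exactly one plane.
 */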

/*
 * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
 * video buffer memory for all buffers/planes on the queue and initializes the
 * queue
 *
 * Returns the number of buffers successfully allocated.
 */
static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
			     unsigned int num_buffers, unsigned int num_planes,
			     const unsigned plane_sizes[VB2_MAX_PLANES])
{
	unsigned int buffer, plane;
	struct vb2_buffer *vb;
	int ret;

	/* Ensure that q->num_buffers+num_buffers is below VB2_MAX_FRAME */
	num_buffers = min_t(unsigned int, num_buffers,
			    VB2_MAX_FRAME - q->num_buffers);

	for (buffer = 0; buffer < num_buffers; ++buffer) {
		/* Allocate videobuf buffer structures */
		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
		if (!vb) {
			dprintk(1, "memory alloc for buffer struct failed\n");
			break;
		}

		vb->state = VB2_BUF_STATE_DEQUEUED;
		vb->vb2_queue = q;
		vb->num_planes = num_planes;
		vb->index = q->num_buffers + buffer;
		vb->type = q->type;
		vb->memory = memory;
		for (plane = 0; plane < num_planes; ++plane) {
			vb->planes[plane].length = plane_sizes[plane];
			vb->planes[plane].min_length = plane_sizes[plane];
		}
		q->bufs[vb->index] = vb;

		/* Allocate video buffer memory for the MMAP type */
		if (memory == VB2_MEMORY_MMAP) {
			ret = __vb2_buf_mem_alloc(vb);
			if (ret) {
				dprintk(1, "failed allocating memory for buffer %d\n",
					buffer);
				q->bufs[vb->index] = NULL;
				kfree(vb);
				break;
			}
			__setup_offsets(vb);
			/*
			 * Call the driver-provided buffer initialization
			 * callback, if given. An error in initialization
			 * results in queue setup failure.
			 */
			ret = call_vb_qop(vb, buf_init, vb);
			if (ret) {
				dprintk(1, "buffer %d %p initialization failed\n",
					buffer, vb);
				__vb2_buf_mem_free(vb);
				q->bufs[vb->index] = NULL;
				kfree(vb);
				break;
			}
		}
	}

	dprintk(1, "allocated %d buffers, %d plane(s) each\n",
		buffer, num_planes);

	return buffer;
}

/*
 * __vb2_free_mem() - release all video buffer memory for a given queue
 */
static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
{
	unsigned int buffer;
	struct vb2_buffer *vb;

	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		vb = q->bufs[buffer];
		if (!vb)
			continue;

		/* Free MMAP buffers or release USERPTR buffers */
		if (q->memory == VB2_MEMORY_MMAP)
			__vb2_buf_mem_free(vb);
		else if (q->memory == VB2_MEMORY_DMABUF)
			__vb2_buf_dmabuf_put(vb);
		else
			__vb2_buf_userptr_put(vb);
	}
}

/*
 * __vb2_queue_free() - free buffers at the end of the queue - video memory and
 * related information, if no buffers are left return the queue to an
 * uninitialized state. Might be called even if the queue has already been freed.
 */
static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
{
	unsigned int buffer;

	/*
	 * Sanity check: when preparing a buffer the queue lock is released for
	 * a short while (see __buf_prepare for the details), which would allow
	 * a race with a reqbufs which can call this function. Removing the
	 * buffers from underneath __buf_prepare is obviously a bad idea, so we
	 * check if any of the buffers is in the state PREPARING, and if so we
	 * just return -EAGAIN.
	 */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		if (q->bufs[buffer] == NULL)
			continue;
		if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) {
			dprintk(1, "preparing buffers, cannot free\n");
			return -EAGAIN;
		}
	}

	/* Call driver-provided cleanup function for each buffer, if provided */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		struct vb2_buffer *vb = q->bufs[buffer];

		if (vb && vb->planes[0].mem_priv)
			call_void_vb_qop(vb, buf_cleanup, vb);
	}

	/* Release video buffer memory */
	__vb2_free_mem(q, buffers);

#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Check that all the calls were balanced during the life-time of this
	 * queue. If not (or if the debug level is 1 or up), then dump the
	 * counters to the kernel log.
	 */
	if (q->num_buffers) {
		bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
				  q->cnt_wait_prepare != q->cnt_wait_finish;

		if (unbalanced || debug) {
			pr_info("counters for queue %p:%s\n", q,
				unbalanced ? " UNBALANCED!" : "");
			pr_info("     setup: %u start_streaming: %u stop_streaming: %u\n",
				q->cnt_queue_setup, q->cnt_start_streaming,
				q->cnt_stop_streaming);
			pr_info("     wait_prepare: %u wait_finish: %u\n",
				q->cnt_wait_prepare, q->cnt_wait_finish);
		}
		q->cnt_queue_setup = 0;
		q->cnt_wait_prepare = 0;
		q->cnt_wait_finish = 0;
		q->cnt_start_streaming = 0;
		q->cnt_stop_streaming = 0;
	}
	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
		struct vb2_buffer *vb = q->bufs[buffer];
		bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
				  vb->cnt_mem_prepare != vb->cnt_mem_finish ||
				  vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
				  vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
				  vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
				  vb->cnt_buf_queue != vb->cnt_buf_done ||
				  vb->cnt_buf_prepare != vb->cnt_buf_finish ||
				  vb->cnt_buf_init != vb->cnt_buf_cleanup;

		if (unbalanced || debug) {
			pr_info("   counters for queue %p, buffer %d:%s\n",
				q, buffer, unbalanced ? " UNBALANCED!" : "");
			pr_info("     buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
				vb->cnt_buf_init, vb->cnt_buf_cleanup,
				vb->cnt_buf_prepare, vb->cnt_buf_finish);
			pr_info("     buf_queue: %u buf_done: %u\n",
				vb->cnt_buf_queue, vb->cnt_buf_done);
			pr_info("     alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
				vb->cnt_mem_alloc, vb->cnt_mem_put,
				vb->cnt_mem_prepare, vb->cnt_mem_finish,
				vb->cnt_mem_mmap);
			pr_info("     get_userptr: %u put_userptr: %u\n",
				vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
			pr_info("     attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n",
				vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf,
				vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
			pr_info("     get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n",
				vb->cnt_mem_get_dmabuf,
				vb->cnt_mem_num_users,
				vb->cnt_mem_vaddr,
				vb->cnt_mem_cookie);
		}
	}
#endif

	/* Free videobuf buffers */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		kfree(q->bufs[buffer]);
		q->bufs[buffer] = NULL;
	}

	q->num_buffers -= buffers;
	if (!q->num_buffers) {
		q->memory = VB2_MEMORY_UNKNOWN;
		INIT_LIST_HEAD(&q->queued_list);
	}
	return 0;
}

bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
{
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		void *mem_priv = vb->planes[plane].mem_priv;
		/*
		 * If num_users() has not been provided, call_memop
		 * will return 0, apparently nobody cares about this
		 * case anyway. If num_users() returns more than 1,
		 * we are not the only user of the plane's memory.
		 */
		if (mem_priv && call_memop(vb, num_users, mem_priv) > 1)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(vb2_buffer_in_use);

/*
 * __buffers_in_use() - return true if any buffers on the queue are in use and
 * the queue cannot be freed (by means of a REQBUFS(0) call)
 */
static bool __buffers_in_use(struct vb2_queue *q)
{
	unsigned int buffer;

	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
		if (vb2_buffer_in_use(q, q->bufs[buffer]))
			return true;
	}
	return false;
}

void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb)
{
	call_void_bufop(q, fill_user_buffer, q->bufs[index], pb);
}
EXPORT_SYMBOL_GPL(vb2_core_querybuf);

/*
 * __verify_userptr_ops() - verify that all memory operations required for
 * USERPTR queue type have been provided
 */
static int __verify_userptr_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
	    !q->mem_ops->put_userptr)
		return -EINVAL;

	return 0;
}

/*
 * __verify_mmap_ops() - verify that all memory operations required for
 * MMAP queue type have been provided
 */
static int __verify_mmap_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
	    !q->mem_ops->put || !q->mem_ops->mmap)
		return -EINVAL;

	return 0;
}

/*
 * __verify_dmabuf_ops() - verify that all memory operations required for
 * DMABUF queue type have been provided
 */
static int __verify_dmabuf_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
	    !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
	    !q->mem_ops->unmap_dmabuf)
		return -EINVAL;

	return 0;
}

int vb2_verify_memory_type(struct vb2_queue *q,
		enum vb2_memory memory, unsigned int type)
{
	if (memory != VB2_MEMORY_MMAP && memory != VB2_MEMORY_USERPTR &&
	    memory != VB2_MEMORY_DMABUF) {
		dprintk(1, "unsupported memory type\n");
		return -EINVAL;
	}

	if (type != q->type) {
		dprintk(1, "requested type is incorrect\n");
		return -EINVAL;
	}

	/*
	 * Make sure all the required memory ops for given memory type
	 * are available.
	 */
	if (memory == VB2_MEMORY_MMAP && __verify_mmap_ops(q)) {
		dprintk(1, "MMAP for current setup unsupported\n");
		return -EINVAL;
	}

	if (memory == VB2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
		dprintk(1, "USERPTR for current setup unsupported\n");
		return -EINVAL;
	}

	if (memory == VB2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
		dprintk(1, "DMABUF for current setup unsupported\n");
		return -EINVAL;
	}

	/*
	 * Place the busy tests at the end: -EBUSY can be ignored when
	 * create_bufs is called with count == 0, but count == 0 should still
	 * do the memory and type validation.
	 */
	if (vb2_fileio_is_active(q)) {
		dprintk(1, "file io in progress\n");
		return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL(vb2_verify_memory_type);

int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
		unsigned int *count)
{
	unsigned int num_buffers, allocated_buffers, num_planes = 0;
	unsigned plane_sizes[VB2_MAX_PLANES] = { };
	int ret;

	if (q->streaming) {
		dprintk(1, "streaming active\n");
		return -EBUSY;
	}

	if (*count == 0 || q->num_buffers != 0 ||
	    (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory)) {
		/*
		 * We already have buffers allocated, so first check if they
		 * are not in use and can be freed.
		 */
		mutex_lock(&q->mmap_lock);
		if (q->memory == VB2_MEMORY_MMAP && __buffers_in_use(q)) {
			mutex_unlock(&q->mmap_lock);
			dprintk(1, "memory in use, cannot free\n");
			return -EBUSY;
		}

		/*
		 * Call queue_cancel to clean up any buffers in the PREPARED or
		 * QUEUED state which is possible if buffers were prepared or
		 * queued without ever calling STREAMON.
		 */
		__vb2_queue_cancel(q);
		ret = __vb2_queue_free(q, q->num_buffers);
		mutex_unlock(&q->mmap_lock);
		if (ret)
			return ret;

		/*
		 * In case of REQBUFS(0) return immediately without calling
		 * driver's queue_setup() callback and allocating resources.
		 */
		if (*count == 0)
			return 0;
	}

	/*
	 * Make sure the requested values and current defaults are sane.
	 */
	WARN_ON(q->min_buffers_needed > VB2_MAX_FRAME);
	num_buffers = max_t(unsigned int, *count, q->min_buffers_needed);
	num_buffers = min_t(unsigned int, num_buffers, VB2_MAX_FRAME);
	memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
	q->memory = memory;

	/*
	 * Ask the driver how many buffers and planes per buffer it requires.
	 * Driver also sets the size and allocator context for each plane.
	 */
	ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes,
		       plane_sizes, q->alloc_devs);
	if (ret)
		return ret;

	/* Finally, allocate buffers and video memory */
	allocated_buffers =
		__vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes);
	if (allocated_buffers == 0) {
		dprintk(1, "memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * There is no point in continuing if we can't allocate the minimum
	 * number of buffers needed by this vb2_queue.
	 */
	if (allocated_buffers < q->min_buffers_needed)
		ret = -ENOMEM;

	/*
	 * Check if driver can handle the allocated number of buffers.
	 */
	if (!ret && allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;
		/*
		 * num_planes is set by the previous queue_setup(), but since it
		 * signals to queue_setup() whether it is called from create_bufs()
		 * vs reqbufs() we zero it here to signal that queue_setup() is
		 * called for the reqbufs() case.
		 */
		num_planes = 0;

		ret = call_qop(q, queue_setup, q, &num_buffers,
			       &num_planes, plane_sizes, q->alloc_devs);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error
		 */
	}

	mutex_lock(&q->mmap_lock);
	q->num_buffers = allocated_buffers;

	if (ret < 0) {
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from q->num_buffers.
		 */
		__vb2_queue_free(q, allocated_buffers);
		mutex_unlock(&q->mmap_lock);
		return ret;
	}
	mutex_unlock(&q->mmap_lock);

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	*count = allocated_buffers;
	q->waiting_for_buffers = !q->is_output;

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_reqbufs);
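
/*
 * A minimal queue_setup() sketch for the reqbufs flow above (assumed driver
 * code; 'struct my_dev' and its 'fmt' field holding the negotiated image
 * size are hypothetical):
 *
 *	static int my_queue_setup(struct vb2_queue *q,
 *				  unsigned int *num_buffers,
 *				  unsigned int *num_planes,
 *				  unsigned int sizes[],
 *				  struct device *alloc_devs[])
 *	{
 *		struct my_dev *dev = vb2_get_drv_priv(q);
 *
 *		if (*num_planes)	// create_bufs(): only validate
 *			return sizes[0] < dev->fmt.sizeimage ? -EINVAL : 0;
 *		*num_planes = 1;	// reqbufs(): state requirements
 *		sizes[0] = dev->fmt.sizeimage;
 *		return 0;
 *	}
 *
 * This matches the num_planes convention documented above: zero on entry
 * means the reqbufs() path and the driver fills in its requirements,
 * non-zero means the create_bufs() path and the driver only validates.
 */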

int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
		unsigned int *count, unsigned requested_planes,
		const unsigned requested_sizes[])
{
	unsigned int num_planes = 0, num_buffers, allocated_buffers;
	unsigned plane_sizes[VB2_MAX_PLANES] = { };
	int ret;

	if (q->num_buffers == VB2_MAX_FRAME) {
		dprintk(1, "maximum number of buffers already allocated\n");
		return -ENOBUFS;
	}

	if (!q->num_buffers) {
		memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
		q->memory = memory;
		q->waiting_for_buffers = !q->is_output;
	}

	num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers);

	if (requested_planes && requested_sizes) {
		num_planes = requested_planes;
		memcpy(plane_sizes, requested_sizes, sizeof(plane_sizes));
	}

	/*
	 * Ask the driver whether the requested number of buffers, planes per
	 * buffer and their sizes are acceptable.
	 */
	ret = call_qop(q, queue_setup, q, &num_buffers,
		       &num_planes, plane_sizes, q->alloc_devs);
	if (ret)
		return ret;

	/* Finally, allocate buffers and video memory */
	allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers,
			num_planes, plane_sizes);
	if (allocated_buffers == 0) {
		dprintk(1, "memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * Check if driver can handle the so far allocated number of buffers.
	 */
	if (allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;

		/*
		 * q->num_buffers contains the total number of buffers that the
		 * queue driver has set up.
		 */
		ret = call_qop(q, queue_setup, q, &num_buffers,
			       &num_planes, plane_sizes, q->alloc_devs);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error
		 */
	}

	mutex_lock(&q->mmap_lock);
	q->num_buffers += allocated_buffers;

	if (ret < 0) {
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from q->num_buffers.
		 */
		__vb2_queue_free(q, allocated_buffers);
		mutex_unlock(&q->mmap_lock);
		return -ENOMEM;
	}
	mutex_unlock(&q->mmap_lock);

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	*count = allocated_buffers;

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_create_bufs);

void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
{
	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
		return NULL;

	return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
}
EXPORT_SYMBOL_GPL(vb2_plane_vaddr);

void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
{
	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
		return NULL;

	return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv);
}
EXPORT_SYMBOL_GPL(vb2_plane_cookie);
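
/*
 * Illustrative use of the two plane accessors above (sketched driver code,
 * not part of the framework): a CPU-fill path might do
 *
 *	void *vaddr = vb2_plane_vaddr(vb, 0);
 *
 *	if (vaddr) {
 *		memset(vaddr, 0, vb2_plane_size(vb, 0));
 *		vb2_set_plane_payload(vb, 0, vb->planes[0].length);
 *	}
 *
 * while a DMA-based driver would instead program its hardware with the
 * allocator cookie from vb2_plane_cookie() (for the dma-contig allocator
 * that cookie points to a dma_addr_t).
 */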

void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned long flags;
	unsigned int plane;

	if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))
		return;

	if (WARN_ON(state != VB2_BUF_STATE_DONE &&
		    state != VB2_BUF_STATE_ERROR &&
		    state != VB2_BUF_STATE_QUEUED &&
		    state != VB2_BUF_STATE_REQUEUEING))
		state = VB2_BUF_STATE_ERROR;

#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Although this is not a callback, it still does have to balance
	 * with the buf_queue op. So update this counter manually.
	 */
	vb->cnt_buf_done++;
#endif
	dprintk(4, "done processing on buffer %d, state: %d\n",
		vb->index, state);

	if (state != VB2_BUF_STATE_QUEUED &&
	    state != VB2_BUF_STATE_REQUEUEING) {
		/* sync buffers */
		for (plane = 0; plane < vb->num_planes; ++plane)
			call_void_memop(vb, finish, vb->planes[plane].mem_priv);
	}

	spin_lock_irqsave(&q->done_lock, flags);
	if (state == VB2_BUF_STATE_QUEUED ||
	    state == VB2_BUF_STATE_REQUEUEING) {
		vb->state = VB2_BUF_STATE_QUEUED;
	} else {
		/* Add the buffer to the done buffers list */
		list_add_tail(&vb->done_entry, &q->done_list);
		vb->state = state;
	}
	atomic_dec(&q->owned_by_drv_count);
	spin_unlock_irqrestore(&q->done_lock, flags);

	trace_vb2_buf_done(q, vb);

	switch (state) {
	case VB2_BUF_STATE_QUEUED:
		return;
	case VB2_BUF_STATE_REQUEUEING:
		if (q->start_streaming_called)
			__enqueue_in_driver(vb);
		return;
	default:
		/* Inform any processes that may be waiting for buffers */
		wake_up(&q->done_wq);
		break;
	}
}
EXPORT_SYMBOL_GPL(vb2_buffer_done);
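
/*
 * Typical (sketched) use from a driver's interrupt handler, which owns the
 * buffer in ACTIVE state and hands it back to vb2 once the DMA completed
 * ('dev->current_buf' and 'dma_ok' are hypothetical):
 *
 *	struct vb2_buffer *vb = dev->current_buf;
 *
 *	vb2_set_plane_payload(vb, 0, dev->fmt.sizeimage);
 *	vb2_buffer_done(vb, dma_ok ? VB2_BUF_STATE_DONE
 *				   : VB2_BUF_STATE_ERROR);
 *
 * The wake_up() above is what unblocks a sleeping DQBUF in
 * __vb2_wait_for_done_vb() further down in this file.
 */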

void vb2_discard_done(struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	unsigned long flags;

	spin_lock_irqsave(&q->done_lock, flags);
	list_for_each_entry(vb, &q->done_list, done_entry)
		vb->state = VB2_BUF_STATE_ERROR;
	spin_unlock_irqrestore(&q->done_lock, flags);
}
EXPORT_SYMBOL_GPL(vb2_discard_done);

/*
 * __prepare_mmap() - prepare an MMAP buffer
 */
static int __prepare_mmap(struct vb2_buffer *vb, const void *pb)
{
	int ret = 0;

	if (pb)
		ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
				 vb, pb, vb->planes);
	return ret ? ret : call_vb_qop(vb, buf_prepare, vb);
}

/*
 * __prepare_userptr() - prepare a USERPTR buffer
 */
static int __prepare_userptr(struct vb2_buffer *vb, const void *pb)
{
	struct vb2_plane planes[VB2_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret = 0;
	bool reacquired = vb->planes[0].mem_priv == NULL;

	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
	/* Copy relevant information provided by the userspace */
	if (pb) {
		ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
				 vb, pb, planes);
		if (ret)
			return ret;
	}

	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Skip the plane if already verified */
		if (vb->planes[plane].m.userptr &&
		    vb->planes[plane].m.userptr == planes[plane].m.userptr &&
		    vb->planes[plane].length == planes[plane].length)
			continue;

		dprintk(3, "userspace address for plane %d changed, reacquiring memory\n",
			plane);

		/* Check if the provided plane buffer is large enough */
		if (planes[plane].length < vb->planes[plane].min_length) {
			dprintk(1, "provided buffer size %u is less than setup size %u for plane %d\n",
				planes[plane].length,
				vb->planes[plane].min_length,
				plane);
			ret = -EINVAL;
			goto err;
		}

		/* Release previously acquired memory if present */
		if (vb->planes[plane].mem_priv) {
			if (!reacquired) {
				reacquired = true;
				call_void_vb_qop(vb, buf_cleanup, vb);
			}
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
		}

		vb->planes[plane].mem_priv = NULL;
		vb->planes[plane].bytesused = 0;
		vb->planes[plane].length = 0;
		vb->planes[plane].m.userptr = 0;
		vb->planes[plane].data_offset = 0;

		/* Acquire each plane's memory */
		mem_priv = call_ptr_memop(vb, get_userptr,
				q->alloc_devs[plane] ? : q->dev,
				planes[plane].m.userptr,
				planes[plane].length, q->dma_dir);
		if (IS_ERR(mem_priv)) {
			dprintk(1, "failed acquiring userspace memory for plane %d\n",
				plane);
			ret = PTR_ERR(mem_priv);
			goto err;
		}
		vb->planes[plane].mem_priv = mem_priv;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		vb->planes[plane].bytesused = planes[plane].bytesused;
		vb->planes[plane].length = planes[plane].length;
		vb->planes[plane].m.userptr = planes[plane].m.userptr;
		vb->planes[plane].data_offset = planes[plane].data_offset;
	}

	if (reacquired) {
		/*
		 * One or more planes changed, so we must call buf_init to do
		 * the driver-specific initialization on the newly acquired
		 * buffer, if provided.
		 */
		ret = call_vb_qop(vb, buf_init, vb);
		if (ret) {
			dprintk(1, "buffer initialization failed\n");
			goto err;
		}
	}

	ret = call_vb_qop(vb, buf_prepare, vb);
	if (ret) {
		dprintk(1, "buffer preparation failed\n");
		call_void_vb_qop(vb, buf_cleanup, vb);
		goto err;
	}

	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
			call_void_memop(vb, put_userptr,
					vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
		vb->planes[plane].m.userptr = 0;
		vb->planes[plane].length = 0;
	}

	return ret;
}

/*
 * __prepare_dmabuf() - prepare a DMABUF buffer
 */
static int __prepare_dmabuf(struct vb2_buffer *vb, const void *pb)
{
	struct vb2_plane planes[VB2_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret = 0;
	bool reacquired = vb->planes[0].mem_priv == NULL;

	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
	/* Copy relevant information provided by the userspace */
	if (pb) {
		ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
				 vb, pb, planes);
		if (ret)
			return ret;
	}

	for (plane = 0; plane < vb->num_planes; ++plane) {
		struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);

		if (IS_ERR_OR_NULL(dbuf)) {
			dprintk(1, "invalid dmabuf fd for plane %d\n",
				plane);
			ret = -EINVAL;
			goto err;
		}

		/* use DMABUF size if length is not provided */
		if (planes[plane].length == 0)
			planes[plane].length = dbuf->size;

		if (planes[plane].length < vb->planes[plane].min_length) {
			dprintk(1, "invalid dmabuf length %u for plane %d, minimum length %u\n",
				planes[plane].length, plane,
				vb->planes[plane].min_length);
			dma_buf_put(dbuf);
			ret = -EINVAL;
			goto err;
		}

		/* Skip the plane if already verified */
		if (dbuf == vb->planes[plane].dbuf &&
		    vb->planes[plane].length == planes[plane].length) {
			dma_buf_put(dbuf);
			continue;
		}

		dprintk(3, "buffer for plane %d changed\n", plane);

		if (!reacquired) {
			reacquired = true;
			call_void_vb_qop(vb, buf_cleanup, vb);
		}

		/* Release previously acquired memory if present */
		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
		vb->planes[plane].bytesused = 0;
		vb->planes[plane].length = 0;
		vb->planes[plane].m.fd = 0;
		vb->planes[plane].data_offset = 0;

		/* Acquire each plane's memory */
		mem_priv = call_ptr_memop(vb, attach_dmabuf,
				q->alloc_devs[plane] ? : q->dev,
				dbuf, planes[plane].length, q->dma_dir);
		if (IS_ERR(mem_priv)) {
			dprintk(1, "failed to attach dmabuf\n");
			ret = PTR_ERR(mem_priv);
			dma_buf_put(dbuf);
			goto err;
		}

		vb->planes[plane].dbuf = dbuf;
		vb->planes[plane].mem_priv = mem_priv;
	}

	/*
	 * This pins the buffer(s) with dma_buf_map_attachment(). It's done
	 * here, while queueing the buffer(s), instead of just before the DMA,
	 * so userspace knows sooner rather than later if the dma-buf map
	 * fails.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
		if (ret) {
			dprintk(1, "failed to map dmabuf for plane %d\n",
				plane);
			goto err;
		}
		vb->planes[plane].dbuf_mapped = 1;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		vb->planes[plane].bytesused = planes[plane].bytesused;
		vb->planes[plane].length = planes[plane].length;
		vb->planes[plane].m.fd = planes[plane].m.fd;
		vb->planes[plane].data_offset = planes[plane].data_offset;
	}

	if (reacquired) {
		/*
		 * Call driver-specific initialization on the newly acquired buffer,
		 * if provided.
		 */
		ret = call_vb_qop(vb, buf_init, vb);
		if (ret) {
			dprintk(1, "buffer initialization failed\n");
			goto err;
		}
	}

	ret = call_vb_qop(vb, buf_prepare, vb);
	if (ret) {
		dprintk(1, "buffer preparation failed\n");
		call_void_vb_qop(vb, buf_cleanup, vb);
		goto err;
	}

	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	__vb2_buf_dmabuf_put(vb);

	return ret;
}

/*
 * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
 */
static void __enqueue_in_driver(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;

	vb->state = VB2_BUF_STATE_ACTIVE;
	atomic_inc(&q->owned_by_drv_count);

	trace_vb2_buf_queue(q, vb);

	call_void_vb_qop(vb, buf_queue, vb);
}

static int __buf_prepare(struct vb2_buffer *vb, const void *pb)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int plane;
	int ret;

	if (q->error) {
		dprintk(1, "fatal error occurred on queue\n");
		return -EIO;
	}

	vb->state = VB2_BUF_STATE_PREPARING;

	switch (q->memory) {
	case VB2_MEMORY_MMAP:
		ret = __prepare_mmap(vb, pb);
		break;
	case VB2_MEMORY_USERPTR:
		ret = __prepare_userptr(vb, pb);
		break;
	case VB2_MEMORY_DMABUF:
		ret = __prepare_dmabuf(vb, pb);
		break;
	default:
		WARN(1, "Invalid queue type\n");
		ret = -EINVAL;
	}

	if (ret) {
		dprintk(1, "buffer preparation failed: %d\n", ret);
		vb->state = VB2_BUF_STATE_DEQUEUED;
		return ret;
	}

	/* sync buffers */
	for (plane = 0; plane < vb->num_planes; ++plane)
		call_void_memop(vb, prepare, vb->planes[plane].mem_priv);

	vb->state = VB2_BUF_STATE_PREPARED;

	return 0;
}

int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
{
	struct vb2_buffer *vb;
	int ret;

	vb = q->bufs[index];
	if (vb->state != VB2_BUF_STATE_DEQUEUED) {
		dprintk(1, "invalid buffer state %d\n",
			vb->state);
		return -EINVAL;
	}

	ret = __buf_prepare(vb, pb);
	if (ret)
		return ret;

	/* Fill buffer information for the userspace */
	call_void_bufop(q, fill_user_buffer, vb, pb);

	dprintk(2, "prepare of buffer %d succeeded\n", vb->index);

	return ret;
}
EXPORT_SYMBOL_GPL(vb2_core_prepare_buf);

/*
 * vb2_start_streaming() - Attempt to start streaming.
 * @q:		videobuf2 queue
 *
 * Attempt to start streaming. When this function is called there must be
 * at least q->min_buffers_needed buffers queued up (i.e. the minimum
 * number of buffers required for the DMA engine to function). If the
 * @start_streaming op fails it is supposed to return all the driver-owned
 * buffers back to vb2 in state QUEUED. Check if that happened and if
 * not warn and reclaim them forcefully.
 */
static int vb2_start_streaming(struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	int ret;

	/*
	 * If any buffers were queued before streamon,
	 * we can now pass them to driver for processing.
	 */
	list_for_each_entry(vb, &q->queued_list, queued_entry)
		__enqueue_in_driver(vb);

	/* Tell the driver to start streaming */
	q->start_streaming_called = 1;
	ret = call_qop(q, start_streaming, q,
		       atomic_read(&q->owned_by_drv_count));
	if (!ret)
		return 0;

	q->start_streaming_called = 0;

	dprintk(1, "driver refused to start streaming\n");
	/*
	 * If you see this warning, then the driver isn't cleaning up properly
	 * after a failed start_streaming(). See the start_streaming()
	 * documentation in videobuf2-core.h for more information how buffers
	 * should be returned to vb2 in start_streaming().
	 */
	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
		unsigned i;

		/*
		 * Forcefully reclaim buffers if the driver did not
		 * correctly return them to vb2.
		 */
		for (i = 0; i < q->num_buffers; ++i) {
			vb = q->bufs[i];
			if (vb->state == VB2_BUF_STATE_ACTIVE)
				vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
		}
		/* Must be zero now */
		WARN_ON(atomic_read(&q->owned_by_drv_count));
	}
	/*
	 * If done_list is not empty, then start_streaming() didn't call
	 * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) but STATE_ERROR or
	 * STATE_DONE.
	 */
	WARN_ON(!list_empty(&q->done_list));
	return ret;
}

int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
{
	struct vb2_buffer *vb;
	int ret;

	if (q->error) {
		dprintk(1, "fatal error occurred on queue\n");
		return -EIO;
	}

	vb = q->bufs[index];

	switch (vb->state) {
	case VB2_BUF_STATE_DEQUEUED:
		ret = __buf_prepare(vb, pb);
		if (ret)
			return ret;
		break;
	case VB2_BUF_STATE_PREPARED:
		break;
	case VB2_BUF_STATE_PREPARING:
		dprintk(1, "buffer still being prepared\n");
		return -EINVAL;
	default:
		dprintk(1, "invalid buffer state %d\n", vb->state);
		return -EINVAL;
	}

	/*
	 * Add to the queued buffers list; a buffer will stay on it until
	 * dequeued in dqbuf.
	 */
	list_add_tail(&vb->queued_entry, &q->queued_list);
	q->queued_count++;
	q->waiting_for_buffers = false;
	vb->state = VB2_BUF_STATE_QUEUED;

	if (pb)
		call_void_bufop(q, copy_timestamp, vb, pb);

	trace_vb2_qbuf(q, vb);

	/*
	 * If already streaming, give the buffer to driver for processing.
	 * If not, the buffer will be given to driver on next streamon.
	 */
	if (q->start_streaming_called)
		__enqueue_in_driver(vb);

	/* Fill buffer information for the userspace */
	if (pb)
		call_void_bufop(q, fill_user_buffer, vb, pb);

	/*
	 * If streamon has been called, and we haven't yet called
	 * start_streaming() since not enough buffers were queued, and
	 * we now have reached the minimum number of queued buffers,
	 * then we can finally call start_streaming().
	 */
	if (q->streaming && !q->start_streaming_called &&
	    q->queued_count >= q->min_buffers_needed) {
		ret = vb2_start_streaming(q);
		if (ret)
			return ret;
	}

	dprintk(2, "qbuf of buffer %d succeeded\n", vb->index);
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_qbuf);

/*
 * __vb2_wait_for_done_vb() - wait for a buffer to become available
 * for dequeuing
 *
 * Will sleep if required, when nonblocking == false.
 */
static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
{
	/*
	 * All operations on vb_done_list are performed under done_lock
	 * spinlock protection. However, buffers may be removed from
	 * it and returned to userspace only while holding both driver's
	 * lock and the done_lock spinlock. Thus we can be sure that as
	 * long as we hold the driver's lock, the list will stay non-empty
	 * once a list_empty() check has found it non-empty.
	 */

	for (;;) {
		int ret;

		if (!q->streaming) {
			dprintk(1, "streaming off, will not wait for buffers\n");
			return -EINVAL;
		}

		if (q->error) {
			dprintk(1, "Queue in error state, will not wait for buffers\n");
			return -EIO;
		}

		if (q->last_buffer_dequeued) {
			dprintk(3, "last buffer dequeued already, will not wait for buffers\n");
			return -EPIPE;
		}

		if (!list_empty(&q->done_list)) {
			/*
			 * Found a buffer that we were waiting for.
			 */
			break;
		}

		if (nonblocking) {
			dprintk(3, "nonblocking and no buffers to dequeue, will not wait\n");
			return -EAGAIN;
		}

		/*
		 * We are streaming and blocking, wait for another buffer to
		 * become ready or for streamoff. Driver's lock is released to
		 * allow streamoff or qbuf to be called while waiting.
		 */
		call_void_qop(q, wait_prepare, q);

		/*
		 * All locks have been released, it is safe to sleep now.
		 */
		dprintk(3, "will sleep waiting for buffers\n");
		ret = wait_event_interruptible(q->done_wq,
				!list_empty(&q->done_list) || !q->streaming ||
				q->error);

		/*
		 * We need to reevaluate both conditions again after reacquiring
		 * the locks or return an error if one occurred.
		 */
		call_void_qop(q, wait_finish, q);
		if (ret) {
			dprintk(1, "sleep was interrupted\n");
			return ret;
		}
	}
	return 0;
}

/*
 * __vb2_get_done_vb() - get a buffer ready for dequeuing
 *
 * Will sleep if required, when nonblocking == false.
 */
static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
			     void *pb, int nonblocking)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * Wait for at least one buffer to become available on the done_list.
	 */
	ret = __vb2_wait_for_done_vb(q, nonblocking);
	if (ret)
		return ret;

	/*
	 * Driver's lock has been held since we last verified that done_list
	 * is not empty, so no need for another list_empty(done_list) check.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	*vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
	/*
	 * Only remove the buffer from done_list if all planes can be
	 * handled. Some cases such as V4L2 file I/O and DVB have pb
	 * == NULL; skip the check then as there's nothing to verify.
	 */
	if (pb)
		ret = call_bufop(q, verify_planes_array, *vb, pb);
	if (!ret)
		list_del(&(*vb)->done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);

	return ret;
}

int vb2_wait_for_all_buffers(struct vb2_queue *q)
{
	if (!q->streaming) {
		dprintk(1, "streaming off, will not wait for buffers\n");
		return -EINVAL;
	}

	if (q->start_streaming_called)
		wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count));
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);

/*
 * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
 */
static void __vb2_dqbuf(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int i;

	/* nothing to do if the buffer is already dequeued */
	if (vb->state == VB2_BUF_STATE_DEQUEUED)
		return;

	vb->state = VB2_BUF_STATE_DEQUEUED;

	/* unmap DMABUF buffer */
	if (q->memory == VB2_MEMORY_DMABUF)
		for (i = 0; i < vb->num_planes; ++i) {
			if (!vb->planes[i].dbuf_mapped)
				continue;
			call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
			vb->planes[i].dbuf_mapped = 0;
		}
}

int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
		   bool nonblocking)
{
	struct vb2_buffer *vb = NULL;
	int ret;

	ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
	if (ret < 0)
		return ret;

	switch (vb->state) {
	case VB2_BUF_STATE_DONE:
		dprintk(3, "returning done buffer\n");
		break;
	case VB2_BUF_STATE_ERROR:
		dprintk(3, "returning done buffer with errors\n");
		break;
	default:
		dprintk(1, "invalid buffer state\n");
		return -EINVAL;
	}

	call_void_vb_qop(vb, buf_finish, vb);

	if (pindex)
		*pindex = vb->index;

	/* Fill buffer information for the userspace */
	if (pb)
		call_void_bufop(q, fill_user_buffer, vb, pb);

	/* Remove from videobuf queue */
	list_del(&vb->queued_entry);
	q->queued_count--;

	trace_vb2_dqbuf(q, vb);

	/* go back to dequeued state */
	__vb2_dqbuf(vb);

	dprintk(2, "dqbuf of buffer %d, with state %d\n",
		vb->index, vb->state);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_dqbuf);
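
/*
 * The qbuf/dqbuf pair above implements the usual streaming cycle. Seen from
 * userspace through the V4L2 ioctl layer, it looks like this sketch
 * (process() and buffers[] are placeholders):
 *
 *	struct v4l2_buffer buf = {
 *		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory = V4L2_MEMORY_MMAP,
 *	};
 *
 *	while (streaming) {
 *		if (ioctl(fd, VIDIOC_DQBUF, &buf) < 0)	// may block here
 *			break;
 *		process(buffers[buf.index], buf.bytesused);
 *		ioctl(fd, VIDIOC_QBUF, &buf);		// requeue
 *	}
 *
 * VIDIOC_DQBUF ends up in vb2_core_dqbuf() and VIDIOC_QBUF in
 * vb2_core_qbuf(); with O_NONBLOCK the dequeue returns -EAGAIN instead of
 * sleeping, matching __vb2_wait_for_done_vb() above.
 */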

/*
 * __vb2_queue_cancel() - cancel and stop (pause) streaming
 *
 * Removes all queued buffers from driver's queue and all buffers queued by
 * userspace from videobuf's queue. Returns to state after reqbufs.
 */
static void __vb2_queue_cancel(struct vb2_queue *q)
{
	unsigned int i;

	/*
	 * Tell driver to stop all transactions and release all queued
	 * buffers.
	 */
	if (q->start_streaming_called)
		call_void_qop(q, stop_streaming, q);

	/*
	 * If you see this warning, then the driver isn't cleaning up properly
	 * in stop_streaming(). See the stop_streaming() documentation in
	 * videobuf2-core.h for more information how buffers should be returned
	 * to vb2 in stop_streaming().
	 */
	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
		for (i = 0; i < q->num_buffers; ++i)
			if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) {
				pr_warn("driver bug: stop_streaming operation is leaving buf %p in active state\n",
					q->bufs[i]);
				vb2_buffer_done(q->bufs[i], VB2_BUF_STATE_ERROR);
			}
		/* Must be zero now */
		WARN_ON(atomic_read(&q->owned_by_drv_count));
	}

	q->streaming = 0;
	q->start_streaming_called = 0;
	q->queued_count = 0;
	q->error = 0;

	/*
	 * Remove all buffers from videobuf's list...
	 */
	INIT_LIST_HEAD(&q->queued_list);
	/*
	 * ...and done list; userspace will not receive any buffers it
	 * has not already dequeued before initiating cancel.
	 */
	INIT_LIST_HEAD(&q->done_list);
	atomic_set(&q->owned_by_drv_count, 0);
	wake_up_all(&q->done_wq);

	/*
	 * Reinitialize all buffers for next use.
	 * Make sure to call buf_finish for any queued buffers. Normally
	 * that's done in dqbuf, but that's not going to happen when we
	 * cancel the whole queue. Note: this code belongs here, not in
	 * __vb2_dqbuf() since in vb2_core_dqbuf() there is a critical
	 * call to __fill_user_buffer() after buf_finish(). That order can't
	 * be changed, so we can't move the buf_finish() to __vb2_dqbuf().
	 */
	for (i = 0; i < q->num_buffers; ++i) {
		struct vb2_buffer *vb = q->bufs[i];

		if (vb->state == VB2_BUF_STATE_PREPARED ||
		    vb->state == VB2_BUF_STATE_QUEUED) {
			unsigned int plane;

			for (plane = 0; plane < vb->num_planes; ++plane)
				call_void_memop(vb, finish,
						vb->planes[plane].mem_priv);
		}

		if (vb->state != VB2_BUF_STATE_DEQUEUED) {
			vb->state = VB2_BUF_STATE_PREPARED;
			call_void_vb_qop(vb, buf_finish, vb);
		}
		__vb2_dqbuf(vb);
	}
}

int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
{
	int ret;

	if (type != q->type) {
		dprintk(1, "invalid stream type\n");
		return -EINVAL;
	}

	if (q->streaming) {
		dprintk(3, "already streaming\n");
		return 0;
	}

	if (!q->num_buffers) {
		dprintk(1, "no buffers have been allocated\n");
		return -EINVAL;
	}

	if (q->num_buffers < q->min_buffers_needed) {
		dprintk(1, "need at least %u allocated buffers\n",
			q->min_buffers_needed);
		return -EINVAL;
	}

	/*
	 * Tell driver to start streaming provided sufficient buffers
	 * are available.
	 */
	if (q->queued_count >= q->min_buffers_needed) {
		ret = v4l_vb2q_enable_media_source(q);
		if (ret)
			return ret;
		ret = vb2_start_streaming(q);
		if (ret) {
			__vb2_queue_cancel(q);
			return ret;
		}
	}

	q->streaming = 1;

	dprintk(3, "successful\n");
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_streamon);
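
/*
 * Sketch of the driver-side counterpart (assumed driver code; my_hw_start()
 * and my_return_all_buffers() are hypothetical helpers): start_streaming()
 * must either start the hardware or give every queued buffer back in state
 * QUEUED, which is exactly the contract vb2_start_streaming() enforces
 * above:
 *
 *	static int my_start_streaming(struct vb2_queue *q, unsigned int count)
 *	{
 *		struct my_dev *dev = vb2_get_drv_priv(q);
 *		int ret = my_hw_start(dev);
 *
 *		if (ret)	// return buffers via vb2_buffer_done(QUEUED)
 *			my_return_all_buffers(dev, VB2_BUF_STATE_QUEUED);
 *		return ret;
 *	}
 */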

void vb2_queue_error(struct vb2_queue *q)
{
	q->error = 1;

	wake_up_all(&q->done_wq);
}
EXPORT_SYMBOL_GPL(vb2_queue_error);

int vb2_core_streamoff(struct vb2_queue *q, unsigned int type)
{
	if (type != q->type) {
		dprintk(1, "invalid stream type\n");
		return -EINVAL;
	}

	/*
	 * Cancel will pause streaming and remove all buffers from the driver
	 * and videobuf, effectively returning control over them to userspace.
	 *
	 * Note that we do this even if q->streaming == 0: if you prepare or
	 * queue buffers, and then call streamoff without ever having called
	 * streamon, you would still expect those buffers to be returned to
	 * their normal dequeued state.
	 */
	__vb2_queue_cancel(q);
	q->waiting_for_buffers = !q->is_output;
	q->last_buffer_dequeued = false;

	dprintk(3, "successful\n");
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_core_streamoff);

/*
 * __find_plane_by_offset() - find plane associated with the given offset off
 */
static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
				  unsigned int *_buffer, unsigned int *_plane)
{
	struct vb2_buffer *vb;
	unsigned int buffer, plane;

	/*
	 * Go over all buffers and their planes, comparing the given offset
	 * with an offset assigned to each plane. If a match is found,
	 * return its buffer and plane numbers.
	 */
	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
		vb = q->bufs[buffer];

		for (plane = 0; plane < vb->num_planes; ++plane) {
			if (vb->planes[plane].m.offset == off) {
				*_buffer = buffer;
				*_plane = plane;
				return 0;
			}
		}
	}

	return -EINVAL;
}

int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
		    unsigned int index, unsigned int plane, unsigned int flags)
{
	struct vb2_buffer *vb = NULL;
	struct vb2_plane *vb_plane;
	int ret;
	struct dma_buf *dbuf;

	if (q->memory != VB2_MEMORY_MMAP) {
		dprintk(1, "queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	if (!q->mem_ops->get_dmabuf) {
		dprintk(1, "queue does not support DMA buffer exporting\n");
		return -EINVAL;
	}

	if (flags & ~(O_CLOEXEC | O_ACCMODE)) {
		dprintk(1, "queue supports only O_CLOEXEC and access mode flags\n");
		return -EINVAL;
	}
1855
1856 if (type != q->type) {
1857 dprintk(1, "invalid buffer type\n");
1858 return -EINVAL;
1859 }
1860
1861 if (index >= q->num_buffers) {
1862 dprintk(1, "buffer index out of range\n");
1863 return -EINVAL;
1864 }
1865
1866 vb = q->bufs[index];
1867
1868 if (plane >= vb->num_planes) {
1869 dprintk(1, "buffer plane out of range\n");
1870 return -EINVAL;
1871 }
1872
1873 if (vb2_fileio_is_active(q)) {
1874 dprintk(1, "expbuf: file io in progress\n");
1875 return -EBUSY;
1876 }
1877
1878 vb_plane = &vb->planes[plane];
1879
1880 dbuf = call_ptr_memop(vb, get_dmabuf, vb_plane->mem_priv,
1881 flags & O_ACCMODE);
1882 if (IS_ERR_OR_NULL(dbuf)) {
1883 dprintk(1, "failed to export buffer %d, plane %d\n",
1884 index, plane);
1885 return -EINVAL;
1886 }
1887
1888 ret = dma_buf_fd(dbuf, flags & ~O_ACCMODE);
1889 if (ret < 0) {
1890 dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
1891 index, plane, ret);
1892 dma_buf_put(dbuf);
1893 return ret;
1894 }
1895
1896 dprintk(3, "buffer %d, plane %d exported as %d descriptor\n",
1897 index, plane, ret);
1898 *fd = ret;
1899
1900 return 0;
1901 }
1902 EXPORT_SYMBOL_GPL(vb2_core_expbuf);
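
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * exporting plane 0 of buffer 0 as a read-only, close-on-exec dma-buf fd.
 * The wrapper name is hypothetical; on success the new file descriptor is
 * returned, otherwise a negative errno.
 */
static int __maybe_unused example_export_first_plane(struct vb2_queue *q)
{
	int fd;
	int ret;

	ret = vb2_core_expbuf(q, &fd, q->type, 0, 0, O_CLOEXEC | O_RDONLY);
	if (ret)
		return ret;
	/* fd now refers to a dma-buf that can be shared with other devices. */
	return fd;
}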
1903
1904 int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
1905 {
1906 unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
1907 struct vb2_buffer *vb;
1908 unsigned int buffer = 0, plane = 0;
1909 int ret;
1910 unsigned long length;
1911
1912 if (q->memory != VB2_MEMORY_MMAP) {
1913 dprintk(1, "queue is not currently set up for mmap\n");
1914 return -EINVAL;
1915 }
1916
1917 /*
1918 * Check memory area access mode.
1919 */
1920 if (!(vma->vm_flags & VM_SHARED)) {
1921 dprintk(1, "invalid vma flags, VM_SHARED needed\n");
1922 return -EINVAL;
1923 }
1924 if (q->is_output) {
1925 if (!(vma->vm_flags & VM_WRITE)) {
1926 dprintk(1, "invalid vma flags, VM_WRITE needed\n");
1927 return -EINVAL;
1928 }
1929 } else {
1930 if (!(vma->vm_flags & VM_READ)) {
1931 dprintk(1, "invalid vma flags, VM_READ needed\n");
1932 return -EINVAL;
1933 }
1934 }
1935 if (vb2_fileio_is_active(q)) {
1936 dprintk(1, "mmap: file io in progress\n");
1937 return -EBUSY;
1938 }
1939
1940 /*
1941 * Find the plane corresponding to the offset passed by userspace.
1942 */
1943 ret = __find_plane_by_offset(q, off, &buffer, &plane);
1944 if (ret)
1945 return ret;
1946
1947 vb = q->bufs[buffer];
1948
1949 /*
1950 	 * MMAP requires page-aligned buffers.
1951 	 * The buffer length was page-aligned at __vb2_buf_mem_alloc(),
1952 	 * so we need to do the same here.
1953 */
1954 length = PAGE_ALIGN(vb->planes[plane].length);
1955 if (length < (vma->vm_end - vma->vm_start)) {
1956 dprintk(1,
1957 "MMAP invalid, as it would overflow buffer length\n");
1958 return -EINVAL;
1959 }
1960
1961 mutex_lock(&q->mmap_lock);
1962 ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
1963 mutex_unlock(&q->mmap_lock);
1964 if (ret)
1965 return ret;
1966
1967 dprintk(3, "buffer %d, plane %d successfully mapped\n", buffer, plane);
1968 return 0;
1969 }
1970 EXPORT_SYMBOL_GPL(vb2_mmap);
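
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * a driver's mmap file operation typically just resolves its queue and
 * delegates to vb2_mmap(). Storing the queue pointer in
 * file->private_data is an assumption of this example.
 */
static int __maybe_unused example_fop_mmap(struct file *file,
					   struct vm_area_struct *vma)
{
	struct vb2_queue *q = file->private_data;

	return vb2_mmap(q, vma);
}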
1971
1972 #ifndef CONFIG_MMU
1973 unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
1974 unsigned long addr,
1975 unsigned long len,
1976 unsigned long pgoff,
1977 unsigned long flags)
1978 {
1979 unsigned long off = pgoff << PAGE_SHIFT;
1980 struct vb2_buffer *vb;
1981 unsigned int buffer, plane;
1982 void *vaddr;
1983 int ret;
1984
1985 if (q->memory != VB2_MEMORY_MMAP) {
1986 dprintk(1, "queue is not currently set up for mmap\n");
1987 return -EINVAL;
1988 }
1989
1990 /*
1991 * Find the plane corresponding to the offset passed by userspace.
1992 */
1993 ret = __find_plane_by_offset(q, off, &buffer, &plane);
1994 if (ret)
1995 return ret;
1996
1997 vb = q->bufs[buffer];
1998
1999 vaddr = vb2_plane_vaddr(vb, plane);
2000 return vaddr ? (unsigned long)vaddr : -EINVAL;
2001 }
2002 EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
2003 #endif
2004
2005 int vb2_core_queue_init(struct vb2_queue *q)
2006 {
2007 /*
2008 * Sanity check
2009 */
2010 if (WARN_ON(!q) ||
2011 WARN_ON(!q->ops) ||
2012 WARN_ON(!q->mem_ops) ||
2013 WARN_ON(!q->type) ||
2014 WARN_ON(!q->io_modes) ||
2015 WARN_ON(!q->ops->queue_setup) ||
2016 WARN_ON(!q->ops->buf_queue))
2017 return -EINVAL;
2018
2019 INIT_LIST_HEAD(&q->queued_list);
2020 INIT_LIST_HEAD(&q->done_list);
2021 spin_lock_init(&q->done_lock);
2022 mutex_init(&q->mmap_lock);
2023 init_waitqueue_head(&q->done_wq);
2024
2025 q->memory = VB2_MEMORY_UNKNOWN;
2026
2027 if (q->buf_struct_size == 0)
2028 q->buf_struct_size = sizeof(struct vb2_buffer);
2029
2030 if (q->bidirectional)
2031 q->dma_dir = DMA_BIDIRECTIONAL;
2032 else
2033 q->dma_dir = q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
2034
2035 return 0;
2036 }
2037 EXPORT_SYMBOL_GPL(vb2_core_queue_init);
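
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * the minimum a driver must fill in to satisfy the WARN_ON() checks in
 * vb2_core_queue_init(). The example_* names are hypothetical and the
 * vb2_vmalloc_memops choice (from videobuf2-vmalloc.h) is just one
 * possible allocator.
 */
static int example_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
			       unsigned int *num_planes, unsigned int sizes[],
			       struct device *alloc_devs[])
{
	*num_planes = 1;
	sizes[0] = PAGE_SIZE;	/* one page per buffer, for illustration */
	return 0;
}

static void example_buf_queue(struct vb2_buffer *vb)
{
	/* A real driver would hand the buffer to its hardware here. */
}

static const struct vb2_ops example_ops = {
	.queue_setup	= example_queue_setup,
	.buf_queue	= example_buf_queue,
};

static int __maybe_unused example_init_queue(struct vb2_queue *q)
{
	q->type = 1;				/* driver-specific buffer type */
	q->io_modes = VB2_MMAP | VB2_READ;
	q->ops = &example_ops;
	q->mem_ops = &vb2_vmalloc_memops;
	return vb2_core_queue_init(q);
}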
2038
2039 static int __vb2_init_fileio(struct vb2_queue *q, int read);
2040 static int __vb2_cleanup_fileio(struct vb2_queue *q);
2041 void vb2_core_queue_release(struct vb2_queue *q)
2042 {
2043 __vb2_cleanup_fileio(q);
2044 __vb2_queue_cancel(q);
2045 mutex_lock(&q->mmap_lock);
2046 __vb2_queue_free(q, q->num_buffers);
2047 mutex_unlock(&q->mmap_lock);
2048 }
2049 EXPORT_SYMBOL_GPL(vb2_core_queue_release);
2050
2051 __poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
2052 poll_table *wait)
2053 {
2054 __poll_t req_events = poll_requested_events(wait);
2055 struct vb2_buffer *vb = NULL;
2056 unsigned long flags;
2057
2058 if (!q->is_output && !(req_events & (EPOLLIN | EPOLLRDNORM)))
2059 return 0;
2060 if (q->is_output && !(req_events & (EPOLLOUT | EPOLLWRNORM)))
2061 return 0;
2062
2063 /*
2064 	 * Start the file I/O emulator only if the streaming API has not been used yet.
2065 */
2066 if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
2067 if (!q->is_output && (q->io_modes & VB2_READ) &&
2068 (req_events & (EPOLLIN | EPOLLRDNORM))) {
2069 if (__vb2_init_fileio(q, 1))
2070 return EPOLLERR;
2071 }
2072 if (q->is_output && (q->io_modes & VB2_WRITE) &&
2073 (req_events & (EPOLLOUT | EPOLLWRNORM))) {
2074 if (__vb2_init_fileio(q, 0))
2075 return EPOLLERR;
2076 /*
2077 * Write to OUTPUT queue can be done immediately.
2078 */
2079 return EPOLLOUT | EPOLLWRNORM;
2080 }
2081 }
2082
2083 /*
2084 * There is nothing to wait for if the queue isn't streaming, or if the
2085 * error flag is set.
2086 */
2087 if (!vb2_is_streaming(q) || q->error)
2088 return EPOLLERR;
2089
2090 /*
2091 * If this quirk is set and QBUF hasn't been called yet then
2092 	 * return EPOLLERR as well. This only affects capture queues; output
2093 	 * queues will always initialize waiting_for_buffers to false.
2094 * This quirk is set by V4L2 for backwards compatibility reasons.
2095 */
2096 if (q->quirk_poll_must_check_waiting_for_buffers &&
2097 q->waiting_for_buffers && (req_events & (EPOLLIN | EPOLLRDNORM)))
2098 return EPOLLERR;
2099
2100 /*
2101 * For output streams you can call write() as long as there are fewer
2102 * buffers queued than there are buffers available.
2103 */
2104 if (q->is_output && q->fileio && q->queued_count < q->num_buffers)
2105 return EPOLLOUT | EPOLLWRNORM;
2106
2107 if (list_empty(&q->done_list)) {
2108 /*
2109 * If the last buffer was dequeued from a capture queue,
2110 * return immediately. DQBUF will return -EPIPE.
2111 */
2112 if (q->last_buffer_dequeued)
2113 return EPOLLIN | EPOLLRDNORM;
2114
2115 poll_wait(file, &q->done_wq, wait);
2116 }
2117
2118 /*
2119 * Take first buffer available for dequeuing.
2120 */
2121 spin_lock_irqsave(&q->done_lock, flags);
2122 if (!list_empty(&q->done_list))
2123 vb = list_first_entry(&q->done_list, struct vb2_buffer,
2124 done_entry);
2125 spin_unlock_irqrestore(&q->done_lock, flags);
2126
2127 if (vb && (vb->state == VB2_BUF_STATE_DONE
2128 || vb->state == VB2_BUF_STATE_ERROR)) {
2129 return (q->is_output) ?
2130 EPOLLOUT | EPOLLWRNORM :
2131 EPOLLIN | EPOLLRDNORM;
2132 }
2133 return 0;
2134 }
2135 EXPORT_SYMBOL_GPL(vb2_core_poll);
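
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * a driver's poll file operation delegating to vb2_core_poll(), again
 * assuming the queue pointer was stashed in file->private_data.
 */
static __poll_t __maybe_unused example_fop_poll(struct file *file,
						poll_table *wait)
{
	struct vb2_queue *q = file->private_data;

	return vb2_core_poll(q, file, wait);
}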
2136
2137 /*
2138 * struct vb2_fileio_buf - buffer context used by file io emulator
2139 *
2140 * vb2 provides a compatibility layer and emulator of file io (read and
2141  * write) calls on top of the streaming API. This structure is used for
2142 * tracking context related to the buffers.
2143 */
2144 struct vb2_fileio_buf {
2145 void *vaddr;
2146 unsigned int size;
2147 unsigned int pos;
2148 unsigned int queued:1;
2149 };
2150
2151 /*
2152 * struct vb2_fileio_data - queue context used by file io emulator
2153 *
2154 * @cur_index: the index of the buffer currently being read from or
2155 * written to. If equal to q->num_buffers then a new buffer
2156 * must be dequeued.
2157 * @initial_index: in the read() case all buffers are queued up immediately
2158 * in __vb2_init_fileio() and __vb2_perform_fileio() just cycles
2159 * buffers. However, in the write() case no buffers are initially
2160 * queued, instead whenever a buffer is full it is queued up by
2161 * __vb2_perform_fileio(). Only once all available buffers have
2162 * been queued up will __vb2_perform_fileio() start to dequeue
2163 * buffers. This means that initially __vb2_perform_fileio()
2164 * needs to know what buffer index to use when it is queuing up
2165 * the buffers for the first time. That initial index is stored
2166 * in this field. Once it is equal to q->num_buffers all
2167 * available buffers have been queued and __vb2_perform_fileio()
2168 * should start the normal dequeue/queue cycle.
2169 *
2170 * vb2 provides a compatibility layer and emulator of file io (read and
2171  * write) calls on top of the streaming API. For proper operation it
2172  * requires this structure to save the driver state between each call of
2173  * the read or write function.
2174 */
2175 struct vb2_fileio_data {
2176 unsigned int count;
2177 unsigned int type;
2178 unsigned int memory;
2179 struct vb2_fileio_buf bufs[VB2_MAX_FRAME];
2180 unsigned int cur_index;
2181 unsigned int initial_index;
2182 unsigned int q_count;
2183 unsigned int dq_count;
2184 unsigned read_once:1;
2185 unsigned write_immediately:1;
2186 };
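
/*
 * Worked example (added for clarity, not part of the original file): with
 * write() emulation and three buffers, cur_index and initial_index both
 * start at 0 and advance together as each buffer fills and is queued:
 * 0 -> 1 -> 2 -> 3. Once both equal q->num_buffers (3), every subsequent
 * call first dequeues a buffer (setting cur_index to that buffer's
 * index), fills it, re-queues it, and then resets cur_index back to
 * q->num_buffers. In the read() case all buffers are queued up front, so
 * both fields start at q->num_buffers and the dequeue/re-queue cycle
 * begins immediately.
 */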
2187
2188 /*
2189 * __vb2_init_fileio() - initialize file io emulator
2190 * @q: videobuf2 queue
2191 * @read: mode selector (1 means read, 0 means write)
2192 */
2193 static int __vb2_init_fileio(struct vb2_queue *q, int read)
2194 {
2195 struct vb2_fileio_data *fileio;
2196 int i, ret;
2197 unsigned int count = 0;
2198
2199 /*
2200 * Sanity check
2201 */
2202 if (WARN_ON((read && !(q->io_modes & VB2_READ)) ||
2203 (!read && !(q->io_modes & VB2_WRITE))))
2204 return -EINVAL;
2205
2206 /*
2207 * Check if device supports mapping buffers to kernel virtual space.
2208 */
2209 if (!q->mem_ops->vaddr)
2210 return -EBUSY;
2211
2212 /*
2213 	 * Check that the streaming API has not already been activated.
2214 */
2215 if (q->streaming || q->num_buffers > 0)
2216 return -EBUSY;
2217
2218 /*
2219 * Start with count 1, driver can increase it in queue_setup()
2220 */
2221 count = 1;
2222
2223 dprintk(3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n",
2224 (read) ? "read" : "write", count, q->fileio_read_once,
2225 q->fileio_write_immediately);
2226
2227 fileio = kzalloc(sizeof(*fileio), GFP_KERNEL);
2228 if (fileio == NULL)
2229 return -ENOMEM;
2230
2231 fileio->read_once = q->fileio_read_once;
2232 fileio->write_immediately = q->fileio_write_immediately;
2233
2234 /*
2235 	 * Request buffers and use MMAP type to force the driver
2236 * to allocate buffers by itself.
2237 */
2238 fileio->count = count;
2239 fileio->memory = VB2_MEMORY_MMAP;
2240 fileio->type = q->type;
2241 q->fileio = fileio;
2242 ret = vb2_core_reqbufs(q, fileio->memory, &fileio->count);
2243 if (ret)
2244 goto err_kfree;
2245
2246 /*
2247 * Check if plane_count is correct
2248 * (multiplane buffers are not supported).
2249 */
2250 if (q->bufs[0]->num_planes != 1) {
2251 ret = -EBUSY;
2252 goto err_reqbufs;
2253 }
2254
2255 /*
2256 * Get kernel address of each buffer.
2257 */
2258 for (i = 0; i < q->num_buffers; i++) {
2259 fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
2260 if (fileio->bufs[i].vaddr == NULL) {
2261 ret = -EINVAL;
2262 goto err_reqbufs;
2263 }
2264 fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
2265 }
2266
2267 /*
2268 	 * Read mode requires pre-queuing of all buffers.
2269 */
2270 if (read) {
2271 /*
2272 * Queue all buffers.
2273 */
2274 for (i = 0; i < q->num_buffers; i++) {
2275 ret = vb2_core_qbuf(q, i, NULL);
2276 if (ret)
2277 goto err_reqbufs;
2278 fileio->bufs[i].queued = 1;
2279 }
2280 /*
2281 * All buffers have been queued, so mark that by setting
2282 * initial_index to q->num_buffers
2283 */
2284 fileio->initial_index = q->num_buffers;
2285 fileio->cur_index = q->num_buffers;
2286 }
2287
2288 /*
2289 * Start streaming.
2290 */
2291 ret = vb2_core_streamon(q, q->type);
2292 if (ret)
2293 goto err_reqbufs;
2294
2295 return ret;
2296
2297 err_reqbufs:
2298 fileio->count = 0;
2299 vb2_core_reqbufs(q, fileio->memory, &fileio->count);
2300
2301 err_kfree:
2302 q->fileio = NULL;
2303 kfree(fileio);
2304 return ret;
2305 }
2306
2307 /*
2308  * __vb2_cleanup_fileio() - free resources used by the file io emulator
2309 * @q: videobuf2 queue
2310 */
2311 static int __vb2_cleanup_fileio(struct vb2_queue *q)
2312 {
2313 struct vb2_fileio_data *fileio = q->fileio;
2314
2315 if (fileio) {
2316 vb2_core_streamoff(q, q->type);
2317 q->fileio = NULL;
2318 fileio->count = 0;
2319 vb2_core_reqbufs(q, fileio->memory, &fileio->count);
2320 kfree(fileio);
2321 dprintk(3, "file io emulator closed\n");
2322 }
2323 return 0;
2324 }
2325
2326 /*
2327 * __vb2_perform_fileio() - perform a single file io (read or write) operation
2328 * @q: videobuf2 queue
2329  * @data: pointer to the target userspace buffer
2330 * @count: number of bytes to read or write
2331 * @ppos: file handle position tracking pointer
2332  * @nonblock: mode selector (1 means nonblocking, 0 means blocking)
2333 * @read: access mode selector (1 means read, 0 means write)
2334 */
2335 static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
2336 loff_t *ppos, int nonblock, int read)
2337 {
2338 struct vb2_fileio_data *fileio;
2339 struct vb2_fileio_buf *buf;
2340 bool is_multiplanar = q->is_multiplanar;
2341 /*
2342 * When using write() to write data to an output video node the vb2 core
2343 * should copy timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody
2344 * else is able to provide this information with the write() operation.
2345 */
2346 bool copy_timestamp = !read && q->copy_timestamp;
2347 unsigned index;
2348 int ret;
2349
2350 dprintk(3, "mode %s, offset %ld, count %zd, %sblocking\n",
2351 read ? "read" : "write", (long)*ppos, count,
2352 nonblock ? "non" : "");
2353
2354 if (!data)
2355 return -EINVAL;
2356
2357 /*
2358 * Initialize emulator on first call.
2359 */
2360 if (!vb2_fileio_is_active(q)) {
2361 ret = __vb2_init_fileio(q, read);
2362 dprintk(3, "vb2_init_fileio result: %d\n", ret);
2363 if (ret)
2364 return ret;
2365 }
2366 fileio = q->fileio;
2367
2368 /*
2369 * Check if we need to dequeue the buffer.
2370 */
2371 index = fileio->cur_index;
2372 if (index >= q->num_buffers) {
2373 struct vb2_buffer *b;
2374
2375 /*
2376 * Call vb2_dqbuf to get buffer back.
2377 */
2378 ret = vb2_core_dqbuf(q, &index, NULL, nonblock);
2379 dprintk(5, "vb2_dqbuf result: %d\n", ret);
2380 if (ret)
2381 return ret;
2382 fileio->dq_count += 1;
2383
2384 fileio->cur_index = index;
2385 buf = &fileio->bufs[index];
2386 b = q->bufs[index];
2387
2388 /*
2389 * Get number of bytes filled by the driver
2390 */
2391 buf->pos = 0;
2392 buf->queued = 0;
2393 buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
2394 : vb2_plane_size(q->bufs[index], 0);
2395 /* Compensate for data_offset on read in the multiplanar case. */
2396 if (is_multiplanar && read &&
2397 b->planes[0].data_offset < buf->size) {
2398 buf->pos = b->planes[0].data_offset;
2399 buf->size -= buf->pos;
2400 }
2401 } else {
2402 buf = &fileio->bufs[index];
2403 }
2404
2405 /*
2406 * Limit count on last few bytes of the buffer.
2407 */
2408 if (buf->pos + count > buf->size) {
2409 count = buf->size - buf->pos;
2410 dprintk(5, "reducing read count: %zd\n", count);
2411 }
2412
2413 /*
2414 * Transfer data to userspace.
2415 */
2416 dprintk(3, "copying %zd bytes - buffer %d, offset %u\n",
2417 count, index, buf->pos);
2418 if (read)
2419 ret = copy_to_user(data, buf->vaddr + buf->pos, count);
2420 else
2421 ret = copy_from_user(buf->vaddr + buf->pos, data, count);
2422 if (ret) {
2423 dprintk(3, "error copying data\n");
2424 return -EFAULT;
2425 }
2426
2427 /*
2428 * Update counters.
2429 */
2430 buf->pos += count;
2431 *ppos += count;
2432
2433 /*
2434 * Queue next buffer if required.
2435 */
2436 if (buf->pos == buf->size || (!read && fileio->write_immediately)) {
2437 struct vb2_buffer *b = q->bufs[index];
2438
2439 /*
2440 * Check if this is the last buffer to read.
2441 */
2442 if (read && fileio->read_once && fileio->dq_count == 1) {
2443 dprintk(3, "read limit reached\n");
2444 return __vb2_cleanup_fileio(q);
2445 }
2446
2447 /*
2448 * Call vb2_qbuf and give buffer to the driver.
2449 */
2450 b->planes[0].bytesused = buf->pos;
2451
2452 if (copy_timestamp)
2453 b->timestamp = ktime_get_ns();
2454 ret = vb2_core_qbuf(q, index, NULL);
2455 		dprintk(5, "vb2_qbuf result: %d\n", ret);
2456 if (ret)
2457 return ret;
2458
2459 /*
2460 * Buffer has been queued, update the status
2461 */
2462 buf->pos = 0;
2463 buf->queued = 1;
2464 buf->size = vb2_plane_size(q->bufs[index], 0);
2465 fileio->q_count += 1;
2466 /*
2467 * If we are queuing up buffers for the first time, then
2468 * increase initial_index by one.
2469 */
2470 if (fileio->initial_index < q->num_buffers)
2471 fileio->initial_index++;
2472 /*
2473 		 * The next buffer to use is either a buffer that's going to be
2474 		 * queued for the first time (initial_index < q->num_buffers),
2475 		 * or initial_index equals q->num_buffers, meaning that next
2476 		 * time we will need to dequeue a buffer since we've now queued
2477 		 * up all the 'first time' buffers.
2478 */
2479 fileio->cur_index = fileio->initial_index;
2480 }
2481
2482 /*
2483 * Return proper number of bytes processed.
2484 */
2485 if (ret == 0)
2486 ret = count;
2487 return ret;
2488 }
2489
2490 size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
2491 loff_t *ppos, int nonblocking)
2492 {
2493 return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
2494 }
2495 EXPORT_SYMBOL_GPL(vb2_read);
2496
2497 size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
2498 loff_t *ppos, int nonblocking)
2499 {
2500 return __vb2_perform_fileio(q, (char __user *) data, count,
2501 ppos, nonblocking, 0);
2502 }
2503 EXPORT_SYMBOL_GPL(vb2_write);
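
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * wiring the read() emulation into a driver's file operations, assuming
 * the queue pointer lives in file->private_data. O_NONBLOCK on the file
 * maps directly onto the nonblocking argument.
 */
static ssize_t __maybe_unused example_fop_read(struct file *file,
					       char __user *buf, size_t count,
					       loff_t *ppos)
{
	struct vb2_queue *q = file->private_data;

	return vb2_read(q, buf, count, ppos, file->f_flags & O_NONBLOCK);
}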
2504
2505 struct vb2_threadio_data {
2506 struct task_struct *thread;
2507 vb2_thread_fnc fnc;
2508 void *priv;
2509 bool stop;
2510 };
2511
2512 static int vb2_thread(void *data)
2513 {
2514 struct vb2_queue *q = data;
2515 struct vb2_threadio_data *threadio = q->threadio;
2516 bool copy_timestamp = false;
2517 unsigned prequeue = 0;
2518 unsigned index = 0;
2519 int ret = 0;
2520
2521 if (q->is_output) {
2522 prequeue = q->num_buffers;
2523 copy_timestamp = q->copy_timestamp;
2524 }
2525
2526 set_freezable();
2527
2528 for (;;) {
2529 struct vb2_buffer *vb;
2530
2531 /*
2532 * Call vb2_dqbuf to get buffer back.
2533 */
2534 if (prequeue) {
2535 vb = q->bufs[index++];
2536 prequeue--;
2537 } else {
2538 call_void_qop(q, wait_finish, q);
2539 if (!threadio->stop)
2540 ret = vb2_core_dqbuf(q, &index, NULL, 0);
2541 call_void_qop(q, wait_prepare, q);
2542 dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
2543 if (!ret)
2544 vb = q->bufs[index];
2545 }
2546 if (ret || threadio->stop)
2547 break;
2548 try_to_freeze();
2549
2550 if (vb->state != VB2_BUF_STATE_ERROR)
2551 if (threadio->fnc(vb, threadio->priv))
2552 break;
2553 call_void_qop(q, wait_finish, q);
2554 if (copy_timestamp)
2555 vb->timestamp = ktime_get_ns();
2556 if (!threadio->stop)
2557 ret = vb2_core_qbuf(q, vb->index, NULL);
2558 call_void_qop(q, wait_prepare, q);
2559 if (ret || threadio->stop)
2560 break;
2561 }
2562
2563 /* Hmm, linux becomes *very* unhappy without this ... */
2564 while (!kthread_should_stop()) {
2565 set_current_state(TASK_INTERRUPTIBLE);
2566 schedule();
2567 }
2568 return 0;
2569 }
2570
2571 /*
2572 * This function should not be used for anything else but the videobuf2-dvb
2573 * support. If you think you have another good use-case for this, then please
2574 * contact the linux-media mailinglist first.
2575 */
2576 int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
2577 const char *thread_name)
2578 {
2579 struct vb2_threadio_data *threadio;
2580 int ret = 0;
2581
2582 if (q->threadio)
2583 return -EBUSY;
2584 if (vb2_is_busy(q))
2585 return -EBUSY;
2586 if (WARN_ON(q->fileio))
2587 return -EBUSY;
2588
2589 threadio = kzalloc(sizeof(*threadio), GFP_KERNEL);
2590 if (threadio == NULL)
2591 return -ENOMEM;
2592 threadio->fnc = fnc;
2593 threadio->priv = priv;
2594
2595 ret = __vb2_init_fileio(q, !q->is_output);
2596 dprintk(3, "file io: vb2_init_fileio result: %d\n", ret);
2597 if (ret)
2598 goto nomem;
2599 q->threadio = threadio;
2600 threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name);
2601 if (IS_ERR(threadio->thread)) {
2602 ret = PTR_ERR(threadio->thread);
2603 threadio->thread = NULL;
2604 goto nothread;
2605 }
2606 return 0;
2607
2608 nothread:
2609 __vb2_cleanup_fileio(q);
2610 nomem:
2611 kfree(threadio);
2612 return ret;
2613 }
2614 EXPORT_SYMBOL_GPL(vb2_thread_start);
2615
2616 int vb2_thread_stop(struct vb2_queue *q)
2617 {
2618 struct vb2_threadio_data *threadio = q->threadio;
2619 int err;
2620
2621 if (threadio == NULL)
2622 return 0;
2623 threadio->stop = true;
2624 /* Wake up all pending sleeps in the thread */
2625 vb2_queue_error(q);
2626 err = kthread_stop(threadio->thread);
2627 __vb2_cleanup_fileio(q);
2628 threadio->thread = NULL;
2629 kfree(threadio);
2630 q->threadio = NULL;
2631 return err;
2632 }
2633 EXPORT_SYMBOL_GPL(vb2_thread_stop);
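
/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * the DVB-style consumer pattern vb2_thread_start()/vb2_thread_stop()
 * were written for. example_feed() is a hypothetical callback; returning
 * nonzero from it terminates the thread loop.
 */
static int example_feed(struct vb2_buffer *vb, void *priv)
{
	/* A real consumer would push vb's payload into the demux here. */
	return 0;	/* keep streaming */
}

static int __maybe_unused example_start_consumer(struct vb2_queue *q)
{
	return vb2_thread_start(q, example_feed, NULL, "example");
}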
2634
2635 MODULE_DESCRIPTION("Media buffer core framework");
2636 MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
2637 MODULE_LICENSE("GPL");
2638