1 /*
2 * videobuf2-v4l2.c - V4L2 driver helper framework
3 *
4 * Copyright (C) 2010 Samsung Electronics
5 *
6 * Author: Pawel Osciak <pawel@osciak.com>
7 * Marek Szyprowski <m.szyprowski@samsung.com>
8 *
9 * The vb2_thread implementation was based on code from videobuf-dvb.c:
10 * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation.
15 */
16
17 #include <linux/device.h>
18 #include <linux/err.h>
19 #include <linux/freezer.h>
20 #include <linux/kernel.h>
21 #include <linux/kthread.h>
22 #include <linux/mm.h>
23 #include <linux/module.h>
24 #include <linux/poll.h>
25 #include <linux/sched.h>
26 #include <linux/slab.h>
27
28 #include <media/v4l2-common.h>
29 #include <media/v4l2-dev.h>
30 #include <media/v4l2-device.h>
31 #include <media/v4l2-event.h>
32 #include <media/v4l2-fh.h>
33
34 #include <media/videobuf2-v4l2.h>
35
36 static int debug;
37 module_param(debug, int, 0644);
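
/*
 * Debug messages from the dprintk() macro below can be enabled at runtime
 * through the "debug" module parameter, e.g. (illustrative sketch; the sysfs
 * path simply follows the usual module_param() convention for this module):
 *
 *	echo 1 > /sys/module/videobuf2_v4l2/parameters/debug
 */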

#define dprintk(q, level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			pr_info("vb2-v4l2: [%s] %s: " fmt,		\
				(q)->name, __func__, ## arg);		\
	} while (0)

/* Flags that are set by us */
#define V4L2_BUFFER_MASK_FLAGS	(V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
				 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
				 V4L2_BUF_FLAG_PREPARED | \
				 V4L2_BUF_FLAG_IN_REQUEST | \
				 V4L2_BUF_FLAG_REQUEST_FD | \
				 V4L2_BUF_FLAG_TIMESTAMP_MASK)
/* Output buffer flags that should be passed on to the driver */
#define V4L2_BUFFER_OUT_FLAGS	(V4L2_BUF_FLAG_PFRAME | \
				 V4L2_BUF_FLAG_BFRAME | \
				 V4L2_BUF_FLAG_KEYFRAME | \
				 V4L2_BUF_FLAG_TIMECODE | \
				 V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF)

/*
 * __verify_planes_array() - verify that the planes array passed in struct
 * v4l2_buffer from userspace can be safely used
 */
static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
		return 0;

	/* Is memory for copying plane information present? */
	if (b->m.planes == NULL) {
		dprintk(vb->vb2_queue, 1,
			"multi-planar buffer passed but planes array not provided\n");
		return -EINVAL;
	}

	if (b->length < vb->num_planes || b->length > VB2_MAX_PLANES) {
		dprintk(vb->vb2_queue, 1,
			"incorrect planes array length, expected %d, got %d\n",
			vb->num_planes, b->length);
		return -EINVAL;
	}

	return 0;
}

static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
{
	return __verify_planes_array(vb, pb);
}

/*
 * __verify_length() - Verify that the bytesused value for each plane fits in
 * the plane length and that the data offset doesn't exceed the bytesused value.
 */
static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	unsigned int length;
	unsigned int bytesused;
	unsigned int plane;

	if (V4L2_TYPE_IS_CAPTURE(b->type))
		return 0;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		for (plane = 0; plane < vb->num_planes; ++plane) {
			length = (b->memory == VB2_MEMORY_USERPTR ||
				  b->memory == VB2_MEMORY_DMABUF)
				? b->m.planes[plane].length
				: vb->planes[plane].length;
			bytesused = b->m.planes[plane].bytesused
				? b->m.planes[plane].bytesused : length;

			if (b->m.planes[plane].bytesused > length)
				return -EINVAL;

			if (b->m.planes[plane].data_offset > 0 &&
			    b->m.planes[plane].data_offset >= bytesused)
				return -EINVAL;
		}
	} else {
		length = (b->memory == VB2_MEMORY_USERPTR)
			? b->length : vb->planes[0].length;

		if (b->bytesused > length)
			return -EINVAL;
	}

	return 0;
}

/*
 * __init_vb2_v4l2_buffer() - initialize the vb2_v4l2_buffer struct
 */
static void __init_vb2_v4l2_buffer(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	vbuf->request_fd = -1;
}

static void __copy_timestamp(struct vb2_buffer *vb, const void *pb)
{
	const struct v4l2_buffer *b = pb;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_queue *q = vb->vb2_queue;

	if (q->is_output) {
		/*
		 * For output buffers copy the timestamp if needed,
		 * and the timecode field and flag if needed.
		 */
		if (q->copy_timestamp)
			vb->timestamp = v4l2_buffer_get_timestamp(b);
		vbuf->flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
		if (b->flags & V4L2_BUF_FLAG_TIMECODE)
			vbuf->timecode = b->timecode;
	}
}

static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
{
	static bool check_once;

	if (check_once)
		return;

	check_once = true;

	pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
	if (vb->vb2_queue->allow_zero_bytesused)
		pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
	else
		pr_warn("use the actual size instead.\n");
}

static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
{
	struct vb2_queue *q = vb->vb2_queue;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_plane *planes = vbuf->planes;
	unsigned int plane;
	int ret;

	ret = __verify_length(vb, b);
	if (ret < 0) {
		dprintk(q, 1, "plane parameters verification failed: %d\n", ret);
		return ret;
	}
	if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) {
		/*
		 * If the format's field is ALTERNATE, then the buffer's field
		 * should be either TOP or BOTTOM, not ALTERNATE since that
		 * makes no sense. The driver has to know whether the
		 * buffer represents a top or a bottom field in order to
		 * program any DMA correctly. Using ALTERNATE is wrong, since
		 * that just says that it is either a top or a bottom field,
		 * but not which of the two it is.
		 */
		dprintk(q, 1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
		return -EINVAL;
	}
	vbuf->sequence = 0;
	vbuf->request_fd = -1;
	vbuf->is_held = false;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		switch (b->memory) {
		case VB2_MEMORY_USERPTR:
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.userptr =
					b->m.planes[plane].m.userptr;
				planes[plane].length =
					b->m.planes[plane].length;
			}
			break;
		case VB2_MEMORY_DMABUF:
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.fd =
					b->m.planes[plane].m.fd;
				planes[plane].length =
					b->m.planes[plane].length;
			}
			break;
		default:
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.offset =
					vb->planes[plane].m.offset;
				planes[plane].length =
					vb->planes[plane].length;
			}
			break;
		}

		/* Fill in driver-provided information for OUTPUT types */
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			/*
			 * Will have to go up to b->length when API starts
			 * accepting variable number of planes.
			 *
			 * If bytesused == 0 for the output buffer, then fall
			 * back to the full buffer size. In that case
			 * userspace clearly never bothered to set it and
			 * it's a safe assumption that they really meant to
			 * use the full plane sizes.
			 *
			 * Some drivers, e.g. old codec drivers, use bytesused == 0
			 * as a way to indicate that streaming is finished.
			 * In that case, the driver should use the
			 * allow_zero_bytesused flag to keep old userspace
			 * applications working.
			 */
			for (plane = 0; plane < vb->num_planes; ++plane) {
				struct vb2_plane *pdst = &planes[plane];
				struct v4l2_plane *psrc = &b->m.planes[plane];

				if (psrc->bytesused == 0)
					vb2_warn_zero_bytesused(vb);

				if (vb->vb2_queue->allow_zero_bytesused)
					pdst->bytesused = psrc->bytesused;
				else
					pdst->bytesused = psrc->bytesused ?
						psrc->bytesused : pdst->length;
				pdst->data_offset = psrc->data_offset;
			}
		}
	} else {
		/*
		 * Single-planar buffers do not use planes array,
		 * so fill in relevant v4l2_buffer struct fields instead.
		 * In videobuf we use our internal V4l2_planes struct for
		 * single-planar buffers as well, for simplicity.
		 *
		 * If bytesused == 0 for the output buffer, then fall back
		 * to the full buffer size as that's a sensible default.
		 *
		 * Some drivers, e.g. old codec drivers, use bytesused == 0 as
		 * a way to indicate that streaming is finished. In that case,
		 * the driver should use the allow_zero_bytesused flag to keep
		 * old userspace applications working.
		 */
		switch (b->memory) {
		case VB2_MEMORY_USERPTR:
			planes[0].m.userptr = b->m.userptr;
			planes[0].length = b->length;
			break;
		case VB2_MEMORY_DMABUF:
			planes[0].m.fd = b->m.fd;
			planes[0].length = b->length;
			break;
		default:
			planes[0].m.offset = vb->planes[0].m.offset;
			planes[0].length = vb->planes[0].length;
			break;
		}

		planes[0].data_offset = 0;
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			if (b->bytesused == 0)
				vb2_warn_zero_bytesused(vb);

			if (vb->vb2_queue->allow_zero_bytesused)
				planes[0].bytesused = b->bytesused;
			else
				planes[0].bytesused = b->bytesused ?
					b->bytesused : planes[0].length;
		} else
			planes[0].bytesused = 0;

	}

	/* Zero flags that we handle */
	vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
	if (!vb->vb2_queue->copy_timestamp || V4L2_TYPE_IS_CAPTURE(b->type)) {
		/*
		 * Non-COPY timestamps and non-OUTPUT queues will get
		 * their timestamp and timestamp source flags from the
		 * queue.
		 */
		vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	if (V4L2_TYPE_IS_OUTPUT(b->type)) {
		/*
		 * For output buffers mask out the timecode flag:
		 * this will be handled later in vb2_qbuf().
		 * The 'field' is valid metadata for this output buffer
		 * and so that needs to be copied here.
		 */
		vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE;
		vbuf->field = b->field;
		if (!(q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
			vbuf->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
	} else {
		/* Zero any output buffer flags as this is a capture buffer */
		vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS;
		/* Zero last flag, this is a signal from driver to userspace */
		vbuf->flags &= ~V4L2_BUF_FLAG_LAST;
	}

	return 0;
}

static void set_buffer_cache_hints(struct vb2_queue *q,
				   struct vb2_buffer *vb,
				   struct v4l2_buffer *b)
{
	/*
	 * DMA exporter should take care of cache syncs, so we can avoid
	 * explicit ->prepare()/->finish() syncs. For other ->memory types
	 * we always need ->prepare() or/and ->finish() cache sync.
	 */
	if (q->memory == VB2_MEMORY_DMABUF) {
		vb->need_cache_sync_on_finish = 0;
		vb->need_cache_sync_on_prepare = 0;
		return;
	}

	/*
	 * Cache sync/invalidation flags are set by default in order to
	 * preserve existing behaviour for old apps/drivers.
	 */
	vb->need_cache_sync_on_prepare = 1;
	vb->need_cache_sync_on_finish = 1;

	if (!vb2_queue_allows_cache_hints(q)) {
		/*
		 * Clear buffer cache flags if queue does not support user
		 * space hints. That's to indicate to userspace that these
		 * flags won't work.
		 */
		b->flags &= ~V4L2_BUF_FLAG_NO_CACHE_INVALIDATE;
		b->flags &= ~V4L2_BUF_FLAG_NO_CACHE_CLEAN;
		return;
	}

	/*
	 * ->finish() cache sync can be avoided when queue direction is
	 * TO_DEVICE.
	 */
	if (q->dma_dir == DMA_TO_DEVICE)
		vb->need_cache_sync_on_finish = 0;

	if (b->flags & V4L2_BUF_FLAG_NO_CACHE_INVALIDATE)
		vb->need_cache_sync_on_finish = 0;

	if (b->flags & V4L2_BUF_FLAG_NO_CACHE_CLEAN)
		vb->need_cache_sync_on_prepare = 0;
}

static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
				    struct v4l2_buffer *b, bool is_prepare,
				    struct media_request **p_req)
{
	const char *opname = is_prepare ? "prepare_buf" : "qbuf";
	struct media_request *req;
	struct vb2_v4l2_buffer *vbuf;
	struct vb2_buffer *vb;
	int ret;

	if (b->type != q->type) {
		dprintk(q, 1, "%s: invalid buffer type\n", opname);
		return -EINVAL;
	}

	if (b->index >= q->num_buffers) {
		dprintk(q, 1, "%s: buffer index out of range\n", opname);
		return -EINVAL;
	}

	if (q->bufs[b->index] == NULL) {
		/* Should never happen */
		dprintk(q, 1, "%s: buffer is NULL\n", opname);
		return -EINVAL;
	}

	if (b->memory != q->memory) {
		dprintk(q, 1, "%s: invalid memory type\n", opname);
		return -EINVAL;
	}

	vb = q->bufs[b->index];
	vbuf = to_vb2_v4l2_buffer(vb);
	ret = __verify_planes_array(vb, b);
	if (ret)
		return ret;

	if (!is_prepare && (b->flags & V4L2_BUF_FLAG_REQUEST_FD) &&
	    vb->state != VB2_BUF_STATE_DEQUEUED) {
		dprintk(q, 1, "%s: buffer is not in dequeued state\n", opname);
		return -EINVAL;
	}

	if (!vb->prepared) {
		set_buffer_cache_hints(q, vb, b);
		/* Copy relevant information provided by the userspace */
		memset(vbuf->planes, 0,
		       sizeof(vbuf->planes[0]) * vb->num_planes);
		ret = vb2_fill_vb2_v4l2_buffer(vb, b);
		if (ret)
			return ret;
	}

	if (is_prepare)
		return 0;

	if (!(b->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
		if (q->requires_requests) {
			dprintk(q, 1, "%s: queue requires requests\n", opname);
			return -EBADR;
		}
		if (q->uses_requests) {
			dprintk(q, 1, "%s: queue uses requests\n", opname);
			return -EBUSY;
		}
		return 0;
	} else if (!q->supports_requests) {
		dprintk(q, 1, "%s: queue does not support requests\n", opname);
		return -EBADR;
	} else if (q->uses_qbuf) {
		dprintk(q, 1, "%s: queue does not use requests\n", opname);
		return -EBUSY;
	}

	/*
	 * For proper locking when queueing a request you need to be able
	 * to lock access to the vb2 queue, so check that there is a lock
	 * that we can use. In addition p_req must be non-NULL.
	 */
	if (WARN_ON(!q->lock || !p_req))
		return -EINVAL;

	/*
	 * Make sure this op is implemented by the driver. It's easy to forget
	 * this callback, but it is important when canceling a buffer in a
	 * queued request.
	 */
	if (WARN_ON(!q->ops->buf_request_complete))
		return -EINVAL;
	/*
	 * Make sure this op is implemented by the driver for the output queue.
	 * It's easy to forget this callback, but it is important to correctly
	 * validate the 'field' value at QBUF time.
	 */
	if (WARN_ON((q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
		     q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
		    !q->ops->buf_out_validate))
		return -EINVAL;

	req = media_request_get_by_fd(mdev, b->request_fd);
	if (IS_ERR(req)) {
		dprintk(q, 1, "%s: invalid request_fd\n", opname);
		return PTR_ERR(req);
	}

	/*
	 * Early sanity check. This is checked again when the buffer
	 * is bound to the request in vb2_core_qbuf().
	 */
	if (req->state != MEDIA_REQUEST_STATE_IDLE &&
	    req->state != MEDIA_REQUEST_STATE_UPDATING) {
		dprintk(q, 1, "%s: request is not idle\n", opname);
		media_request_put(req);
		return -EBUSY;
	}

	*p_req = req;
	vbuf->request_fd = b->request_fd;

	return 0;
}

/*
 * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
 * returned to userspace
 */
static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
{
	struct v4l2_buffer *b = pb;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int plane;

	/* Copy back data such as timestamp, flags, etc. */
	b->index = vb->index;
	b->type = vb->type;
	b->memory = vb->memory;
	b->bytesused = 0;

	b->flags = vbuf->flags;
	b->field = vbuf->field;
	v4l2_buffer_set_timestamp(b, vb->timestamp);
	b->timecode = vbuf->timecode;
	b->sequence = vbuf->sequence;
	b->reserved2 = 0;
	b->request_fd = 0;

	if (q->is_multiplanar) {
		/*
		 * Fill in plane-related data if userspace provided an array
		 * for it. The caller has already verified memory and size.
		 */
		b->length = vb->num_planes;
		for (plane = 0; plane < vb->num_planes; ++plane) {
			struct v4l2_plane *pdst = &b->m.planes[plane];
			struct vb2_plane *psrc = &vb->planes[plane];

			pdst->bytesused = psrc->bytesused;
			pdst->length = psrc->length;
			if (q->memory == VB2_MEMORY_MMAP)
				pdst->m.mem_offset = psrc->m.offset;
			else if (q->memory == VB2_MEMORY_USERPTR)
				pdst->m.userptr = psrc->m.userptr;
			else if (q->memory == VB2_MEMORY_DMABUF)
				pdst->m.fd = psrc->m.fd;
			pdst->data_offset = psrc->data_offset;
			memset(pdst->reserved, 0, sizeof(pdst->reserved));
		}
	} else {
		/*
		 * We use length and offset in v4l2_planes array even for
		 * single-planar buffers, but userspace does not.
		 */
		b->length = vb->planes[0].length;
		b->bytesused = vb->planes[0].bytesused;
		if (q->memory == VB2_MEMORY_MMAP)
			b->m.offset = vb->planes[0].m.offset;
		else if (q->memory == VB2_MEMORY_USERPTR)
			b->m.userptr = vb->planes[0].m.userptr;
		else if (q->memory == VB2_MEMORY_DMABUF)
			b->m.fd = vb->planes[0].m.fd;
	}

	/*
	 * Clear any buffer state related flags.
	 */
	b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
	b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
	if (!q->copy_timestamp) {
		/*
		 * For non-COPY timestamps, drop timestamp source bits
		 * and obtain the timestamp source from the queue.
		 */
		b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
		b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	switch (vb->state) {
	case VB2_BUF_STATE_QUEUED:
	case VB2_BUF_STATE_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VB2_BUF_STATE_IN_REQUEST:
		b->flags |= V4L2_BUF_FLAG_IN_REQUEST;
		break;
	case VB2_BUF_STATE_ERROR:
		b->flags |= V4L2_BUF_FLAG_ERROR;
		fallthrough;
	case VB2_BUF_STATE_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VB2_BUF_STATE_PREPARING:
	case VB2_BUF_STATE_DEQUEUED:
		/* nothing */
		break;
	}

	if ((vb->state == VB2_BUF_STATE_DEQUEUED ||
	     vb->state == VB2_BUF_STATE_IN_REQUEST) &&
	    vb->synced && vb->prepared)
		b->flags |= V4L2_BUF_FLAG_PREPARED;

	if (vb2_buffer_in_use(q, vb))
		b->flags |= V4L2_BUF_FLAG_MAPPED;
	if (vbuf->request_fd >= 0) {
		b->flags |= V4L2_BUF_FLAG_REQUEST_FD;
		b->request_fd = vbuf->request_fd;
	}
}

/*
 * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
 * v4l2_buffer by the userspace. It also verifies that struct
 * v4l2_buffer has a valid number of planes.
 */
static int __fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *planes)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	unsigned int plane;

	if (!vb->vb2_queue->copy_timestamp)
		vb->timestamp = 0;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->vb2_queue->memory != VB2_MEMORY_MMAP) {
			planes[plane].m = vbuf->planes[plane].m;
			planes[plane].length = vbuf->planes[plane].length;
		}
		planes[plane].bytesused = vbuf->planes[plane].bytesused;
		planes[plane].data_offset = vbuf->planes[plane].data_offset;
	}
	return 0;
}

static const struct vb2_buf_ops v4l2_buf_ops = {
	.verify_planes_array	= __verify_planes_array_core,
	.init_buffer		= __init_vb2_v4l2_buffer,
	.fill_user_buffer	= __fill_v4l2_buffer,
	.fill_vb2_buffer	= __fill_vb2_buffer,
	.copy_timestamp		= __copy_timestamp,
};

int vb2_find_timestamp(const struct vb2_queue *q, u64 timestamp,
		       unsigned int start_idx)
{
	unsigned int i;

	for (i = start_idx; i < q->num_buffers; i++)
		if (q->bufs[i]->copied_timestamp &&
		    q->bufs[i]->timestamp == timestamp)
			return i;
	return -1;
}
EXPORT_SYMBOL_GPL(vb2_find_timestamp);
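
/*
 * Illustrative use of vb2_find_timestamp() (a sketch only; the helper name,
 * the "timestamp" cookie and the capture queue variable are assumptions, not
 * part of this file). Stateless codec drivers typically translate a timestamp
 * received in a control into the capture buffer holding the reference frame:
 *
 *	static struct vb2_buffer *
 *	my_find_ref_buf(struct vb2_queue *cap_q, u64 timestamp)
 *	{
 *		int idx = vb2_find_timestamp(cap_q, timestamp, 0);
 *
 *		if (idx < 0)
 *			return NULL;
 *		return cap_q->bufs[idx];
 *	}
 */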

/*
 * vb2_querybuf() - query video buffer information
 * @q:		videobuf queue
 * @b:		buffer struct passed from userspace to vidioc_querybuf handler
 *		in driver
 *
 * Should be called from vidioc_querybuf ioctl handler in driver.
 * This function will verify the passed v4l2_buffer structure and fill the
 * relevant information for the userspace.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_querybuf handler in driver.
 */
int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
{
	struct vb2_buffer *vb;
	int ret;

	if (b->type != q->type) {
		dprintk(q, 1, "wrong buffer type\n");
		return -EINVAL;
	}

	if (b->index >= q->num_buffers) {
		dprintk(q, 1, "buffer index out of range\n");
		return -EINVAL;
	}
	vb = q->bufs[b->index];
	ret = __verify_planes_array(vb, b);
	if (!ret)
		vb2_core_querybuf(q, b->index, b);
	return ret;
}
EXPORT_SYMBOL(vb2_querybuf);

static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
{
	*caps = V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS;
	if (q->io_modes & VB2_MMAP)
		*caps |= V4L2_BUF_CAP_SUPPORTS_MMAP;
	if (q->io_modes & VB2_USERPTR)
		*caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR;
	if (q->io_modes & VB2_DMABUF)
		*caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF;
	if (q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF)
		*caps |= V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
	if (q->allow_cache_hints && q->io_modes & VB2_MMAP)
		*caps |= V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS;
#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
	if (q->supports_requests)
		*caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
#endif
}

int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
{
	int ret = vb2_verify_memory_type(q, req->memory, req->type);

	fill_buf_caps(q, &req->capabilities);
	return ret ? ret : vb2_core_reqbufs(q, req->memory, &req->count);
}
EXPORT_SYMBOL_GPL(vb2_reqbufs);

int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
		    struct v4l2_buffer *b)
{
	int ret;

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	if (b->flags & V4L2_BUF_FLAG_REQUEST_FD)
		return -EINVAL;

	ret = vb2_queue_or_prepare_buf(q, mdev, b, true, NULL);

	return ret ? ret : vb2_core_prepare_buf(q, b->index, b);
}
EXPORT_SYMBOL_GPL(vb2_prepare_buf);

int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
{
	unsigned requested_planes = 1;
	unsigned requested_sizes[VIDEO_MAX_PLANES];
	struct v4l2_format *f = &create->format;
	int ret = vb2_verify_memory_type(q, create->memory, f->type);
	unsigned i;

	fill_buf_caps(q, &create->capabilities);
	create->index = q->num_buffers;
	if (create->count == 0)
		return ret != -EBUSY ? ret : 0;

	switch (f->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
		requested_planes = f->fmt.pix_mp.num_planes;
		if (requested_planes == 0 ||
		    requested_planes > VIDEO_MAX_PLANES)
			return -EINVAL;
		for (i = 0; i < requested_planes; i++)
			requested_sizes[i] =
				f->fmt.pix_mp.plane_fmt[i].sizeimage;
		break;
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		requested_sizes[0] = f->fmt.pix.sizeimage;
		break;
	case V4L2_BUF_TYPE_VBI_CAPTURE:
	case V4L2_BUF_TYPE_VBI_OUTPUT:
		requested_sizes[0] = f->fmt.vbi.samples_per_line *
			(f->fmt.vbi.count[0] + f->fmt.vbi.count[1]);
		break;
	case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
	case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
		requested_sizes[0] = f->fmt.sliced.io_size;
		break;
	case V4L2_BUF_TYPE_SDR_CAPTURE:
	case V4L2_BUF_TYPE_SDR_OUTPUT:
		requested_sizes[0] = f->fmt.sdr.buffersize;
		break;
	case V4L2_BUF_TYPE_META_CAPTURE:
	case V4L2_BUF_TYPE_META_OUTPUT:
		requested_sizes[0] = f->fmt.meta.buffersize;
		break;
	default:
		return -EINVAL;
	}
	for (i = 0; i < requested_planes; i++)
		if (requested_sizes[i] == 0)
			return -EINVAL;
	return ret ? ret : vb2_core_create_bufs(q, create->memory,
						&create->count,
						requested_planes,
						requested_sizes);
}
EXPORT_SYMBOL_GPL(vb2_create_bufs);

int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
	     struct v4l2_buffer *b)
{
	struct media_request *req = NULL;
	int ret;

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	ret = vb2_queue_or_prepare_buf(q, mdev, b, false, &req);
	if (ret)
		return ret;
	ret = vb2_core_qbuf(q, b->index, b, req);
	if (req)
		media_request_put(req);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_qbuf);

int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
{
	int ret;

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	if (b->type != q->type) {
		dprintk(q, 1, "invalid buffer type\n");
		return -EINVAL;
	}

	ret = vb2_core_dqbuf(q, NULL, b, nonblocking);

	if (!q->is_output &&
	    b->flags & V4L2_BUF_FLAG_DONE &&
	    b->flags & V4L2_BUF_FLAG_LAST)
		q->last_buffer_dequeued = true;

	/*
	 * After calling the VIDIOC_DQBUF V4L2_BUF_FLAG_DONE must be
	 * cleared.
	 */
	b->flags &= ~V4L2_BUF_FLAG_DONE;

	return ret;
}
EXPORT_SYMBOL_GPL(vb2_dqbuf);

int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
{
	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}
	return vb2_core_streamon(q, type);
}
EXPORT_SYMBOL_GPL(vb2_streamon);

int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
{
	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}
	return vb2_core_streamoff(q, type);
}
EXPORT_SYMBOL_GPL(vb2_streamoff);

int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
{
	return vb2_core_expbuf(q, &eb->fd, eb->type, eb->index,
			       eb->plane, eb->flags);
}
EXPORT_SYMBOL_GPL(vb2_expbuf);

int vb2_queue_init_name(struct vb2_queue *q, const char *name)
{
	/*
	 * Sanity check
	 */
	if (WARN_ON(!q) ||
	    WARN_ON(q->timestamp_flags &
		    ~(V4L2_BUF_FLAG_TIMESTAMP_MASK |
		      V4L2_BUF_FLAG_TSTAMP_SRC_MASK)))
		return -EINVAL;

	/* Warn that the driver should choose an appropriate timestamp type */
	WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
		V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);

	/* Warn that vb2_memory should match with v4l2_memory */
	if (WARN_ON(VB2_MEMORY_MMAP != (int)V4L2_MEMORY_MMAP)
		|| WARN_ON(VB2_MEMORY_USERPTR != (int)V4L2_MEMORY_USERPTR)
		|| WARN_ON(VB2_MEMORY_DMABUF != (int)V4L2_MEMORY_DMABUF))
		return -EINVAL;

	if (q->buf_struct_size == 0)
		q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);

	q->buf_ops = &v4l2_buf_ops;
	q->is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type);
	q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
	q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
			== V4L2_BUF_FLAG_TIMESTAMP_COPY;
	/*
	 * For compatibility with vb1: if QBUF hasn't been called yet, then
	 * return EPOLLERR as well. This only affects capture queues, output
	 * queues will always initialize waiting_for_buffers to false.
	 */
	q->quirk_poll_must_check_waiting_for_buffers = true;

	if (name)
		strscpy(q->name, name, sizeof(q->name));
	else
		q->name[0] = '\0';

	return vb2_core_queue_init(q);
}
EXPORT_SYMBOL_GPL(vb2_queue_init_name);
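
/*
 * Minimal queue setup sketch (the driver structure, buffer structure, ops
 * table and names below are assumptions used only for illustration; any
 * per-buffer driver struct must embed struct vb2_v4l2_buffer as its first
 * member):
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
 *	q->drv_priv = my_dev;
 *	q->buf_struct_size = sizeof(struct my_buffer);
 *	q->ops = &my_qops;
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 *	q->lock = &my_dev->mutex;
 *	ret = vb2_queue_init_name(q, "my-capture");
 */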

int vb2_queue_init(struct vb2_queue *q)
{
	return vb2_queue_init_name(q, NULL);
}
EXPORT_SYMBOL_GPL(vb2_queue_init);

void vb2_queue_release(struct vb2_queue *q)
{
	vb2_core_queue_release(q);
}
EXPORT_SYMBOL_GPL(vb2_queue_release);

int vb2_queue_change_type(struct vb2_queue *q, unsigned int type)
{
	if (type == q->type)
		return 0;

	if (vb2_is_busy(q))
		return -EBUSY;

	q->type = type;

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_queue_change_type);

__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
	struct video_device *vfd = video_devdata(file);
	__poll_t res;

	res = vb2_core_poll(q, file, wait);

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		poll_wait(file, &fh->wait, wait);
		if (v4l2_event_pending(fh))
			res |= EPOLLPRI;
	}

	return res;
}
EXPORT_SYMBOL_GPL(vb2_poll);

/*
 * The following functions are not part of the vb2 core API, but are helper
 * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations
 * and struct vb2_ops.
 * They contain boilerplate code that most if not all drivers have to do
 * and so they simplify the driver code.
 */

/* The queue is busy if there is an owner and you are not that owner. */
static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file)
{
	return vdev->queue->owner && vdev->queue->owner != file->private_data;
}

/* vb2 ioctl helpers */

int vb2_ioctl_reqbufs(struct file *file, void *priv,
		      struct v4l2_requestbuffers *p)
{
	struct video_device *vdev = video_devdata(file);
	int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type);

	fill_buf_caps(vdev->queue, &p->capabilities);
	if (res)
		return res;
	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	res = vb2_core_reqbufs(vdev->queue, p->memory, &p->count);
	/* If count == 0, then the owner has released all buffers and he
	   is no longer owner of the queue. Otherwise we have a new owner. */
	if (res == 0)
		vdev->queue->owner = p->count ? file->private_data : NULL;
	return res;
}
EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);

int vb2_ioctl_create_bufs(struct file *file, void *priv,
			  struct v4l2_create_buffers *p)
{
	struct video_device *vdev = video_devdata(file);
	int res = vb2_verify_memory_type(vdev->queue, p->memory,
			p->format.type);

	p->index = vdev->queue->num_buffers;
	fill_buf_caps(vdev->queue, &p->capabilities);
	/*
	 * If count == 0, then just check if memory and type are valid.
	 * Any -EBUSY result from vb2_verify_memory_type can be mapped to 0.
	 */
	if (p->count == 0)
		return res != -EBUSY ? res : 0;
	if (res)
		return res;
	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;

	res = vb2_create_bufs(vdev->queue, p);
	if (res == 0)
		vdev->queue->owner = file->private_data;
	return res;
}
EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);

int vb2_ioctl_prepare_buf(struct file *file, void *priv,
			  struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_prepare_buf(vdev->queue, vdev->v4l2_dev->mdev, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);

int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	/* No need to call vb2_queue_is_busy(), anyone can query buffers. */
	return vb2_querybuf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);

int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_qbuf(vdev->queue, vdev->v4l2_dev->mdev, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);

int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);

int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_streamon(vdev->queue, i);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);

int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_streamoff(vdev->queue, i);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);

int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_expbuf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
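
/*
 * How a driver typically plugs the helpers above into its ioctl table
 * (sketch; only the vb2-related entries are shown and the table name is an
 * assumption):
 *
 *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *		.vidioc_reqbufs		= vb2_ioctl_reqbufs,
 *		.vidioc_create_bufs	= vb2_ioctl_create_bufs,
 *		.vidioc_prepare_buf	= vb2_ioctl_prepare_buf,
 *		.vidioc_querybuf	= vb2_ioctl_querybuf,
 *		.vidioc_qbuf		= vb2_ioctl_qbuf,
 *		.vidioc_dqbuf		= vb2_ioctl_dqbuf,
 *		.vidioc_expbuf		= vb2_ioctl_expbuf,
 *		.vidioc_streamon	= vb2_ioctl_streamon,
 *		.vidioc_streamoff	= vb2_ioctl_streamoff,
 *	};
 */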

/* v4l2_file_operations helpers */

int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct video_device *vdev = video_devdata(file);

	return vb2_mmap(vdev->queue, vma);
}
EXPORT_SYMBOL_GPL(vb2_fop_mmap);

int _vb2_fop_release(struct file *file, struct mutex *lock)
{
	struct video_device *vdev = video_devdata(file);

	if (lock)
		mutex_lock(lock);
	if (file->private_data == vdev->queue->owner) {
		vb2_queue_release(vdev->queue);
		vdev->queue->owner = NULL;
	}
	if (lock)
		mutex_unlock(lock);
	return v4l2_fh_release(file);
}
EXPORT_SYMBOL_GPL(_vb2_fop_release);

int vb2_fop_release(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;

	return _vb2_fop_release(file, lock);
}
EXPORT_SYMBOL_GPL(vb2_fop_release);

ssize_t vb2_fop_write(struct file *file, const char __user *buf,
		      size_t count, loff_t *ppos)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
	int err = -EBUSY;

	if (!(vdev->queue->io_modes & VB2_WRITE))
		return -EINVAL;
	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;
	if (vb2_queue_is_busy(vdev, file))
		goto exit;
	err = vb2_write(vdev->queue, buf, count, ppos,
			file->f_flags & O_NONBLOCK);
	if (vdev->queue->fileio)
		vdev->queue->owner = file->private_data;
exit:
	if (lock)
		mutex_unlock(lock);
	return err;
}
EXPORT_SYMBOL_GPL(vb2_fop_write);

ssize_t vb2_fop_read(struct file *file, char __user *buf,
		     size_t count, loff_t *ppos)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
	int err = -EBUSY;

	if (!(vdev->queue->io_modes & VB2_READ))
		return -EINVAL;
	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;
	if (vb2_queue_is_busy(vdev, file))
		goto exit;
	err = vb2_read(vdev->queue, buf, count, ppos,
		       file->f_flags & O_NONBLOCK);
	if (vdev->queue->fileio)
		vdev->queue->owner = file->private_data;
exit:
	if (lock)
		mutex_unlock(lock);
	return err;
}
EXPORT_SYMBOL_GPL(vb2_fop_read);

__poll_t vb2_fop_poll(struct file *file, poll_table *wait)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *q = vdev->queue;
	struct mutex *lock = q->lock ? q->lock : vdev->lock;
	__poll_t res;
	void *fileio;

	/*
	 * If this helper doesn't know how to lock, then you shouldn't be using
	 * it but you should write your own.
	 */
	WARN_ON(!lock);

	if (lock && mutex_lock_interruptible(lock))
		return EPOLLERR;

	fileio = q->fileio;

	res = vb2_poll(vdev->queue, file, wait);

	/* If fileio was started, then we have a new queue owner. */
	if (!fileio && q->fileio)
		q->owner = file->private_data;
	if (lock)
		mutex_unlock(lock);
	return res;
}
EXPORT_SYMBOL_GPL(vb2_fop_poll);
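
/*
 * Matching file_operations wiring (sketch; the table name is an assumption,
 * and the helpers expect vdev->queue and a serialization lock to be set up
 * so they can find the queue and lock it):
 *
 *	static const struct v4l2_file_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= v4l2_fh_open,
 *		.release	= vb2_fop_release,
 *		.read		= vb2_fop_read,
 *		.poll		= vb2_fop_poll,
 *		.mmap		= vb2_fop_mmap,
 *		.unlocked_ioctl	= video_ioctl2,
 *	};
 */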

#ifndef CONFIG_MMU
unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct video_device *vdev = video_devdata(file);

	return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
#endif

void vb2_video_unregister_device(struct video_device *vdev)
{
	/* Check if vdev was ever registered at all */
	if (!vdev || !video_is_registered(vdev))
		return;

	/*
	 * Calling this function only makes sense if vdev->queue is set.
	 * If it is NULL, then just call video_unregister_device() instead.
	 */
	WARN_ON(!vdev->queue);

	/*
	 * Take a reference to the device since video_unregister_device()
	 * calls device_unregister(), but we don't want that to release
	 * the device since we want to clean up the queue first.
	 */
	get_device(&vdev->dev);
	video_unregister_device(vdev);
	if (vdev->queue && vdev->queue->owner) {
		struct mutex *lock = vdev->queue->lock ?
			vdev->queue->lock : vdev->lock;

		if (lock)
			mutex_lock(lock);
		vb2_queue_release(vdev->queue);
		vdev->queue->owner = NULL;
		if (lock)
			mutex_unlock(lock);
	}
	/*
	 * Now we put the device, and in most cases this will release
	 * everything.
	 */
	put_device(&vdev->dev);
}
EXPORT_SYMBOL_GPL(vb2_video_unregister_device);

/* vb2_ops helpers. Only use if vq->lock is non-NULL. */

void vb2_ops_wait_prepare(struct vb2_queue *vq)
{
	mutex_unlock(vq->lock);
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);

void vb2_ops_wait_finish(struct vb2_queue *vq)
{
	mutex_lock(vq->lock);
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
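
/*
 * These two helpers are meant to be used directly as the wait_prepare() and
 * wait_finish() callbacks when vq->lock is set (sketch; the other callbacks
 * below are hypothetical driver functions, not part of this file):
 *
 *	static const struct vb2_ops my_qops = {
 *		.queue_setup	= my_queue_setup,
 *		.buf_queue	= my_buf_queue,
 *		.start_streaming = my_start_streaming,
 *		.stop_streaming	= my_stop_streaming,
 *		.wait_prepare	= vb2_ops_wait_prepare,
 *		.wait_finish	= vb2_ops_wait_finish,
 *	};
 */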

/*
 * Note that this function is called during validation time and
 * thus the req_queue_mutex is held to ensure no request objects
 * can be added or deleted while validating. So there is no need
 * to protect the objects list.
 */
int vb2_request_validate(struct media_request *req)
{
	struct media_request_object *obj;
	int ret = 0;

	if (!vb2_request_buffer_cnt(req))
		return -ENOENT;

	list_for_each_entry(obj, &req->objects, list) {
		if (!obj->ops->prepare)
			continue;

		ret = obj->ops->prepare(obj);
		if (ret)
			break;
	}

	if (ret) {
		list_for_each_entry_continue_reverse(obj, &req->objects, list)
			if (obj->ops->unprepare)
				obj->ops->unprepare(obj);
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_request_validate);

void vb2_request_queue(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;

	/*
	 * Queue all objects. Note that buffer objects are at the end of the
	 * objects list, after all other object types. Once buffer objects
	 * are queued, the driver might delete them immediately (if the driver
	 * processes the buffer at once), so we have to use
	 * list_for_each_entry_safe() to handle the case where the object we
	 * queue is deleted.
	 */
	list_for_each_entry_safe(obj, obj_safe, &req->objects, list)
		if (obj->ops->queue)
			obj->ops->queue(obj);
}
EXPORT_SYMBOL_GPL(vb2_request_queue);
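
/*
 * Drivers supporting requests typically hook these two helpers into their
 * struct media_device_ops so that the media core validates and queues the
 * vb2 buffers bound to a request (sketch; the table name is an assumption):
 *
 *	static const struct media_device_ops my_media_ops = {
 *		.req_validate	= vb2_request_validate,
 *		.req_queue	= vb2_request_queue,
 *	};
 */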

MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
MODULE_LICENSE("GPL");