1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * vivid-vid-cap.c - video capture support functions.
4 *
5 * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
6 */
7
8 #include <linux/errno.h>
9 #include <linux/kernel.h>
10 #include <linux/sched.h>
11 #include <linux/vmalloc.h>
12 #include <linux/videodev2.h>
13 #include <linux/v4l2-dv-timings.h>
14 #include <media/v4l2-common.h>
15 #include <media/v4l2-event.h>
16 #include <media/v4l2-dv-timings.h>
17 #include <media/v4l2-rect.h>
18
19 #include "vivid-core.h"
20 #include "vivid-vid-common.h"
21 #include "vivid-kthread-cap.h"
22 #include "vivid-vid-cap.h"
23
24 static const struct vivid_fmt formats_ovl[] = {
25 {
26 .fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
27 .vdownsampling = { 1 },
28 .bit_depth = { 16 },
29 .planes = 1,
30 .buffers = 1,
31 },
32 {
33 .fourcc = V4L2_PIX_FMT_XRGB555, /* gggbbbbb arrrrrgg */
34 .vdownsampling = { 1 },
35 .bit_depth = { 16 },
36 .planes = 1,
37 .buffers = 1,
38 },
39 {
40 .fourcc = V4L2_PIX_FMT_ARGB555, /* gggbbbbb arrrrrgg */
41 .vdownsampling = { 1 },
42 .bit_depth = { 16 },
43 .planes = 1,
44 .buffers = 1,
45 },
46 };
47
48 /* The number of discrete webcam framesizes */
49 #define VIVID_WEBCAM_SIZES 6
50 /* The number of discrete webcam frameintervals */
51 #define VIVID_WEBCAM_IVALS (VIVID_WEBCAM_SIZES * 2)
52
53 /* Sizes must be in increasing order */
54 static const struct v4l2_frmsize_discrete webcam_sizes[VIVID_WEBCAM_SIZES] = {
55 { 320, 180 },
56 { 640, 360 },
57 { 640, 480 },
58 { 1280, 720 },
59 { 1920, 1080 },
60 { 3840, 2160 },
61 };
62
63 /*
64 * Intervals are listed from longest to shortest (i.e. increasing frame
65 * rates); there must be twice as many elements here as in webcam_sizes.
66 */
67 static const struct v4l2_fract webcam_intervals[VIVID_WEBCAM_IVALS] = {
68 { 1, 1 },
69 { 1, 2 },
70 { 1, 4 },
71 { 1, 5 },
72 { 1, 10 },
73 { 2, 25 },
74 { 1, 15 },
75 { 1, 25 },
76 { 1, 30 },
77 { 1, 40 },
78 { 1, 50 },
79 { 1, 60 },
80 };
81
82 static int vid_cap_queue_setup(struct vb2_queue *vq,
83 unsigned *nbuffers, unsigned *nplanes,
84 unsigned sizes[], struct device *alloc_devs[])
85 {
86 struct vivid_dev *dev = vb2_get_drv_priv(vq);
87 unsigned buffers = tpg_g_buffers(&dev->tpg);
88 unsigned h = dev->fmt_cap_rect.height;
89 unsigned p;
90
91 if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
92 /*
93 * You cannot use read() with FIELD_ALTERNATE since the field
94 * information (TOP/BOTTOM) cannot be passed back to the user.
95 */
96 if (vb2_fileio_is_active(vq))
97 return -EINVAL;
98 }
99
100 if (dev->queue_setup_error) {
101 /*
102 * Error injection: test what happens if queue_setup() returns
103 * an error.
104 */
105 dev->queue_setup_error = false;
106 return -EINVAL;
107 }
108 if (*nplanes) {
109 /*
110 * Check if the number of requested planes matches
111 * the number of buffers in the current format. You can't mix that.
112 */
113 if (*nplanes != buffers)
114 return -EINVAL;
115 for (p = 0; p < buffers; p++) {
116 if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h +
117 dev->fmt_cap->data_offset[p])
118 return -EINVAL;
119 }
120 } else {
121 for (p = 0; p < buffers; p++)
122 sizes[p] = (tpg_g_line_width(&dev->tpg, p) * h) /
123 dev->fmt_cap->vdownsampling[p] +
124 dev->fmt_cap->data_offset[p];
125 }
126
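/* Make sure at least two buffers are requested in total. */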
127 if (vq->num_buffers + *nbuffers < 2)
128 *nbuffers = 2 - vq->num_buffers;
129
130 *nplanes = buffers;
131
132 dprintk(dev, 1, "%s: count=%d\n", __func__, *nbuffers);
133 for (p = 0; p < buffers; p++)
134 dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]);
135
136 return 0;
137 }
138
139 static int vid_cap_buf_prepare(struct vb2_buffer *vb)
140 {
141 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
142 unsigned long size;
143 unsigned buffers = tpg_g_buffers(&dev->tpg);
144 unsigned p;
145
146 dprintk(dev, 1, "%s\n", __func__);
147
148 if (WARN_ON(NULL == dev->fmt_cap))
149 return -EINVAL;
150
151 if (dev->buf_prepare_error) {
152 /*
153 * Error injection: test what happens if buf_prepare() returns
154 * an error.
155 */
156 dev->buf_prepare_error = false;
157 return -EINVAL;
158 }
159 for (p = 0; p < buffers; p++) {
160 size = (tpg_g_line_width(&dev->tpg, p) *
161 dev->fmt_cap_rect.height) /
162 dev->fmt_cap->vdownsampling[p] +
163 dev->fmt_cap->data_offset[p];
164
165 if (vb2_plane_size(vb, p) < size) {
166 dprintk(dev, 1, "%s data will not fit into plane %u (%lu < %lu)\n",
167 __func__, p, vb2_plane_size(vb, p), size);
168 return -EINVAL;
169 }
170
171 vb2_set_plane_payload(vb, p, size);
172 vb->planes[p].data_offset = dev->fmt_cap->data_offset[p];
173 }
174
175 return 0;
176 }
177
178 static void vid_cap_buf_finish(struct vb2_buffer *vb)
179 {
180 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
181 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
182 struct v4l2_timecode *tc = &vbuf->timecode;
183 unsigned fps = 25;
184 unsigned seq = vbuf->sequence;
185
186 if (!vivid_is_sdtv_cap(dev))
187 return;
188
189 /*
190 * Set the timecode. Rarely used, so it is interesting to
191 * test this.
192 */
193 vbuf->flags |= V4L2_BUF_FLAG_TIMECODE;
194 if (dev->std_cap[dev->input] & V4L2_STD_525_60)
195 fps = 30;
196 tc->type = (fps == 30) ? V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS;
197 tc->flags = 0;
198 tc->frames = seq % fps;
199 tc->seconds = (seq / fps) % 60;
200 tc->minutes = (seq / (60 * fps)) % 60;
201 tc->hours = (seq / (60 * 60 * fps)) % 24;
202 }
203
204 static void vid_cap_buf_queue(struct vb2_buffer *vb)
205 {
206 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
207 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
208 struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);
209
210 dprintk(dev, 1, "%s\n", __func__);
211
212 spin_lock(&dev->slock);
213 list_add_tail(&buf->list, &dev->vid_cap_active);
214 spin_unlock(&dev->slock);
215 }
216
217 static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
218 {
219 struct vivid_dev *dev = vb2_get_drv_priv(vq);
220 unsigned i;
221 int err;
222
223 if (vb2_is_streaming(&dev->vb_vid_out_q))
224 dev->can_loop_video = vivid_vid_can_loop(dev);
225
226 dev->vid_cap_seq_count = 0;
227 dprintk(dev, 1, "%s\n", __func__);
228 for (i = 0; i < VIDEO_MAX_FRAME; i++)
229 dev->must_blank[i] = tpg_g_perc_fill(&dev->tpg) < 100;
230 if (dev->start_streaming_error) {
231 dev->start_streaming_error = false;
232 err = -EINVAL;
233 } else {
234 err = vivid_start_generating_vid_cap(dev, &dev->vid_cap_streaming);
235 }
236 if (err) {
237 struct vivid_buffer *buf, *tmp;
238
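/* On error, return all queued buffers to vb2 in the QUEUED state. */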
239 list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
240 list_del(&buf->list);
241 vb2_buffer_done(&buf->vb.vb2_buf,
242 VB2_BUF_STATE_QUEUED);
243 }
244 }
245 return err;
246 }
247
248 /* abort streaming and wait for last buffer */
249 static void vid_cap_stop_streaming(struct vb2_queue *vq)
250 {
251 struct vivid_dev *dev = vb2_get_drv_priv(vq);
252
253 dprintk(dev, 1, "%s\n", __func__);
254 vivid_stop_generating_vid_cap(dev, &dev->vid_cap_streaming);
255 dev->can_loop_video = false;
256 }
257
258 static void vid_cap_buf_request_complete(struct vb2_buffer *vb)
259 {
260 struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
261
262 v4l2_ctrl_request_complete(vb->req_obj.req, &dev->ctrl_hdl_vid_cap);
263 }
264
265 const struct vb2_ops vivid_vid_cap_qops = {
266 .queue_setup = vid_cap_queue_setup,
267 .buf_prepare = vid_cap_buf_prepare,
268 .buf_finish = vid_cap_buf_finish,
269 .buf_queue = vid_cap_buf_queue,
270 .start_streaming = vid_cap_start_streaming,
271 .stop_streaming = vid_cap_stop_streaming,
272 .buf_request_complete = vid_cap_buf_request_complete,
273 .wait_prepare = vb2_ops_wait_prepare,
274 .wait_finish = vb2_ops_wait_finish,
275 };
276
277 /*
278 * Determine the 'picture' quality based on the current TV frequency: either
279 * COLOR for a good 'signal', GRAY (grayscale picture) for a slightly off
280 * signal or NOISE for no signal.
281 */
282 void vivid_update_quality(struct vivid_dev *dev)
283 {
284 unsigned freq_modulus;
285
286 if (dev->loop_video && (vivid_is_svid_cap(dev) || vivid_is_hdmi_cap(dev))) {
287 /*
288 * The 'noise' will only be replaced by the actual video
289 * if the output video matches the input video settings.
290 */
291 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
292 return;
293 }
294 if (vivid_is_hdmi_cap(dev) &&
295 VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode[dev->input])) {
296 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
297 return;
298 }
299 if (vivid_is_sdtv_cap(dev) &&
300 VIVID_INVALID_SIGNAL(dev->std_signal_mode[dev->input])) {
301 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
302 return;
303 }
304 if (!vivid_is_tv_cap(dev)) {
305 tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
306 return;
307 }
308
309 /*
310 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
311 * From +/- 0.25 MHz around the channel there is color, and from
312 * +/- 1 MHz there is grayscale (chroma is lost).
313 * Everywhere else it is just noise.
314 */
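/* The tuner frequency is in units of 1/16th MHz (62.5 kHz). */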
315 freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
316 if (freq_modulus > 2 * 16) {
317 tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE,
318 next_pseudo_random32(dev->tv_freq ^ 0x55) & 0x3f);
319 return;
320 }
321 if (freq_modulus < 12 /*0.75 * 16*/ || freq_modulus > 20 /*1.25 * 16*/)
322 tpg_s_quality(&dev->tpg, TPG_QUAL_GRAY, 0);
323 else
324 tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
325 }
326
327 /*
328 * Get the current picture quality and the associated afc value.
329 */
330 static enum tpg_quality vivid_get_quality(struct vivid_dev *dev, s32 *afc)
331 {
332 unsigned freq_modulus;
333
334 if (afc)
335 *afc = 0;
336 if (tpg_g_quality(&dev->tpg) == TPG_QUAL_COLOR ||
337 tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE)
338 return tpg_g_quality(&dev->tpg);
339
340 /*
341 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
342 * From +/- 0.25 MHz around the channel there is color, and from
343 * +/- 1 MHz there is grayscale (chroma is lost).
344 * Everywhere else it is just gray.
345 */
346 freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
347 if (afc)
348 *afc = freq_modulus - 1 * 16;
349 return TPG_QUAL_GRAY;
350 }
351
352 enum tpg_video_aspect vivid_get_video_aspect(const struct vivid_dev *dev)
353 {
354 if (vivid_is_sdtv_cap(dev))
355 return dev->std_aspect_ratio[dev->input];
356
357 if (vivid_is_hdmi_cap(dev))
358 return dev->dv_timings_aspect_ratio[dev->input];
359
360 return TPG_VIDEO_ASPECT_IMAGE;
361 }
362
363 static enum tpg_pixel_aspect vivid_get_pixel_aspect(const struct vivid_dev *dev)
364 {
365 if (vivid_is_sdtv_cap(dev))
366 return (dev->std_cap[dev->input] & V4L2_STD_525_60) ?
367 TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;
368
369 if (vivid_is_hdmi_cap(dev) &&
370 dev->src_rect.width == 720 && dev->src_rect.height <= 576)
371 return dev->src_rect.height == 480 ?
372 TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;
373
374 return TPG_PIXEL_ASPECT_SQUARE;
375 }
376
377 /*
378 * Called whenever the format has to be reset which can occur when
379 * changing inputs, standard, timings, etc.
380 */
381 void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
382 {
383 struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt;
384 unsigned size;
385 u64 pixelclock;
386
387 switch (dev->input_type[dev->input]) {
388 case WEBCAM:
389 default:
390 dev->src_rect.width = webcam_sizes[dev->webcam_size_idx].width;
391 dev->src_rect.height = webcam_sizes[dev->webcam_size_idx].height;
392 dev->timeperframe_vid_cap = webcam_intervals[dev->webcam_ival_idx];
393 dev->field_cap = V4L2_FIELD_NONE;
394 tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
395 break;
396 case TV:
397 case SVID:
398 dev->field_cap = dev->tv_field_cap;
399 dev->src_rect.width = 720;
400 if (dev->std_cap[dev->input] & V4L2_STD_525_60) {
401 dev->src_rect.height = 480;
402 dev->timeperframe_vid_cap = (struct v4l2_fract) { 1001, 30000 };
403 dev->service_set_cap = V4L2_SLICED_CAPTION_525;
404 } else {
405 dev->src_rect.height = 576;
406 dev->timeperframe_vid_cap = (struct v4l2_fract) { 1000, 25000 };
407 dev->service_set_cap = V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B;
408 }
409 tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
410 break;
411 case HDMI:
412 dev->src_rect.width = bt->width;
413 dev->src_rect.height = bt->height;
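/*
 * The frame period is the total frame size (including blanking) divided
 * by the pixel clock. When reduced fps is enabled and the timings allow
 * it, the pixel clock is scaled by 1000/1001 (e.g. 60 Hz -> ~59.94 Hz).
 */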
414 size = V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt);
415 if (dev->reduced_fps && can_reduce_fps(bt)) {
416 pixelclock = div_u64(bt->pixelclock * 1000, 1001);
417 bt->flags |= V4L2_DV_FL_REDUCED_FPS;
418 } else {
419 pixelclock = bt->pixelclock;
420 bt->flags &= ~V4L2_DV_FL_REDUCED_FPS;
421 }
422 dev->timeperframe_vid_cap = (struct v4l2_fract) {
423 size / 100, (u32)pixelclock / 100
424 };
425 if (bt->interlaced)
426 dev->field_cap = V4L2_FIELD_ALTERNATE;
427 else
428 dev->field_cap = V4L2_FIELD_NONE;
429
430 /*
431 * We can be called from within s_ctrl; in that case we can't
432 * set/get controls. Luckily we don't need to in that case.
433 */
434 if (keep_controls || !dev->colorspace)
435 break;
436 if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
437 if (bt->width == 720 && bt->height <= 576)
438 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
439 else
440 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
441 v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 1);
442 } else {
443 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
444 v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 0);
445 }
446 tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
447 break;
448 }
449 vfree(dev->bitmap_cap);
450 dev->bitmap_cap = NULL;
451 vivid_update_quality(dev);
452 tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
453 dev->crop_cap = dev->src_rect;
454 dev->crop_bounds_cap = dev->src_rect;
455 dev->compose_cap = dev->crop_cap;
456 if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap))
457 dev->compose_cap.height /= 2;
458 dev->fmt_cap_rect = dev->compose_cap;
459 tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev));
460 tpg_s_pixel_aspect(&dev->tpg, vivid_get_pixel_aspect(dev));
461 tpg_update_mv_step(&dev->tpg);
462 }
463
464 /* Map the field to something that is valid for the current input */
465 static enum v4l2_field vivid_field_cap(struct vivid_dev *dev, enum v4l2_field field)
466 {
467 if (vivid_is_sdtv_cap(dev)) {
468 switch (field) {
469 case V4L2_FIELD_INTERLACED_TB:
470 case V4L2_FIELD_INTERLACED_BT:
471 case V4L2_FIELD_SEQ_TB:
472 case V4L2_FIELD_SEQ_BT:
473 case V4L2_FIELD_TOP:
474 case V4L2_FIELD_BOTTOM:
475 case V4L2_FIELD_ALTERNATE:
476 return field;
477 case V4L2_FIELD_INTERLACED:
478 default:
479 return V4L2_FIELD_INTERLACED;
480 }
481 }
482 if (vivid_is_hdmi_cap(dev))
483 return dev->dv_timings_cap[dev->input].bt.interlaced ?
484 V4L2_FIELD_ALTERNATE : V4L2_FIELD_NONE;
485 return V4L2_FIELD_NONE;
486 }
487
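/*
 * These helpers report the capture colorimetry: normally it comes from the
 * test pattern generator, but when video loopback is active on an S-Video
 * or HDMI input the output-side settings are reported instead.
 */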
488 static unsigned vivid_colorspace_cap(struct vivid_dev *dev)
489 {
490 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
491 return tpg_g_colorspace(&dev->tpg);
492 return dev->colorspace_out;
493 }
494
495 static unsigned vivid_xfer_func_cap(struct vivid_dev *dev)
496 {
497 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
498 return tpg_g_xfer_func(&dev->tpg);
499 return dev->xfer_func_out;
500 }
501
502 static unsigned vivid_ycbcr_enc_cap(struct vivid_dev *dev)
503 {
504 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
505 return tpg_g_ycbcr_enc(&dev->tpg);
506 return dev->ycbcr_enc_out;
507 }
508
509 static unsigned int vivid_hsv_enc_cap(struct vivid_dev *dev)
510 {
511 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
512 return tpg_g_hsv_enc(&dev->tpg);
513 return dev->hsv_enc_out;
514 }
515
516 static unsigned vivid_quantization_cap(struct vivid_dev *dev)
517 {
518 if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
519 return tpg_g_quantization(&dev->tpg);
520 return dev->quantization_out;
521 }
522
523 int vivid_g_fmt_vid_cap(struct file *file, void *priv,
524 struct v4l2_format *f)
525 {
526 struct vivid_dev *dev = video_drvdata(file);
527 struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
528 unsigned p;
529
530 mp->width = dev->fmt_cap_rect.width;
531 mp->height = dev->fmt_cap_rect.height;
532 mp->field = dev->field_cap;
533 mp->pixelformat = dev->fmt_cap->fourcc;
534 mp->colorspace = vivid_colorspace_cap(dev);
535 mp->xfer_func = vivid_xfer_func_cap(dev);
536 if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_HSV)
537 mp->hsv_enc = vivid_hsv_enc_cap(dev);
538 else
539 mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
540 mp->quantization = vivid_quantization_cap(dev);
541 mp->num_planes = dev->fmt_cap->buffers;
542 for (p = 0; p < mp->num_planes; p++) {
543 mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p);
544 mp->plane_fmt[p].sizeimage =
545 (tpg_g_line_width(&dev->tpg, p) * mp->height) /
546 dev->fmt_cap->vdownsampling[p] +
547 dev->fmt_cap->data_offset[p];
548 }
549 return 0;
550 }
551
552 int vivid_try_fmt_vid_cap(struct file *file, void *priv,
553 struct v4l2_format *f)
554 {
555 struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
556 struct v4l2_plane_pix_format *pfmt = mp->plane_fmt;
557 struct vivid_dev *dev = video_drvdata(file);
558 const struct vivid_fmt *fmt;
559 unsigned bytesperline, max_bpl;
560 unsigned factor = 1;
561 unsigned w, h;
562 unsigned p;
563 bool user_set_csc = !!(mp->flags & V4L2_PIX_FMT_FLAG_SET_CSC);
564
565 fmt = vivid_get_format(dev, mp->pixelformat);
566 if (!fmt) {
567 dprintk(dev, 1, "Fourcc format (0x%08x) unknown.\n",
568 mp->pixelformat);
569 mp->pixelformat = V4L2_PIX_FMT_YUYV;
570 fmt = vivid_get_format(dev, mp->pixelformat);
571 }
572
573 mp->field = vivid_field_cap(dev, mp->field);
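/*
 * Pick the capture size: the webcam input snaps to the nearest supported
 * discrete size, SDTV is fixed at 720x480 or 720x576, and other inputs
 * start from the source rectangle.
 */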
574 if (vivid_is_webcam(dev)) {
575 const struct v4l2_frmsize_discrete *sz =
576 v4l2_find_nearest_size(webcam_sizes,
577 VIVID_WEBCAM_SIZES, width,
578 height, mp->width, mp->height);
579
580 w = sz->width;
581 h = sz->height;
582 } else if (vivid_is_sdtv_cap(dev)) {
583 w = 720;
584 h = (dev->std_cap[dev->input] & V4L2_STD_525_60) ? 480 : 576;
585 } else {
586 w = dev->src_rect.width;
587 h = dev->src_rect.height;
588 }
589 if (V4L2_FIELD_HAS_T_OR_B(mp->field))
590 factor = 2;
591 if (vivid_is_webcam(dev) ||
592 (!dev->has_scaler_cap && !dev->has_crop_cap && !dev->has_compose_cap)) {
593 mp->width = w;
594 mp->height = h / factor;
595 } else {
596 struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor };
597
598 v4l2_rect_set_min_size(&r, &vivid_min_rect);
599 v4l2_rect_set_max_size(&r, &vivid_max_rect);
600 if (dev->has_scaler_cap && !dev->has_compose_cap) {
601 struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h };
602
603 v4l2_rect_set_max_size(&r, &max_r);
604 } else if (!dev->has_scaler_cap && dev->has_crop_cap && !dev->has_compose_cap) {
605 v4l2_rect_set_max_size(&r, &dev->src_rect);
606 } else if (!dev->has_scaler_cap && !dev->has_crop_cap) {
607 v4l2_rect_set_min_size(&r, &dev->src_rect);
608 }
609 mp->width = r.width;
610 mp->height = r.height / factor;
611 }
612
613 /* This driver supports custom bytesperline values */
614
615 mp->num_planes = fmt->buffers;
616 for (p = 0; p < fmt->buffers; p++) {
617 /* Calculate the minimum supported bytesperline value */
618 bytesperline = (mp->width * fmt->bit_depth[p]) >> 3;
619 /* Calculate the maximum supported bytesperline value */
620 max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->bit_depth[p]) >> 3;
621
622 if (pfmt[p].bytesperline > max_bpl)
623 pfmt[p].bytesperline = max_bpl;
624 if (pfmt[p].bytesperline < bytesperline)
625 pfmt[p].bytesperline = bytesperline;
626
627 pfmt[p].sizeimage = (pfmt[p].bytesperline * mp->height) /
628 fmt->vdownsampling[p] + fmt->data_offset[p];
629
630 memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
631 }
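/*
 * For formats with more planes than buffers (planar formats stored in a
 * single buffer) the extra planes are accounted for in the sizeimage of
 * the first plane.
 */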
632 for (p = fmt->buffers; p < fmt->planes; p++)
633 pfmt[0].sizeimage += (pfmt[0].bytesperline * mp->height *
634 (fmt->bit_depth[p] / fmt->vdownsampling[p])) /
635 (fmt->bit_depth[0] / fmt->vdownsampling[0]);
636
637 if (!user_set_csc || !v4l2_is_colorspace_valid(mp->colorspace))
638 mp->colorspace = vivid_colorspace_cap(dev);
639
640 if (!user_set_csc || !v4l2_is_xfer_func_valid(mp->xfer_func))
641 mp->xfer_func = vivid_xfer_func_cap(dev);
642
643 if (fmt->color_enc == TGP_COLOR_ENC_HSV) {
644 if (!user_set_csc || !v4l2_is_hsv_enc_valid(mp->hsv_enc))
645 mp->hsv_enc = vivid_hsv_enc_cap(dev);
646 } else if (fmt->color_enc == TGP_COLOR_ENC_YCBCR) {
647 if (!user_set_csc || !v4l2_is_ycbcr_enc_valid(mp->ycbcr_enc))
648 mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
649 } else {
650 mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
651 }
652
653 if (fmt->color_enc == TGP_COLOR_ENC_YCBCR ||
654 fmt->color_enc == TGP_COLOR_ENC_RGB) {
655 if (!user_set_csc || !v4l2_is_quant_valid(mp->quantization))
656 mp->quantization = vivid_quantization_cap(dev);
657 } else {
658 mp->quantization = vivid_quantization_cap(dev);
659 }
660
661 memset(mp->reserved, 0, sizeof(mp->reserved));
662 return 0;
663 }
664
665 int vivid_s_fmt_vid_cap(struct file *file, void *priv,
666 struct v4l2_format *f)
667 {
668 struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
669 struct vivid_dev *dev = video_drvdata(file);
670 struct v4l2_rect *crop = &dev->crop_cap;
671 struct v4l2_rect *compose = &dev->compose_cap;
672 struct vb2_queue *q = &dev->vb_vid_cap_q;
673 int ret = vivid_try_fmt_vid_cap(file, priv, f);
674 unsigned factor = 1;
675 unsigned p;
676 unsigned i;
677
678 if (ret < 0)
679 return ret;
680
681 if (vb2_is_busy(q)) {
682 dprintk(dev, 1, "%s device busy\n", __func__);
683 return -EBUSY;
684 }
685
686 if (dev->overlay_cap_owner && dev->fb_cap.fmt.pixelformat != mp->pixelformat) {
687 dprintk(dev, 1, "overlay is active, can't change pixelformat\n");
688 return -EBUSY;
689 }
690
691 dev->fmt_cap = vivid_get_format(dev, mp->pixelformat);
692 if (V4L2_FIELD_HAS_T_OR_B(mp->field))
693 factor = 2;
694
695 /* Note: the webcam input doesn't support scaling, cropping or composing */
696
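/*
 * Adjust the crop and compose rectangles to the new format, within the
 * limits of the scaler, crop and compose capabilities of this input.
 */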
697 if (!vivid_is_webcam(dev) &&
698 (dev->has_scaler_cap || dev->has_crop_cap || dev->has_compose_cap)) {
699 struct v4l2_rect r = { 0, 0, mp->width, mp->height };
700
701 if (dev->has_scaler_cap) {
702 if (dev->has_compose_cap)
703 v4l2_rect_map_inside(compose, &r);
704 else
705 *compose = r;
706 if (dev->has_crop_cap && !dev->has_compose_cap) {
707 struct v4l2_rect min_r = {
708 0, 0,
709 r.width / MAX_ZOOM,
710 factor * r.height / MAX_ZOOM
711 };
712 struct v4l2_rect max_r = {
713 0, 0,
714 r.width * MAX_ZOOM,
715 factor * r.height * MAX_ZOOM
716 };
717
718 v4l2_rect_set_min_size(crop, &min_r);
719 v4l2_rect_set_max_size(crop, &max_r);
720 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
721 } else if (dev->has_crop_cap) {
722 struct v4l2_rect min_r = {
723 0, 0,
724 compose->width / MAX_ZOOM,
725 factor * compose->height / MAX_ZOOM
726 };
727 struct v4l2_rect max_r = {
728 0, 0,
729 compose->width * MAX_ZOOM,
730 factor * compose->height * MAX_ZOOM
731 };
732
733 v4l2_rect_set_min_size(crop, &min_r);
734 v4l2_rect_set_max_size(crop, &max_r);
735 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
736 }
737 } else if (dev->has_crop_cap && !dev->has_compose_cap) {
738 r.height *= factor;
739 v4l2_rect_set_size_to(crop, &r);
740 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
741 r = *crop;
742 r.height /= factor;
743 v4l2_rect_set_size_to(compose, &r);
744 } else if (!dev->has_crop_cap) {
745 v4l2_rect_map_inside(compose, &r);
746 } else {
747 r.height *= factor;
748 v4l2_rect_set_max_size(crop, &r);
749 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
750 compose->top *= factor;
751 compose->height *= factor;
752 v4l2_rect_set_size_to(compose, crop);
753 v4l2_rect_map_inside(compose, &r);
754 compose->top /= factor;
755 compose->height /= factor;
756 }
757 } else if (vivid_is_webcam(dev)) {
758 /* Guaranteed to be a match */
759 for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
760 if (webcam_sizes[i].width == mp->width &&
761 webcam_sizes[i].height == mp->height)
762 break;
763 dev->webcam_size_idx = i;
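/*
 * Only the first 2 * (VIVID_WEBCAM_SIZES - i) frame intervals are valid
 * for webcam size index i, so clamp the interval index when switching to
 * a larger size.
 */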
764 if (dev->webcam_ival_idx >= 2 * (VIVID_WEBCAM_SIZES - i))
765 dev->webcam_ival_idx = 2 * (VIVID_WEBCAM_SIZES - i) - 1;
766 vivid_update_format_cap(dev, false);
767 } else {
768 struct v4l2_rect r = { 0, 0, mp->width, mp->height };
769
770 v4l2_rect_set_size_to(compose, &r);
771 r.height *= factor;
772 v4l2_rect_set_size_to(crop, &r);
773 }
774
775 dev->fmt_cap_rect.width = mp->width;
776 dev->fmt_cap_rect.height = mp->height;
777 tpg_s_buf_height(&dev->tpg, mp->height);
778 tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc);
779 for (p = 0; p < tpg_g_buffers(&dev->tpg); p++)
780 tpg_s_bytesperline(&dev->tpg, p, mp->plane_fmt[p].bytesperline);
781 dev->field_cap = mp->field;
782 if (dev->field_cap == V4L2_FIELD_ALTERNATE)
783 tpg_s_field(&dev->tpg, V4L2_FIELD_TOP, true);
784 else
785 tpg_s_field(&dev->tpg, dev->field_cap, false);
786 tpg_s_crop_compose(&dev->tpg, &dev->crop_cap, &dev->compose_cap);
787 if (vivid_is_sdtv_cap(dev))
788 dev->tv_field_cap = mp->field;
789 tpg_update_mv_step(&dev->tpg);
790 dev->tpg.colorspace = mp->colorspace;
791 dev->tpg.xfer_func = mp->xfer_func;
792 if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_YCBCR)
793 dev->tpg.ycbcr_enc = mp->ycbcr_enc;
794 else
795 dev->tpg.hsv_enc = mp->hsv_enc;
796 dev->tpg.quantization = mp->quantization;
797
798 return 0;
799 }
800
801 int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *priv,
802 struct v4l2_format *f)
803 {
804 struct vivid_dev *dev = video_drvdata(file);
805
806 if (!dev->multiplanar)
807 return -ENOTTY;
808 return vivid_g_fmt_vid_cap(file, priv, f);
809 }
810
811 int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
812 struct v4l2_format *f)
813 {
814 struct vivid_dev *dev = video_drvdata(file);
815
816 if (!dev->multiplanar)
817 return -ENOTTY;
818 return vivid_try_fmt_vid_cap(file, priv, f);
819 }
820
821 int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *priv,
822 struct v4l2_format *f)
823 {
824 struct vivid_dev *dev = video_drvdata(file);
825
826 if (!dev->multiplanar)
827 return -ENOTTY;
828 return vivid_s_fmt_vid_cap(file, priv, f);
829 }
830
831 int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
832 struct v4l2_format *f)
833 {
834 struct vivid_dev *dev = video_drvdata(file);
835
836 if (dev->multiplanar)
837 return -ENOTTY;
838 return fmt_sp2mp_func(file, priv, f, vivid_g_fmt_vid_cap);
839 }
840
841 int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
842 struct v4l2_format *f)
843 {
844 struct vivid_dev *dev = video_drvdata(file);
845
846 if (dev->multiplanar)
847 return -ENOTTY;
848 return fmt_sp2mp_func(file, priv, f, vivid_try_fmt_vid_cap);
849 }
850
851 int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
852 struct v4l2_format *f)
853 {
854 struct vivid_dev *dev = video_drvdata(file);
855
856 if (dev->multiplanar)
857 return -ENOTTY;
858 return fmt_sp2mp_func(file, priv, f, vivid_s_fmt_vid_cap);
859 }
860
861 int vivid_vid_cap_g_selection(struct file *file, void *priv,
862 struct v4l2_selection *sel)
863 {
864 struct vivid_dev *dev = video_drvdata(file);
865
866 if (!dev->has_crop_cap && !dev->has_compose_cap)
867 return -ENOTTY;
868 if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
869 return -EINVAL;
870 if (vivid_is_webcam(dev))
871 return -ENODATA;
872
873 sel->r.left = sel->r.top = 0;
874 switch (sel->target) {
875 case V4L2_SEL_TGT_CROP:
876 if (!dev->has_crop_cap)
877 return -EINVAL;
878 sel->r = dev->crop_cap;
879 break;
880 case V4L2_SEL_TGT_CROP_DEFAULT:
881 case V4L2_SEL_TGT_CROP_BOUNDS:
882 if (!dev->has_crop_cap)
883 return -EINVAL;
884 sel->r = dev->src_rect;
885 break;
886 case V4L2_SEL_TGT_COMPOSE_BOUNDS:
887 if (!dev->has_compose_cap)
888 return -EINVAL;
889 sel->r = vivid_max_rect;
890 break;
891 case V4L2_SEL_TGT_COMPOSE:
892 if (!dev->has_compose_cap)
893 return -EINVAL;
894 sel->r = dev->compose_cap;
895 break;
896 case V4L2_SEL_TGT_COMPOSE_DEFAULT:
897 if (!dev->has_compose_cap)
898 return -EINVAL;
899 sel->r = dev->fmt_cap_rect;
900 break;
901 default:
902 return -EINVAL;
903 }
904 return 0;
905 }
906
907 int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
908 {
909 struct vivid_dev *dev = video_drvdata(file);
910 struct v4l2_rect *crop = &dev->crop_cap;
911 struct v4l2_rect *compose = &dev->compose_cap;
912 unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
913 int ret;
914
915 if (!dev->has_crop_cap && !dev->has_compose_cap)
916 return -ENOTTY;
917 if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
918 return -EINVAL;
919 if (vivid_is_webcam(dev))
920 return -ENODATA;
921
922 switch (s->target) {
923 case V4L2_SEL_TGT_CROP:
924 if (!dev->has_crop_cap)
925 return -EINVAL;
926 ret = vivid_vid_adjust_sel(s->flags, &s->r);
927 if (ret)
928 return ret;
929 v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
930 v4l2_rect_set_max_size(&s->r, &dev->src_rect);
931 v4l2_rect_map_inside(&s->r, &dev->crop_bounds_cap);
932 s->r.top /= factor;
933 s->r.height /= factor;
934 if (dev->has_scaler_cap) {
935 struct v4l2_rect fmt = dev->fmt_cap_rect;
936 struct v4l2_rect max_rect = {
937 0, 0,
938 s->r.width * MAX_ZOOM,
939 s->r.height * MAX_ZOOM
940 };
941 struct v4l2_rect min_rect = {
942 0, 0,
943 s->r.width / MAX_ZOOM,
944 s->r.height / MAX_ZOOM
945 };
946
947 v4l2_rect_set_min_size(&fmt, &min_rect);
948 if (!dev->has_compose_cap)
949 v4l2_rect_set_max_size(&fmt, &max_rect);
950 if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
951 vb2_is_busy(&dev->vb_vid_cap_q))
952 return -EBUSY;
953 if (dev->has_compose_cap) {
954 v4l2_rect_set_min_size(compose, &min_rect);
955 v4l2_rect_set_max_size(compose, &max_rect);
956 }
957 dev->fmt_cap_rect = fmt;
958 tpg_s_buf_height(&dev->tpg, fmt.height);
959 } else if (dev->has_compose_cap) {
960 struct v4l2_rect fmt = dev->fmt_cap_rect;
961
962 v4l2_rect_set_min_size(&fmt, &s->r);
963 if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
964 vb2_is_busy(&dev->vb_vid_cap_q))
965 return -EBUSY;
966 dev->fmt_cap_rect = fmt;
967 tpg_s_buf_height(&dev->tpg, fmt.height);
968 v4l2_rect_set_size_to(compose, &s->r);
969 v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
970 } else {
971 if (!v4l2_rect_same_size(&s->r, &dev->fmt_cap_rect) &&
972 vb2_is_busy(&dev->vb_vid_cap_q))
973 return -EBUSY;
974 v4l2_rect_set_size_to(&dev->fmt_cap_rect, &s->r);
975 v4l2_rect_set_size_to(compose, &s->r);
976 v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
977 tpg_s_buf_height(&dev->tpg, dev->fmt_cap_rect.height);
978 }
979 s->r.top *= factor;
980 s->r.height *= factor;
981 *crop = s->r;
982 break;
983 case V4L2_SEL_TGT_COMPOSE:
984 if (!dev->has_compose_cap)
985 return -EINVAL;
986 ret = vivid_vid_adjust_sel(s->flags, &s->r);
987 if (ret)
988 return ret;
989 v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
990 v4l2_rect_set_max_size(&s->r, &dev->fmt_cap_rect);
991 if (dev->has_scaler_cap) {
992 struct v4l2_rect max_rect = {
993 0, 0,
994 dev->src_rect.width * MAX_ZOOM,
995 (dev->src_rect.height / factor) * MAX_ZOOM
996 };
997
998 v4l2_rect_set_max_size(&s->r, &max_rect);
999 if (dev->has_crop_cap) {
1000 struct v4l2_rect min_rect = {
1001 0, 0,
1002 s->r.width / MAX_ZOOM,
1003 (s->r.height * factor) / MAX_ZOOM
1004 };
1005 struct v4l2_rect max_rect = {
1006 0, 0,
1007 s->r.width * MAX_ZOOM,
1008 (s->r.height * factor) * MAX_ZOOM
1009 };
1010
1011 v4l2_rect_set_min_size(crop, &min_rect);
1012 v4l2_rect_set_max_size(crop, &max_rect);
1013 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
1014 }
1015 } else if (dev->has_crop_cap) {
1016 s->r.top *= factor;
1017 s->r.height *= factor;
1018 v4l2_rect_set_max_size(&s->r, &dev->src_rect);
1019 v4l2_rect_set_size_to(crop, &s->r);
1020 v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
1021 s->r.top /= factor;
1022 s->r.height /= factor;
1023 } else {
1024 v4l2_rect_set_size_to(&s->r, &dev->src_rect);
1025 s->r.height /= factor;
1026 }
1027 v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
1028 if (dev->bitmap_cap && (compose->width != s->r.width ||
1029 compose->height != s->r.height)) {
1030 vfree(dev->bitmap_cap);
1031 dev->bitmap_cap = NULL;
1032 }
1033 *compose = s->r;
1034 break;
1035 default:
1036 return -EINVAL;
1037 }
1038
1039 tpg_s_crop_compose(&dev->tpg, crop, compose);
1040 return 0;
1041 }
1042
1043 int vivid_vid_cap_g_pixelaspect(struct file *file, void *priv,
1044 int type, struct v4l2_fract *f)
1045 {
1046 struct vivid_dev *dev = video_drvdata(file);
1047
1048 if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1049 return -EINVAL;
1050
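/* Standard-definition pixel aspect ratios for 60 Hz and 50 Hz video. */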
1051 switch (vivid_get_pixel_aspect(dev)) {
1052 case TPG_PIXEL_ASPECT_NTSC:
1053 f->numerator = 11;
1054 f->denominator = 10;
1055 break;
1056 case TPG_PIXEL_ASPECT_PAL:
1057 f->numerator = 54;
1058 f->denominator = 59;
1059 break;
1060 default:
1061 break;
1062 }
1063 return 0;
1064 }
1065
1066 int vidioc_enum_fmt_vid_overlay(struct file *file, void *priv,
1067 struct v4l2_fmtdesc *f)
1068 {
1069 struct vivid_dev *dev = video_drvdata(file);
1070 const struct vivid_fmt *fmt;
1071
1072 if (dev->multiplanar)
1073 return -ENOTTY;
1074
1075 if (f->index >= ARRAY_SIZE(formats_ovl))
1076 return -EINVAL;
1077
1078 fmt = &formats_ovl[f->index];
1079
1080 f->pixelformat = fmt->fourcc;
1081 return 0;
1082 }
1083
1084 int vidioc_g_fmt_vid_overlay(struct file *file, void *priv,
1085 struct v4l2_format *f)
1086 {
1087 struct vivid_dev *dev = video_drvdata(file);
1088 const struct v4l2_rect *compose = &dev->compose_cap;
1089 struct v4l2_window *win = &f->fmt.win;
1090 unsigned clipcount = win->clipcount;
1091
1092 if (dev->multiplanar)
1093 return -ENOTTY;
1094
1095 win->w.top = dev->overlay_cap_top;
1096 win->w.left = dev->overlay_cap_left;
1097 win->w.width = compose->width;
1098 win->w.height = compose->height;
1099 win->field = dev->overlay_cap_field;
1100 win->clipcount = dev->clipcount_cap;
1101 if (clipcount > dev->clipcount_cap)
1102 clipcount = dev->clipcount_cap;
1103 if (dev->bitmap_cap == NULL)
1104 win->bitmap = NULL;
1105 else if (win->bitmap) {
1106 if (copy_to_user(win->bitmap, dev->bitmap_cap,
1107 ((compose->width + 7) / 8) * compose->height))
1108 return -EFAULT;
1109 }
1110 if (clipcount && win->clips) {
1111 if (copy_to_user(win->clips, dev->clips_cap,
1112 clipcount * sizeof(dev->clips_cap[0])))
1113 return -EFAULT;
1114 }
1115 return 0;
1116 }
1117
1118 int vidioc_try_fmt_vid_overlay(struct file *file, void *priv,
1119 struct v4l2_format *f)
1120 {
1121 struct vivid_dev *dev = video_drvdata(file);
1122 const struct v4l2_rect *compose = &dev->compose_cap;
1123 struct v4l2_window *win = &f->fmt.win;
1124 int i, j;
1125
1126 if (dev->multiplanar)
1127 return -ENOTTY;
1128
1129 win->w.left = clamp_t(int, win->w.left,
1130 -dev->fb_cap.fmt.width, dev->fb_cap.fmt.width);
1131 win->w.top = clamp_t(int, win->w.top,
1132 -dev->fb_cap.fmt.height, dev->fb_cap.fmt.height);
1133 win->w.width = compose->width;
1134 win->w.height = compose->height;
1135 if (win->field != V4L2_FIELD_BOTTOM && win->field != V4L2_FIELD_TOP)
1136 win->field = V4L2_FIELD_ANY;
1137 win->chromakey = 0;
1138 win->global_alpha = 0;
1139 if (win->clipcount && !win->clips)
1140 win->clipcount = 0;
1141 if (win->clipcount > MAX_CLIPS)
1142 win->clipcount = MAX_CLIPS;
1143 if (win->clipcount) {
1144 if (copy_from_user(dev->try_clips_cap, win->clips,
1145 win->clipcount * sizeof(dev->clips_cap[0])))
1146 return -EFAULT;
1147 for (i = 0; i < win->clipcount; i++) {
1148 struct v4l2_rect *r = &dev->try_clips_cap[i].c;
1149
1150 r->top = clamp_t(s32, r->top, 0, dev->fb_cap.fmt.height - 1);
1151 r->height = clamp_t(s32, r->height, 1, dev->fb_cap.fmt.height - r->top);
1152 r->left = clamp_t(u32, r->left, 0, dev->fb_cap.fmt.width - 1);
1153 r->width = clamp_t(u32, r->width, 1, dev->fb_cap.fmt.width - r->left);
1154 }
1155 /*
1156 * Yeah, so sue me, it's an O(n^2) algorithm. But n is a small
1157 * number and it's typically a one-time deal.
1158 */
1159 for (i = 0; i < win->clipcount - 1; i++) {
1160 struct v4l2_rect *r1 = &dev->try_clips_cap[i].c;
1161
1162 for (j = i + 1; j < win->clipcount; j++) {
1163 struct v4l2_rect *r2 = &dev->try_clips_cap[j].c;
1164
1165 if (v4l2_rect_overlap(r1, r2))
1166 return -EINVAL;
1167 }
1168 }
1169 if (copy_to_user(win->clips, dev->try_clips_cap,
1170 win->clipcount * sizeof(dev->clips_cap[0])))
1171 return -EFAULT;
1172 }
1173 return 0;
1174 }
1175
1176 int vidioc_s_fmt_vid_overlay(struct file *file, void *priv,
1177 struct v4l2_format *f)
1178 {
1179 struct vivid_dev *dev = video_drvdata(file);
1180 const struct v4l2_rect *compose = &dev->compose_cap;
1181 struct v4l2_window *win = &f->fmt.win;
1182 int ret = vidioc_try_fmt_vid_overlay(file, priv, f);
1183 unsigned bitmap_size = ((compose->width + 7) / 8) * compose->height;
1184 unsigned clips_size = win->clipcount * sizeof(dev->clips_cap[0]);
1185 void *new_bitmap = NULL;
1186
1187 if (ret)
1188 return ret;
1189
1190 if (win->bitmap) {
1191 new_bitmap = vzalloc(bitmap_size);
1192
1193 if (new_bitmap == NULL)
1194 return -ENOMEM;
1195 if (copy_from_user(new_bitmap, win->bitmap, bitmap_size)) {
1196 vfree(new_bitmap);
1197 return -EFAULT;
1198 }
1199 }
1200
1201 dev->overlay_cap_top = win->w.top;
1202 dev->overlay_cap_left = win->w.left;
1203 dev->overlay_cap_field = win->field;
1204 vfree(dev->bitmap_cap);
1205 dev->bitmap_cap = new_bitmap;
1206 dev->clipcount_cap = win->clipcount;
1207 if (dev->clipcount_cap)
1208 memcpy(dev->clips_cap, dev->try_clips_cap, clips_size);
1209 return 0;
1210 }
1211
1212 int vivid_vid_cap_overlay(struct file *file, void *fh, unsigned i)
1213 {
1214 struct vivid_dev *dev = video_drvdata(file);
1215
1216 if (dev->multiplanar)
1217 return -ENOTTY;
1218
1219 if (i && dev->fb_vbase_cap == NULL)
1220 return -EINVAL;
1221
1222 if (i && dev->fb_cap.fmt.pixelformat != dev->fmt_cap->fourcc) {
1223 dprintk(dev, 1, "mismatch between overlay and video capture pixelformats\n");
1224 return -EINVAL;
1225 }
1226
1227 if (dev->overlay_cap_owner && dev->overlay_cap_owner != fh)
1228 return -EBUSY;
1229 dev->overlay_cap_owner = i ? fh : NULL;
1230 return 0;
1231 }
1232
1233 int vivid_vid_cap_g_fbuf(struct file *file, void *fh,
1234 struct v4l2_framebuffer *a)
1235 {
1236 struct vivid_dev *dev = video_drvdata(file);
1237
1238 if (dev->multiplanar)
1239 return -ENOTTY;
1240
1241 *a = dev->fb_cap;
1242 a->capability = V4L2_FBUF_CAP_BITMAP_CLIPPING |
1243 V4L2_FBUF_CAP_LIST_CLIPPING;
1244 a->flags = V4L2_FBUF_FLAG_PRIMARY;
1245 a->fmt.field = V4L2_FIELD_NONE;
1246 a->fmt.colorspace = V4L2_COLORSPACE_SRGB;
1247 a->fmt.priv = 0;
1248 return 0;
1249 }
1250
1251 int vivid_vid_cap_s_fbuf(struct file *file, void *fh,
1252 const struct v4l2_framebuffer *a)
1253 {
1254 struct vivid_dev *dev = video_drvdata(file);
1255 const struct vivid_fmt *fmt;
1256
1257 if (dev->multiplanar)
1258 return -ENOTTY;
1259
1260 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
1261 return -EPERM;
1262
1263 if (dev->overlay_cap_owner)
1264 return -EBUSY;
1265
1266 if (a->base == NULL) {
1267 dev->fb_cap.base = NULL;
1268 dev->fb_vbase_cap = NULL;
1269 return 0;
1270 }
1271
1272 if (a->fmt.width < 48 || a->fmt.height < 32)
1273 return -EINVAL;
1274 fmt = vivid_get_format(dev, a->fmt.pixelformat);
1275 if (!fmt || !fmt->can_do_overlay)
1276 return -EINVAL;
1277 if (a->fmt.bytesperline < (a->fmt.width * fmt->bit_depth[0]) / 8)
1278 return -EINVAL;
1279 if (a->fmt.height * a->fmt.bytesperline < a->fmt.sizeimage)
1280 return -EINVAL;
1281
1282 dev->fb_vbase_cap = phys_to_virt((unsigned long)a->base);
1283 dev->fb_cap = *a;
1284 dev->overlay_cap_left = clamp_t(int, dev->overlay_cap_left,
1285 -dev->fb_cap.fmt.width, dev->fb_cap.fmt.width);
1286 dev->overlay_cap_top = clamp_t(int, dev->overlay_cap_top,
1287 -dev->fb_cap.fmt.height, dev->fb_cap.fmt.height);
1288 return 0;
1289 }
1290
1291 static const struct v4l2_audio vivid_audio_inputs[] = {
1292 { 0, "TV", V4L2_AUDCAP_STEREO },
1293 { 1, "Line-In", V4L2_AUDCAP_STEREO },
1294 };
1295
1296 int vidioc_enum_input(struct file *file, void *priv,
1297 struct v4l2_input *inp)
1298 {
1299 struct vivid_dev *dev = video_drvdata(file);
1300
1301 if (inp->index >= dev->num_inputs)
1302 return -EINVAL;
1303
1304 inp->type = V4L2_INPUT_TYPE_CAMERA;
1305 switch (dev->input_type[inp->index]) {
1306 case WEBCAM:
1307 snprintf(inp->name, sizeof(inp->name), "Webcam %u",
1308 dev->input_name_counter[inp->index]);
1309 inp->capabilities = 0;
1310 break;
1311 case TV:
1312 snprintf(inp->name, sizeof(inp->name), "TV %u",
1313 dev->input_name_counter[inp->index]);
1314 inp->type = V4L2_INPUT_TYPE_TUNER;
1315 inp->std = V4L2_STD_ALL;
1316 if (dev->has_audio_inputs)
1317 inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
1318 inp->capabilities = V4L2_IN_CAP_STD;
1319 break;
1320 case SVID:
1321 snprintf(inp->name, sizeof(inp->name), "S-Video %u",
1322 dev->input_name_counter[inp->index]);
1323 inp->std = V4L2_STD_ALL;
1324 if (dev->has_audio_inputs)
1325 inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
1326 inp->capabilities = V4L2_IN_CAP_STD;
1327 break;
1328 case HDMI:
1329 snprintf(inp->name, sizeof(inp->name), "HDMI %u",
1330 dev->input_name_counter[inp->index]);
1331 inp->capabilities = V4L2_IN_CAP_DV_TIMINGS;
1332 if (dev->edid_blocks == 0 ||
1333 dev->dv_timings_signal_mode[dev->input] == NO_SIGNAL)
1334 inp->status |= V4L2_IN_ST_NO_SIGNAL;
1335 else if (dev->dv_timings_signal_mode[dev->input] == NO_LOCK ||
1336 dev->dv_timings_signal_mode[dev->input] == OUT_OF_RANGE)
1337 inp->status |= V4L2_IN_ST_NO_H_LOCK;
1338 break;
1339 }
1340 if (dev->sensor_hflip)
1341 inp->status |= V4L2_IN_ST_HFLIP;
1342 if (dev->sensor_vflip)
1343 inp->status |= V4L2_IN_ST_VFLIP;
1344 if (dev->input == inp->index && vivid_is_sdtv_cap(dev)) {
1345 if (dev->std_signal_mode[dev->input] == NO_SIGNAL) {
1346 inp->status |= V4L2_IN_ST_NO_SIGNAL;
1347 } else if (dev->std_signal_mode[dev->input] == NO_LOCK) {
1348 inp->status |= V4L2_IN_ST_NO_H_LOCK;
1349 } else if (vivid_is_tv_cap(dev)) {
1350 switch (tpg_g_quality(&dev->tpg)) {
1351 case TPG_QUAL_GRAY:
1352 inp->status |= V4L2_IN_ST_COLOR_KILL;
1353 break;
1354 case TPG_QUAL_NOISE:
1355 inp->status |= V4L2_IN_ST_NO_H_LOCK;
1356 break;
1357 default:
1358 break;
1359 }
1360 }
1361 }
1362 return 0;
1363 }
1364
1365 int vidioc_g_input(struct file *file, void *priv, unsigned *i)
1366 {
1367 struct vivid_dev *dev = video_drvdata(file);
1368
1369 *i = dev->input;
1370 return 0;
1371 }
1372
1373 int vidioc_s_input(struct file *file, void *priv, unsigned i)
1374 {
1375 struct vivid_dev *dev = video_drvdata(file);
1376 struct v4l2_bt_timings *bt = &dev->dv_timings_cap[dev->input].bt;
1377 unsigned brightness;
1378
1379 if (i >= dev->num_inputs)
1380 return -EINVAL;
1381
1382 if (i == dev->input)
1383 return 0;
1384
1385 if (vb2_is_busy(&dev->vb_vid_cap_q) ||
1386 vb2_is_busy(&dev->vb_vbi_cap_q) ||
1387 vb2_is_busy(&dev->vb_meta_cap_q))
1388 return -EBUSY;
1389
1390 dev->input = i;
1391 dev->vid_cap_dev.tvnorms = 0;
1392 if (dev->input_type[i] == TV || dev->input_type[i] == SVID) {
1393 dev->tv_audio_input = (dev->input_type[i] == TV) ? 0 : 1;
1394 dev->vid_cap_dev.tvnorms = V4L2_STD_ALL;
1395 }
1396 dev->vbi_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
1397 dev->meta_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
1398 vivid_update_format_cap(dev, false);
1399
1400 if (dev->colorspace) {
1401 switch (dev->input_type[i]) {
1402 case WEBCAM:
1403 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
1404 break;
1405 case TV:
1406 case SVID:
1407 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
1408 break;
1409 case HDMI:
1410 if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
1411 if (dev->src_rect.width == 720 && dev->src_rect.height <= 576)
1412 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
1413 else
1414 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
1415 } else {
1416 v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
1417 }
1418 break;
1419 }
1420 }
1421
1422 /*
1423 * Modify the brightness range depending on the input.
1424 * This makes it easy to use vivid to test if applications can
1425 * handle control range modifications and is also how this is
1426 * typically used in practice as different inputs may be hooked
1427 * up to different receivers with different control ranges.
1428 */
1429 brightness = 128 * i + dev->input_brightness[i];
1430 v4l2_ctrl_modify_range(dev->brightness,
1431 128 * i, 255 + 128 * i, 1, 128 + 128 * i);
1432 v4l2_ctrl_s_ctrl(dev->brightness, brightness);
1433
1434 /* Restore per-input states. */
1435 v4l2_ctrl_activate(dev->ctrl_dv_timings_signal_mode,
1436 vivid_is_hdmi_cap(dev));
1437 v4l2_ctrl_activate(dev->ctrl_dv_timings, vivid_is_hdmi_cap(dev) &&
1438 dev->dv_timings_signal_mode[dev->input] ==
1439 SELECTED_DV_TIMINGS);
1440 v4l2_ctrl_activate(dev->ctrl_std_signal_mode, vivid_is_sdtv_cap(dev));
1441 v4l2_ctrl_activate(dev->ctrl_standard, vivid_is_sdtv_cap(dev) &&
1442 dev->std_signal_mode[dev->input]);
1443
1444 if (vivid_is_hdmi_cap(dev)) {
1445 v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings_signal_mode,
1446 dev->dv_timings_signal_mode[dev->input]);
1447 v4l2_ctrl_s_ctrl(dev->ctrl_dv_timings,
1448 dev->query_dv_timings[dev->input]);
1449 } else if (vivid_is_sdtv_cap(dev)) {
1450 v4l2_ctrl_s_ctrl(dev->ctrl_std_signal_mode,
1451 dev->std_signal_mode[dev->input]);
1452 v4l2_ctrl_s_ctrl(dev->ctrl_standard,
1453 dev->std_signal_mode[dev->input]);
1454 }
1455
1456 return 0;
1457 }
1458
1459 int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
1460 {
1461 if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
1462 return -EINVAL;
1463 *vin = vivid_audio_inputs[vin->index];
1464 return 0;
1465 }
1466
1467 int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
1468 {
1469 struct vivid_dev *dev = video_drvdata(file);
1470
1471 if (!vivid_is_sdtv_cap(dev))
1472 return -EINVAL;
1473 *vin = vivid_audio_inputs[dev->tv_audio_input];
1474 return 0;
1475 }
1476
1477 int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin)
1478 {
1479 struct vivid_dev *dev = video_drvdata(file);
1480
1481 if (!vivid_is_sdtv_cap(dev))
1482 return -EINVAL;
1483 if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
1484 return -EINVAL;
1485 dev->tv_audio_input = vin->index;
1486 return 0;
1487 }
1488
1489 int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
1490 {
1491 struct vivid_dev *dev = video_drvdata(file);
1492
1493 if (vf->tuner != 0)
1494 return -EINVAL;
1495 vf->frequency = dev->tv_freq;
1496 return 0;
1497 }
1498
1499 int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
1500 {
1501 struct vivid_dev *dev = video_drvdata(file);
1502
1503 if (vf->tuner != 0)
1504 return -EINVAL;
1505 dev->tv_freq = clamp_t(unsigned, vf->frequency, MIN_TV_FREQ, MAX_TV_FREQ);
1506 if (vivid_is_tv_cap(dev))
1507 vivid_update_quality(dev);
1508 return 0;
1509 }
1510
1511 int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
1512 {
1513 struct vivid_dev *dev = video_drvdata(file);
1514
1515 if (vt->index != 0)
1516 return -EINVAL;
1517 if (vt->audmode > V4L2_TUNER_MODE_LANG1_LANG2)
1518 return -EINVAL;
1519 dev->tv_audmode = vt->audmode;
1520 return 0;
1521 }
1522
1523 int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
1524 {
1525 struct vivid_dev *dev = video_drvdata(file);
1526 enum tpg_quality qual;
1527
1528 if (vt->index != 0)
1529 return -EINVAL;
1530
1531 vt->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO |
1532 V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
1533 vt->audmode = dev->tv_audmode;
1534 vt->rangelow = MIN_TV_FREQ;
1535 vt->rangehigh = MAX_TV_FREQ;
1536 qual = vivid_get_quality(dev, &vt->afc);
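/*
 * Map the picture quality onto a signal strength: full scale for color,
 * half scale for gray, zero for noise.
 */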
1537 if (qual == TPG_QUAL_COLOR)
1538 vt->signal = 0xffff;
1539 else if (qual == TPG_QUAL_GRAY)
1540 vt->signal = 0x8000;
1541 else
1542 vt->signal = 0;
1543 if (qual == TPG_QUAL_NOISE) {
1544 vt->rxsubchans = 0;
1545 } else if (qual == TPG_QUAL_GRAY) {
1546 vt->rxsubchans = V4L2_TUNER_SUB_MONO;
1547 } else {
1548 unsigned int channel_nr = dev->tv_freq / (6 * 16);
1549 unsigned int options =
1550 (dev->std_cap[dev->input] & V4L2_STD_NTSC_M) ? 4 : 3;
1551
1552 switch (channel_nr % options) {
1553 case 0:
1554 vt->rxsubchans = V4L2_TUNER_SUB_MONO;
1555 break;
1556 case 1:
1557 vt->rxsubchans = V4L2_TUNER_SUB_STEREO;
1558 break;
1559 case 2:
1560 if (dev->std_cap[dev->input] & V4L2_STD_NTSC_M)
1561 vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_SAP;
1562 else
1563 vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
1564 break;
1565 case 3:
1566 vt->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_SAP;
1567 break;
1568 }
1569 }
1570 strscpy(vt->name, "TV Tuner", sizeof(vt->name));
1571 return 0;
1572 }
1573
1574 /* Must remain in sync with the vivid_ctrl_standard_strings array */
1575 const v4l2_std_id vivid_standard[] = {
1576 V4L2_STD_NTSC_M,
1577 V4L2_STD_NTSC_M_JP,
1578 V4L2_STD_NTSC_M_KR,
1579 V4L2_STD_NTSC_443,
1580 V4L2_STD_PAL_BG | V4L2_STD_PAL_H,
1581 V4L2_STD_PAL_I,
1582 V4L2_STD_PAL_DK,
1583 V4L2_STD_PAL_M,
1584 V4L2_STD_PAL_N,
1585 V4L2_STD_PAL_Nc,
1586 V4L2_STD_PAL_60,
1587 V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H,
1588 V4L2_STD_SECAM_DK,
1589 V4L2_STD_SECAM_L,
1590 V4L2_STD_SECAM_LC,
1591 V4L2_STD_UNKNOWN
1592 };
1593
1594 /* Must remain in sync with the vivid_standard array */
1595 const char * const vivid_ctrl_standard_strings[] = {
1596 "NTSC-M",
1597 "NTSC-M-JP",
1598 "NTSC-M-KR",
1599 "NTSC-443",
1600 "PAL-BGH",
1601 "PAL-I",
1602 "PAL-DK",
1603 "PAL-M",
1604 "PAL-N",
1605 "PAL-Nc",
1606 "PAL-60",
1607 "SECAM-BGH",
1608 "SECAM-DK",
1609 "SECAM-L",
1610 "SECAM-Lc",
1611 NULL,
1612 };
1613
1614 int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *id)
1615 {
1616 struct vivid_dev *dev = video_drvdata(file);
1617 unsigned int last = dev->query_std_last[dev->input];
1618
1619 if (!vivid_is_sdtv_cap(dev))
1620 return -ENODATA;
1621 if (dev->std_signal_mode[dev->input] == NO_SIGNAL ||
1622 dev->std_signal_mode[dev->input] == NO_LOCK) {
1623 *id = V4L2_STD_UNKNOWN;
1624 return 0;
1625 }
1626 if (vivid_is_tv_cap(dev) && tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE) {
1627 *id = V4L2_STD_UNKNOWN;
1628 } else if (dev->std_signal_mode[dev->input] == CURRENT_STD) {
1629 *id = dev->std_cap[dev->input];
1630 } else if (dev->std_signal_mode[dev->input] == SELECTED_STD) {
1631 *id = dev->query_std[dev->input];
1632 } else {
1633 *id = vivid_standard[last];
1634 dev->query_std_last[dev->input] =
1635 (last + 1) % ARRAY_SIZE(vivid_standard);
1636 }
1637
1638 return 0;
1639 }
1640
1641 int vivid_vid_cap_s_std(struct file *file, void *priv, v4l2_std_id id)
1642 {
1643 struct vivid_dev *dev = video_drvdata(file);
1644
1645 if (!vivid_is_sdtv_cap(dev))
1646 return -ENODATA;
1647 if (dev->std_cap[dev->input] == id)
1648 return 0;
1649 if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
1650 return -EBUSY;
1651 dev->std_cap[dev->input] = id;
1652 vivid_update_format_cap(dev, false);
1653 return 0;
1654 }
1655
1656 static void find_aspect_ratio(u32 width, u32 height,
1657 u32 *num, u32 *denom)
1658 {
1659 if (!(height % 3) && ((height * 4 / 3) == width)) {
1660 *num = 4;
1661 *denom = 3;
1662 } else if (!(height % 9) && ((height * 16 / 9) == width)) {
1663 *num = 16;
1664 *denom = 9;
1665 } else if (!(height % 10) && ((height * 16 / 10) == width)) {
1666 *num = 16;
1667 *denom = 10;
1668 } else if (!(height % 4) && ((height * 5 / 4) == width)) {
1669 *num = 5;
1670 *denom = 4;
1671 } else if (!(height % 9) && ((height * 15 / 9) == width)) {
1672 *num = 15;
1673 *denom = 9;
1674 } else { /* default to 16:9 */
1675 *num = 16;
1676 *denom = 9;
1677 }
1678 }
1679
1680 static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings)
1681 {
1682 struct v4l2_bt_timings *bt = &timings->bt;
1683 u32 total_h_pixel;
1684 u32 total_v_lines;
1685 u32 h_freq;
1686
1687 if (!v4l2_valid_dv_timings(timings, &vivid_dv_timings_cap,
1688 NULL, NULL))
1689 return false;
1690
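/*
 * Derive the horizontal frequency from the pixel clock and the total
 * frame width, then check whether the timings match the CVT or GTF
 * formulas.
 */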
1691 total_h_pixel = V4L2_DV_BT_FRAME_WIDTH(bt);
1692 total_v_lines = V4L2_DV_BT_FRAME_HEIGHT(bt);
1693
1694 h_freq = (u32)bt->pixelclock / total_h_pixel;
1695
1696 if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_CVT)) {
1697 if (v4l2_detect_cvt(total_v_lines, h_freq, bt->vsync, bt->width,
1698 bt->polarities, bt->interlaced, timings))
1699 return true;
1700 }
1701
1702 if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_GTF)) {
1703 struct v4l2_fract aspect_ratio;
1704
1705 find_aspect_ratio(bt->width, bt->height,
1706 &aspect_ratio.numerator,
1707 &aspect_ratio.denominator);
1708 if (v4l2_detect_gtf(total_v_lines, h_freq, bt->vsync,
1709 bt->polarities, bt->interlaced,
1710 aspect_ratio, timings))
1711 return true;
1712 }
1713 return false;
1714 }
1715
1716 int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh,
1717 struct v4l2_dv_timings *timings)
1718 {
1719 struct vivid_dev *dev = video_drvdata(file);
1720
1721 if (!vivid_is_hdmi_cap(dev))
1722 return -ENODATA;
1723 if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap,
1724 0, NULL, NULL) &&
1725 !valid_cvt_gtf_timings(timings))
1726 return -EINVAL;
1727
1728 if (v4l2_match_dv_timings(timings, &dev->dv_timings_cap[dev->input],
1729 0, false))
1730 return 0;
1731 if (vb2_is_busy(&dev->vb_vid_cap_q))
1732 return -EBUSY;
1733
1734 dev->dv_timings_cap[dev->input] = *timings;
1735 vivid_update_format_cap(dev, false);
1736 return 0;
1737 }
1738
1739 int vidioc_query_dv_timings(struct file *file, void *_fh,
1740 struct v4l2_dv_timings *timings)
1741 {
1742 struct vivid_dev *dev = video_drvdata(file);
1743 unsigned int input = dev->input;
1744 unsigned int last = dev->query_dv_timings_last[input];
1745
1746 if (!vivid_is_hdmi_cap(dev))
1747 return -ENODATA;
1748 if (dev->dv_timings_signal_mode[input] == NO_SIGNAL ||
1749 dev->edid_blocks == 0)
1750 return -ENOLINK;
1751 if (dev->dv_timings_signal_mode[input] == NO_LOCK)
1752 return -ENOLCK;
1753 if (dev->dv_timings_signal_mode[input] == OUT_OF_RANGE) {
1754 timings->bt.pixelclock = vivid_dv_timings_cap.bt.max_pixelclock * 2;
1755 return -ERANGE;
1756 }
1757 if (dev->dv_timings_signal_mode[input] == CURRENT_DV_TIMINGS) {
1758 *timings = dev->dv_timings_cap[input];
1759 } else if (dev->dv_timings_signal_mode[input] ==
1760 SELECTED_DV_TIMINGS) {
1761 *timings =
1762 v4l2_dv_timings_presets[dev->query_dv_timings[input]];
1763 } else {
1764 *timings =
1765 v4l2_dv_timings_presets[last];
1766 dev->query_dv_timings_last[input] =
1767 (last + 1) % dev->query_dv_timings_size;
1768 }
1769 return 0;
1770 }
1771
1772 int vidioc_s_edid(struct file *file, void *_fh,
1773 struct v4l2_edid *edid)
1774 {
1775 struct vivid_dev *dev = video_drvdata(file);
1776 u16 phys_addr;
1777 u32 display_present = 0;
1778 unsigned int i, j;
1779 int ret;
1780
1781 memset(edid->reserved, 0, sizeof(edid->reserved));
1782 if (edid->pad >= dev->num_inputs)
1783 return -EINVAL;
1784 if (dev->input_type[edid->pad] != HDMI || edid->start_block)
1785 return -EINVAL;
1786 if (edid->blocks == 0) {
1787 dev->edid_blocks = 0;
1788 v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, 0);
1789 v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, 0);
1790 phys_addr = CEC_PHYS_ADDR_INVALID;
1791 goto set_phys_addr;
1792 }
1793 if (edid->blocks > dev->edid_max_blocks) {
1794 edid->blocks = dev->edid_max_blocks;
1795 return -E2BIG;
1796 }
1797 phys_addr = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL);
1798 ret = v4l2_phys_addr_validate(phys_addr, &phys_addr, NULL);
1799 if (ret)
1800 return ret;
1801
1802 if (vb2_is_busy(&dev->vb_vid_cap_q))
1803 return -EBUSY;
1804
1805 dev->edid_blocks = edid->blocks;
1806 memcpy(dev->edid, edid->edid, edid->blocks * 128);
1807
1808 for (i = 0, j = 0; i < dev->num_outputs; i++)
1809 if (dev->output_type[i] == HDMI)
1810 display_present |=
1811 dev->display_present[i] << j++;
1812
1813 v4l2_ctrl_s_ctrl(dev->ctrl_tx_edid_present, display_present);
1814 v4l2_ctrl_s_ctrl(dev->ctrl_tx_hotplug, display_present);
1815
1816 set_phys_addr:
1817 /* TODO: a proper hotplug detect cycle should be emulated here */
1818 cec_s_phys_addr(dev->cec_rx_adap, phys_addr, false);
1819
1820 for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++)
1821 cec_s_phys_addr(dev->cec_tx_adap[i],
1822 dev->display_present[i] ?
1823 v4l2_phys_addr_for_input(phys_addr, i + 1) :
1824 CEC_PHYS_ADDR_INVALID,
1825 false);
1826 return 0;
1827 }
1828
1829 int vidioc_enum_framesizes(struct file *file, void *fh,
1830 struct v4l2_frmsizeenum *fsize)
1831 {
1832 struct vivid_dev *dev = video_drvdata(file);
1833
1834 if (!vivid_is_webcam(dev) && !dev->has_scaler_cap)
1835 return -EINVAL;
1836 if (vivid_get_format(dev, fsize->pixel_format) == NULL)
1837 return -EINVAL;
1838 if (vivid_is_webcam(dev)) {
1839 if (fsize->index >= ARRAY_SIZE(webcam_sizes))
1840 return -EINVAL;
1841 fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
1842 fsize->discrete = webcam_sizes[fsize->index];
1843 return 0;
1844 }
1845 if (fsize->index)
1846 return -EINVAL;
1847 fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
1848 fsize->stepwise.min_width = MIN_WIDTH;
1849 fsize->stepwise.max_width = MAX_WIDTH * MAX_ZOOM;
1850 fsize->stepwise.step_width = 2;
1851 fsize->stepwise.min_height = MIN_HEIGHT;
1852 fsize->stepwise.max_height = MAX_HEIGHT * MAX_ZOOM;
1853 fsize->stepwise.step_height = 2;
1854 return 0;
1855 }
1856
1857 /* timeperframe is arbitrary and continuous */
1858 int vidioc_enum_frameintervals(struct file *file, void *priv,
1859 struct v4l2_frmivalenum *fival)
1860 {
1861 struct vivid_dev *dev = video_drvdata(file);
1862 const struct vivid_fmt *fmt;
1863 int i;
1864
1865 fmt = vivid_get_format(dev, fival->pixel_format);
1866 if (!fmt)
1867 return -EINVAL;
1868
1869 if (!vivid_is_webcam(dev)) {
1870 if (fival->index)
1871 return -EINVAL;
1872 if (fival->width < MIN_WIDTH || fival->width > MAX_WIDTH * MAX_ZOOM)
1873 return -EINVAL;
1874 if (fival->height < MIN_HEIGHT || fival->height > MAX_HEIGHT * MAX_ZOOM)
1875 return -EINVAL;
1876 fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
1877 fival->discrete = dev->timeperframe_vid_cap;
1878 return 0;
1879 }
1880
1881 for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
1882 if (fival->width == webcam_sizes[i].width &&
1883 fival->height == webcam_sizes[i].height)
1884 break;
1885 if (i == ARRAY_SIZE(webcam_sizes))
1886 return -EINVAL;
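/* Larger frame sizes only support the first (slower) frame intervals. */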
1887 if (fival->index >= 2 * (VIVID_WEBCAM_SIZES - i))
1888 return -EINVAL;
1889 fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
1890 fival->discrete = webcam_intervals[fival->index];
1891 return 0;
1892 }
1893
1894 int vivid_vid_cap_g_parm(struct file *file, void *priv,
1895 struct v4l2_streamparm *parm)
1896 {
1897 struct vivid_dev *dev = video_drvdata(file);
1898
1899 if (parm->type != (dev->multiplanar ?
1900 V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
1901 V4L2_BUF_TYPE_VIDEO_CAPTURE))
1902 return -EINVAL;
1903
1904 parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
1905 parm->parm.capture.timeperframe = dev->timeperframe_vid_cap;
1906 parm->parm.capture.readbuffers = 1;
1907 return 0;
1908 }
1909
1910 int vivid_vid_cap_s_parm(struct file *file, void *priv,
1911 struct v4l2_streamparm *parm)
1912 {
1913 struct vivid_dev *dev = video_drvdata(file);
1914 unsigned ival_sz = 2 * (VIVID_WEBCAM_SIZES - dev->webcam_size_idx);
1915 struct v4l2_fract tpf;
1916 unsigned i;
1917
1918 if (parm->type != (dev->multiplanar ?
1919 V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
1920 V4L2_BUF_TYPE_VIDEO_CAPTURE))
1921 return -EINVAL;
1922 if (!vivid_is_webcam(dev))
1923 return vivid_vid_cap_g_parm(file, priv, parm);
1924
1925 tpf = parm->parm.capture.timeperframe;
1926
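/*
 * Pick the longest supported interval for the current webcam size that
 * does not exceed the requested interval; an unspecified (0) interval or
 * one faster than the fastest supported rate selects the fastest rate.
 */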
1927 if (tpf.denominator == 0)
1928 tpf = webcam_intervals[ival_sz - 1];
1929 for (i = 0; i < ival_sz; i++)
1930 if (V4L2_FRACT_COMPARE(tpf, >=, webcam_intervals[i]))
1931 break;
1932 if (i == ival_sz)
1933 i = ival_sz - 1;
1934 dev->webcam_ival_idx = i;
1935 tpf = webcam_intervals[dev->webcam_ival_idx];
1936
1937 /* resync the thread's timings */
1938 dev->cap_seq_resync = true;
1939 dev->timeperframe_vid_cap = tpf;
1940 parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
1941 parm->parm.capture.timeperframe = tpf;
1942 parm->parm.capture.readbuffers = 1;
1943 return 0;
1944 }
1945