1 /*
2 * uvc_video.c -- USB Video Class driver - Video handling
3 *
4 * Copyright (C) 2005-2010
5 * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 */
13
14 #include <linux/kernel.h>
15 #include <linux/list.h>
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/usb.h>
19 #include <linux/videodev2.h>
20 #include <linux/vmalloc.h>
21 #include <linux/wait.h>
22 #include <linux/atomic.h>
23 #include <asm/unaligned.h>
24
25 #include <media/v4l2-common.h>
26
27 #include "uvcvideo.h"
28
29 /* ------------------------------------------------------------------------
30 * UVC Controls
31 */
32
33 static int __uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
34 u8 intfnum, u8 cs, void *data, u16 size,
35 int timeout)
36 {
37 u8 type = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
38 unsigned int pipe;
39
40 pipe = (query & 0x80) ? usb_rcvctrlpipe(dev->udev, 0)
41 : usb_sndctrlpipe(dev->udev, 0);
42 type |= (query & 0x80) ? USB_DIR_IN : USB_DIR_OUT;
43
44 return usb_control_msg(dev->udev, pipe, query, type, cs << 8,
45 unit << 8 | intfnum, data, size, timeout);
46 }
47
48 static const char *uvc_query_name(u8 query)
49 {
50 switch (query) {
51 case UVC_SET_CUR:
52 return "SET_CUR";
53 case UVC_GET_CUR:
54 return "GET_CUR";
55 case UVC_GET_MIN:
56 return "GET_MIN";
57 case UVC_GET_MAX:
58 return "GET_MAX";
59 case UVC_GET_RES:
60 return "GET_RES";
61 case UVC_GET_LEN:
62 return "GET_LEN";
63 case UVC_GET_INFO:
64 return "GET_INFO";
65 case UVC_GET_DEF:
66 return "GET_DEF";
67 default:
68 return "<invalid>";
69 }
70 }
71
72 int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
73 u8 intfnum, u8 cs, void *data, u16 size)
74 {
75 int ret;
76 u8 error;
77 u8 tmp;
78
79 ret = __uvc_query_ctrl(dev, query, unit, intfnum, cs, data, size,
80 UVC_CTRL_CONTROL_TIMEOUT);
81 if (likely(ret == size))
82 return 0;
83
84 uvc_printk(KERN_ERR,
85 "Failed to query (%s) UVC control %u on unit %u: %d (exp. %u).\n",
86 uvc_query_name(query), cs, unit, ret, size);
87
88 if (ret != -EPIPE)
89 return ret;
90
91 tmp = *(u8 *)data;
92
93 ret = __uvc_query_ctrl(dev, UVC_GET_CUR, 0, intfnum,
94 UVC_VC_REQUEST_ERROR_CODE_CONTROL, data, 1,
95 UVC_CTRL_CONTROL_TIMEOUT);
96
97 error = *(u8 *)data;
98 *(u8 *)data = tmp;
99
100 if (ret != 1)
101 return ret < 0 ? ret : -EPIPE;
102
103 uvc_trace(UVC_TRACE_CONTROL, "Control error %u\n", error);
104
105 switch (error) {
106 case 0:
107 /* Cannot happen - we received a STALL */
108 return -EPIPE;
109 case 1: /* Not ready */
110 return -EBUSY;
111 case 2: /* Wrong state */
112 return -EILSEQ;
113 case 3: /* Power */
114 return -EREMOTE;
115 case 4: /* Out of range */
116 return -ERANGE;
117 case 5: /* Invalid unit */
118 case 6: /* Invalid control */
119 case 7: /* Invalid Request */
120 case 8: /* Invalid value within range */
121 return -EINVAL;
122 default: /* reserved or unknown */
123 break;
124 }
125
126 return -EPIPE;
127 }
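
/*
 * For example, if a SET_CUR request STALLs and the subsequent
 * VC_REQUEST_ERROR_CODE_CONTROL read above returns 2 ("wrong state"), the
 * caller sees -EILSEQ instead of a bare -EPIPE.
 */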
128
129 static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
130 struct uvc_streaming_control *ctrl)
131 {
132 struct uvc_format *format = NULL;
133 struct uvc_frame *frame = NULL;
134 unsigned int i;
135
136 for (i = 0; i < stream->nformats; ++i) {
137 if (stream->format[i].index == ctrl->bFormatIndex) {
138 format = &stream->format[i];
139 break;
140 }
141 }
142
143 if (format == NULL)
144 return;
145
146 for (i = 0; i < format->nframes; ++i) {
147 if (format->frame[i].bFrameIndex == ctrl->bFrameIndex) {
148 frame = &format->frame[i];
149 break;
150 }
151 }
152
153 if (frame == NULL)
154 return;
155
156 if (!(format->flags & UVC_FMT_FLAG_COMPRESSED) ||
157 (ctrl->dwMaxVideoFrameSize == 0 &&
158 stream->dev->uvc_version < 0x0110))
159 ctrl->dwMaxVideoFrameSize =
160 frame->dwMaxVideoFrameBufferSize;
161
162 /* The "TOSHIBA Web Camera - 5M" Chicony device (04f2:b50b) seems to
163 * compute the bandwidth on 16 bits and erroneously sign-extend it to
164 * 32 bits, resulting in a huge bandwidth value. Detect and fix that
165 * condition by setting the 16 MSBs to 0 when they're all equal to 1.
166 */
167 if ((ctrl->dwMaxPayloadTransferSize & 0xffff0000) == 0xffff0000)
168 ctrl->dwMaxPayloadTransferSize &= ~0xffff0000;
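	/*
	 * Worked example of the fix above (illustrative numbers only): if the
	 * device computes a 16-bit bandwidth of 0xC000 (49152 bytes) and
	 * sign-extends it, it reports 0xFFFFC000. Clearing the 16 MSBs
	 * restores 0x0000C000 = 49152.
	 */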
169
170 if (!(format->flags & UVC_FMT_FLAG_COMPRESSED) &&
171 stream->dev->quirks & UVC_QUIRK_FIX_BANDWIDTH &&
172 stream->intf->num_altsetting > 1) {
173 u32 interval;
174 u32 bandwidth;
175
176 interval = (ctrl->dwFrameInterval > 100000)
177 ? ctrl->dwFrameInterval
178 : frame->dwFrameInterval[0];
179
180 /* Compute a bandwidth estimate by multiplying the frame
181 * size by the number of video frames per second, dividing the
182 * result by the number of USB frames (or micro-frames for
183 * high-speed devices) per second and adding the UVC header size
184 * (assumed to be 12 bytes long).
185 */
186 bandwidth = frame->wWidth * frame->wHeight / 8 * format->bpp;
187 bandwidth *= 10000000 / interval + 1;
188 bandwidth /= 1000;
189 if (stream->dev->udev->speed == USB_SPEED_HIGH)
190 bandwidth /= 8;
191 bandwidth += 12;
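		/*
		 * Worked example (illustrative numbers, not from a real
		 * device): 640x480 YUYV (16 bpp) at 30 fps on a high-speed
		 * device gives 640 * 480 / 8 * 16 = 614400 bytes per frame
		 * and an interval of 333333 (100 ns units), so
		 * 614400 * (10000000 / 333333 + 1) / 1000 / 8 + 12 = 2392
		 * bytes per micro-frame before the clamp below.
		 */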
192
193 /* The bandwidth estimate is too low for many cameras. Don't use
194 * maximum packet sizes lower than 1024 bytes to try and work
195 * around the problem. According to measurements done on two
196 * different camera models, the value is high enough to get most
197 * resolutions working while not preventing two simultaneous
198 * VGA streams at 15 fps.
199 */
200 bandwidth = max_t(u32, bandwidth, 1024);
201
202 ctrl->dwMaxPayloadTransferSize = bandwidth;
203 }
204 }
205
206 static size_t uvc_video_ctrl_size(struct uvc_streaming *stream)
207 {
208 /*
209 * Return the size of the video probe and commit controls, which depends
210 * on the protocol version.
211 */
212 if (stream->dev->uvc_version < 0x0110)
213 return 26;
214 else if (stream->dev->uvc_version < 0x0150)
215 return 34;
216 else
217 return 48;
218 }
219
220 static int uvc_get_video_ctrl(struct uvc_streaming *stream,
221 struct uvc_streaming_control *ctrl, int probe, u8 query)
222 {
223 u16 size = uvc_video_ctrl_size(stream);
224 u8 *data;
225 int ret;
226
227 if ((stream->dev->quirks & UVC_QUIRK_PROBE_DEF) &&
228 query == UVC_GET_DEF)
229 return -EIO;
230
231 data = kmalloc(size, GFP_KERNEL);
232 if (data == NULL)
233 return -ENOMEM;
234
235 ret = __uvc_query_ctrl(stream->dev, query, 0, stream->intfnum,
236 probe ? UVC_VS_PROBE_CONTROL : UVC_VS_COMMIT_CONTROL, data,
237 size, uvc_timeout_param);
238
239 if ((query == UVC_GET_MIN || query == UVC_GET_MAX) && ret == 2) {
240 /* Some cameras, mostly based on Bison Electronics chipsets,
241 * answer a GET_MIN or GET_MAX request with the wCompQuality
242 * field only.
243 */
244 uvc_warn_once(stream->dev, UVC_WARN_MINMAX, "UVC non "
245 "compliance - GET_MIN/MAX(PROBE) incorrectly "
246 "supported. Enabling workaround.\n");
247 memset(ctrl, 0, sizeof(*ctrl));
248 ctrl->wCompQuality = le16_to_cpup((__le16 *)data);
249 ret = 0;
250 goto out;
251 } else if (query == UVC_GET_DEF && probe == 1 && ret != size) {
252 /* Many cameras don't support the GET_DEF request on their
253 * video probe control. Warn once and return, the caller will
254 * fall back to GET_CUR.
255 */
256 uvc_warn_once(stream->dev, UVC_WARN_PROBE_DEF, "UVC non "
257 "compliance - GET_DEF(PROBE) not supported. "
258 "Enabling workaround.\n");
259 ret = -EIO;
260 goto out;
261 } else if (ret != size) {
262 uvc_printk(KERN_ERR, "Failed to query (%u) UVC %s control : "
263 "%d (exp. %u).\n", query, probe ? "probe" : "commit",
264 ret, size);
265 ret = -EIO;
266 goto out;
267 }
268
269 ctrl->bmHint = le16_to_cpup((__le16 *)&data[0]);
270 ctrl->bFormatIndex = data[2];
271 ctrl->bFrameIndex = data[3];
272 ctrl->dwFrameInterval = le32_to_cpup((__le32 *)&data[4]);
273 ctrl->wKeyFrameRate = le16_to_cpup((__le16 *)&data[8]);
274 ctrl->wPFrameRate = le16_to_cpup((__le16 *)&data[10]);
275 ctrl->wCompQuality = le16_to_cpup((__le16 *)&data[12]);
276 ctrl->wCompWindowSize = le16_to_cpup((__le16 *)&data[14]);
277 ctrl->wDelay = le16_to_cpup((__le16 *)&data[16]);
278 ctrl->dwMaxVideoFrameSize = get_unaligned_le32(&data[18]);
279 ctrl->dwMaxPayloadTransferSize = get_unaligned_le32(&data[22]);
280
281 if (size >= 34) {
282 ctrl->dwClockFrequency = get_unaligned_le32(&data[26]);
283 ctrl->bmFramingInfo = data[30];
284 ctrl->bPreferedVersion = data[31];
285 ctrl->bMinVersion = data[32];
286 ctrl->bMaxVersion = data[33];
287 } else {
288 ctrl->dwClockFrequency = stream->dev->clock_frequency;
289 ctrl->bmFramingInfo = 0;
290 ctrl->bPreferedVersion = 0;
291 ctrl->bMinVersion = 0;
292 ctrl->bMaxVersion = 0;
293 }
294
295 /* Some broken devices return null or wrong dwMaxVideoFrameSize and
296 * dwMaxPayloadTransferSize fields. Try to get the value from the
297 * format and frame descriptors.
298 */
299 uvc_fixup_video_ctrl(stream, ctrl);
300 ret = 0;
301
302 out:
303 kfree(data);
304 return ret;
305 }
306
307 static int uvc_set_video_ctrl(struct uvc_streaming *stream,
308 struct uvc_streaming_control *ctrl, int probe)
309 {
310 u16 size = uvc_video_ctrl_size(stream);
311 u8 *data;
312 int ret;
313
314 data = kzalloc(size, GFP_KERNEL);
315 if (data == NULL)
316 return -ENOMEM;
317
318 *(__le16 *)&data[0] = cpu_to_le16(ctrl->bmHint);
319 data[2] = ctrl->bFormatIndex;
320 data[3] = ctrl->bFrameIndex;
321 *(__le32 *)&data[4] = cpu_to_le32(ctrl->dwFrameInterval);
322 *(__le16 *)&data[8] = cpu_to_le16(ctrl->wKeyFrameRate);
323 *(__le16 *)&data[10] = cpu_to_le16(ctrl->wPFrameRate);
324 *(__le16 *)&data[12] = cpu_to_le16(ctrl->wCompQuality);
325 *(__le16 *)&data[14] = cpu_to_le16(ctrl->wCompWindowSize);
326 *(__le16 *)&data[16] = cpu_to_le16(ctrl->wDelay);
327 put_unaligned_le32(ctrl->dwMaxVideoFrameSize, &data[18]);
328 put_unaligned_le32(ctrl->dwMaxPayloadTransferSize, &data[22]);
329
330 if (size >= 34) {
331 put_unaligned_le32(ctrl->dwClockFrequency, &data[26]);
332 data[30] = ctrl->bmFramingInfo;
333 data[31] = ctrl->bPreferedVersion;
334 data[32] = ctrl->bMinVersion;
335 data[33] = ctrl->bMaxVersion;
336 }
337
338 ret = __uvc_query_ctrl(stream->dev, UVC_SET_CUR, 0, stream->intfnum,
339 probe ? UVC_VS_PROBE_CONTROL : UVC_VS_COMMIT_CONTROL, data,
340 size, uvc_timeout_param);
341 if (ret != size) {
342 uvc_printk(KERN_ERR, "Failed to set UVC %s control : "
343 "%d (exp. %u).\n", probe ? "probe" : "commit",
344 ret, size);
345 ret = -EIO;
346 }
347
348 kfree(data);
349 return ret;
350 }
351
352 int uvc_probe_video(struct uvc_streaming *stream,
353 struct uvc_streaming_control *probe)
354 {
355 struct uvc_streaming_control probe_min, probe_max;
356 u16 bandwidth;
357 unsigned int i;
358 int ret;
359
360 /* Perform probing. The device should adjust the requested values
361 * according to its capabilities. However, some devices, namely the
362 * first generation UVC Logitech webcams, don't implement the Video
363 * Probe control properly, and just return the needed bandwidth. For
364 * that reason, if the needed bandwidth exceeds the maximum available
365 * bandwidth, try to lower the quality.
366 */
367 ret = uvc_set_video_ctrl(stream, probe, 1);
368 if (ret < 0)
369 goto done;
370
371 /* Get the minimum and maximum values for compression settings. */
372 if (!(stream->dev->quirks & UVC_QUIRK_PROBE_MINMAX)) {
373 ret = uvc_get_video_ctrl(stream, &probe_min, 1, UVC_GET_MIN);
374 if (ret < 0)
375 goto done;
376 ret = uvc_get_video_ctrl(stream, &probe_max, 1, UVC_GET_MAX);
377 if (ret < 0)
378 goto done;
379
380 probe->wCompQuality = probe_max.wCompQuality;
381 }
382
383 for (i = 0; i < 2; ++i) {
384 ret = uvc_set_video_ctrl(stream, probe, 1);
385 if (ret < 0)
386 goto done;
387 ret = uvc_get_video_ctrl(stream, probe, 1, UVC_GET_CUR);
388 if (ret < 0)
389 goto done;
390
391 if (stream->intf->num_altsetting == 1)
392 break;
393
394 bandwidth = probe->dwMaxPayloadTransferSize;
395 if (bandwidth <= stream->maxpsize)
396 break;
397
398 if (stream->dev->quirks & UVC_QUIRK_PROBE_MINMAX) {
399 ret = -ENOSPC;
400 goto done;
401 }
402
403 /* TODO: negotiate compression parameters */
404 probe->wKeyFrameRate = probe_min.wKeyFrameRate;
405 probe->wPFrameRate = probe_min.wPFrameRate;
406 probe->wCompQuality = probe_max.wCompQuality;
407 probe->wCompWindowSize = probe_min.wCompWindowSize;
408 }
409
410 done:
411 return ret;
412 }
413
414 static int uvc_commit_video(struct uvc_streaming *stream,
415 struct uvc_streaming_control *probe)
416 {
417 return uvc_set_video_ctrl(stream, probe, 0);
418 }
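
/*
 * Illustrative sketch only (not part of the driver): the typical sequence a
 * caller is expected to follow is to negotiate parameters with
 * uvc_probe_video() and then apply them with uvc_commit_video(). The helper
 * name and the use of stream->ctrl as the desired parameters are assumptions
 * made for this example.
 */
static int __maybe_unused uvc_negotiate_and_commit_example(
	struct uvc_streaming *stream)
{
	struct uvc_streaming_control probe = stream->ctrl;
	int ret;

	/* Let the device adjust the requested values to its capabilities. */
	ret = uvc_probe_video(stream, &probe);
	if (ret < 0)
		return ret;

	/* Commit the negotiated parameters to start using them. */
	return uvc_commit_video(stream, &probe);
}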
419
420 /* -----------------------------------------------------------------------------
421 * Clocks and timestamps
422 */
423
424 static inline ktime_t uvc_video_get_time(void)
425 {
426 if (uvc_clock_param == CLOCK_MONOTONIC)
427 return ktime_get();
428 else
429 return ktime_get_real();
430 }
431
432 static void
433 uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
434 const u8 *data, int len)
435 {
436 struct uvc_clock_sample *sample;
437 unsigned int header_size;
438 bool has_pts = false;
439 bool has_scr = false;
440 unsigned long flags;
441 ktime_t time;
442 u16 host_sof;
443 u16 dev_sof;
444
445 switch (data[1] & (UVC_STREAM_PTS | UVC_STREAM_SCR)) {
446 case UVC_STREAM_PTS | UVC_STREAM_SCR:
447 header_size = 12;
448 has_pts = true;
449 has_scr = true;
450 break;
451 case UVC_STREAM_PTS:
452 header_size = 6;
453 has_pts = true;
454 break;
455 case UVC_STREAM_SCR:
456 header_size = 8;
457 has_scr = true;
458 break;
459 default:
460 header_size = 2;
461 break;
462 }
463
464 /* Check for invalid headers. */
465 if (len < header_size)
466 return;
467
468 /* Extract the timestamps:
469 *
470 * - store the frame PTS in the buffer structure
471 * - if the SCR field is present, retrieve the host SOF counter and
472 * kernel timestamps and store them with the SCR STC and SOF fields
473 * in the ring buffer
474 */
475 if (has_pts && buf != NULL)
476 buf->pts = get_unaligned_le32(&data[2]);
477
478 if (!has_scr)
479 return;
480
481 /* To limit the amount of data, drop SCRs with an SOF identical to the
482 * previous one.
483 */
484 dev_sof = get_unaligned_le16(&data[header_size - 2]);
485 if (dev_sof == stream->clock.last_sof)
486 return;
487
488 stream->clock.last_sof = dev_sof;
489
490 host_sof = usb_get_current_frame_number(stream->dev->udev);
491 time = uvc_video_get_time();
492
493 /* The UVC specification allows device implementations that can't obtain
494 * the USB frame number to keep their own frame counters as long as they
495 * match the size and frequency of the frame number associated with USB
496 * SOF tokens. The SOF values sent by such devices differ from the USB
497 * SOF tokens by a fixed offset that needs to be estimated and accounted
498 * for to make timestamp recovery as accurate as possible.
499 *
500 * The offset is estimated the first time a device SOF value is received
501 * as the difference between the host and device SOF values. As the two
502 * SOF values can differ slightly due to transmission delays, consider
503 * that the offset is null if the difference is not higher than 10 ms
504 * (negative differences can not happen and are thus considered as an
505 * offset). The video commit control wDelay field should be used to
506 * compute a dynamic threshold instead of using a fixed 10 ms value, but
507 * devices don't report reliable wDelay values.
508 *
509 * See uvc_video_clock_host_sof() for an explanation regarding why only
510 * the 8 LSBs of the delta are kept.
511 */
512 if (stream->clock.sof_offset == (u16)-1) {
513 u16 delta_sof = (host_sof - dev_sof) & 255;
514 if (delta_sof >= 10)
515 stream->clock.sof_offset = delta_sof;
516 else
517 stream->clock.sof_offset = 0;
518 }
519
520 dev_sof = (dev_sof + stream->clock.sof_offset) & 2047;
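	/*
	 * Example of the estimation above (illustrative numbers only): with
	 * host_sof = 1720 and dev_sof = 100, delta_sof = (1720 - 100) & 255
	 * = 84 >= 10, so sof_offset = 84 and the corrected device SOF is
	 * (100 + 84) & 2047 = 184. A small transmission delay such as
	 * host_sof = 102, dev_sof = 100 gives delta_sof = 2 < 10 and a null
	 * offset.
	 */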
521
522 spin_lock_irqsave(&stream->clock.lock, flags);
523
524 sample = &stream->clock.samples[stream->clock.head];
525 sample->dev_stc = get_unaligned_le32(&data[header_size - 6]);
526 sample->dev_sof = dev_sof;
527 sample->host_sof = host_sof;
528 sample->host_time = time;
529
530 /* Update the sliding window head and count. */
531 stream->clock.head = (stream->clock.head + 1) % stream->clock.size;
532
533 if (stream->clock.count < stream->clock.size)
534 stream->clock.count++;
535
536 spin_unlock_irqrestore(&stream->clock.lock, flags);
537 }
538
539 static void uvc_video_clock_reset(struct uvc_streaming *stream)
540 {
541 struct uvc_clock *clock = &stream->clock;
542
543 clock->head = 0;
544 clock->count = 0;
545 clock->last_sof = -1;
546 clock->sof_offset = -1;
547 }
548
549 static int uvc_video_clock_init(struct uvc_streaming *stream)
550 {
551 struct uvc_clock *clock = &stream->clock;
552
553 spin_lock_init(&clock->lock);
554 clock->size = 32;
555
556 clock->samples = kmalloc_array(clock->size, sizeof(*clock->samples),
557 GFP_KERNEL);
558 if (clock->samples == NULL)
559 return -ENOMEM;
560
561 uvc_video_clock_reset(stream);
562
563 return 0;
564 }
565
566 static void uvc_video_clock_cleanup(struct uvc_streaming *stream)
567 {
568 kfree(stream->clock.samples);
569 stream->clock.samples = NULL;
570 }
571
572 /*
573 * uvc_video_clock_host_sof - Return the host SOF value for a clock sample
574 *
575 * Host SOF counters reported by usb_get_current_frame_number() usually don't
576 * cover the whole 11-bits SOF range (0-2047) but are limited to the HCI frame
577 * schedule window. They can be limited to 8, 9 or 10 bits depending on the host
578 * controller and its configuration.
579 *
580 * We thus need to recover the SOF value corresponding to the host frame number.
581 * As the device and host frame numbers are sampled in a short interval, the
582 * difference between their values should be equal to a small delta plus an
583 * integer multiple of 256 caused by the host frame number's limited precision.
584 *
585 * To obtain the recovered host SOF value, compute the small delta by masking
586 * the high bits of the host frame counter and device SOF difference and add it
587 * to the device SOF value.
588 */
589 static u16 uvc_video_clock_host_sof(const struct uvc_clock_sample *sample)
590 {
591 /* The delta value can be negative. */
592 s8 delta_sof;
593
594 delta_sof = (sample->host_sof - sample->dev_sof) & 255;
595
596 return (sample->dev_sof + delta_sof) & 2047;
597 }
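
/*
 * For example (illustrative numbers only), with dev_sof = 1800 and a host
 * frame counter truncated to 10 bits reporting host_sof = 1802 & 1023 = 778,
 * delta_sof = (778 - 1800) & 255 = 2 and the recovered host SOF is
 * (1800 + 2) & 2047 = 1802.
 */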
598
599 /*
600 * uvc_video_clock_update - Update the buffer timestamp
601 *
602 * This function converts the buffer PTS timestamp to the host clock domain by
603 * going through the USB SOF clock domain and stores the result in the V4L2
604 * buffer timestamp field.
605 *
606 * The relationship between the device clock and the host clock isn't known.
607 * However, the device and the host share the common USB SOF clock which can be
608 * used to recover that relationship.
609 *
610 * The relationship between the device clock and the USB SOF clock is considered
611 * to be linear over the clock samples sliding window and is given by
612 *
613 * SOF = m * PTS + p
614 *
615 * Several methods to compute the slope (m) and intercept (p) can be used. As
616 * the clock drift should be small compared to the sliding window size, we
617 * assume that the line that goes through the points at both ends of the window
618 * is a good approximation. Naming those points P1 and P2, we get
619 *
620 * SOF = (SOF2 - SOF1) / (STC2 - STC1) * PTS
621 * + (SOF1 * STC2 - SOF2 * STC1) / (STC2 - STC1)
622 *
623 * or
624 *
625 * SOF = ((SOF2 - SOF1) * PTS + SOF1 * STC2 - SOF2 * STC1) / (STC2 - STC1) (1)
626 *
627 * to avoid losing precision in the division. Similarly, the host timestamp is
628 * computed with
629 *
630 * TS = ((TS2 - TS1) * PTS + TS1 * SOF2 - TS2 * SOF1) / (SOF2 - SOF1) (2)
631 *
632 * SOF values are coded on 11 bits by USB. We extend their precision with 16
633 * decimal bits, leading to a 11.16 coding.
634 *
635 * TODO: To avoid surprises with device clock values, PTS/STC timestamps should
636 * be normalized using the nominal device clock frequency reported through the
637 * UVC descriptors.
638 *
639 * Both the PTS/STC and SOF counters roll over, after a fixed but device
640 * specific amount of time for PTS/STC and after 2048ms for SOF. As long as the
641 * sliding window size is smaller than the rollover period, differences computed
642 * on unsigned integers will produce the correct result. However, the p term in
643 * the linear relations will be miscomputed.
644 *
645 * To fix the issue, we subtract a constant from the PTS and STC values to bring
646 * PTS to half the 32 bit STC range. The sliding window STC values then fit into
647 * the 32 bit range without any rollover.
648 *
649 * Similarly, we add 2048 to the device SOF values to make sure that the SOF
650 * computed by (1) will never be smaller than 0. This offset is then compensated
651 * by adding 2048 to the SOF values used in (2). However, this doesn't prevent
652 * rollovers between (1) and (2): the SOF value computed by (1) can be slightly
653 * lower than 4096, and the host SOF counters can have rolled over to 2048. This
654 * case is handled by subtracting 2048 from the SOF value if it exceeds the host
655 * SOF value at the end of the sliding window.
656 *
657 * Finally we subtract a constant from the host timestamps to bring the first
658 * timestamp of the sliding window to 1s.
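 *
 * As a purely illustrative example of (1): with STC1 = 1000, SOF1 = 10,
 * STC2 = 2000, SOF2 = 42 and PTS = 1500, we get
 * SOF = ((42 - 10) * 1500 + 10 * 2000 - 42 * 1000) / (2000 - 1000) = 26,
 * i.e. the midpoint of SOF1 and SOF2, since PTS lies halfway between STC1 and
 * STC2. Equation (2) maps the SOF value to the host clock the same way. The
 * code below additionally applies the 11.16 fixed point representation and
 * the offsets described above.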
659 */
660 void uvc_video_clock_update(struct uvc_streaming *stream,
661 struct vb2_v4l2_buffer *vbuf,
662 struct uvc_buffer *buf)
663 {
664 struct uvc_clock *clock = &stream->clock;
665 struct uvc_clock_sample *first;
666 struct uvc_clock_sample *last;
667 unsigned long flags;
668 u64 timestamp;
669 u32 delta_stc;
670 u32 y1, y2;
671 u32 x1, x2;
672 u32 mean;
673 u32 sof;
674 u64 y;
675
676 if (!uvc_hw_timestamps_param)
677 return;
678
679 spin_lock_irqsave(&clock->lock, flags);
680
681 if (clock->count < clock->size)
682 goto done;
683
684 first = &clock->samples[clock->head];
685 last = &clock->samples[(clock->head - 1) % clock->size];
686
687 /* First step, PTS to SOF conversion. */
688 delta_stc = buf->pts - (1UL << 31);
689 x1 = first->dev_stc - delta_stc;
690 x2 = last->dev_stc - delta_stc;
691 if (x1 == x2)
692 goto done;
693
694 y1 = (first->dev_sof + 2048) << 16;
695 y2 = (last->dev_sof + 2048) << 16;
696 if (y2 < y1)
697 y2 += 2048 << 16;
698
699 y = (u64)(y2 - y1) * (1ULL << 31) + (u64)y1 * (u64)x2
700 - (u64)y2 * (u64)x1;
701 y = div_u64(y, x2 - x1);
702
703 sof = y;
704
705 uvc_trace(UVC_TRACE_CLOCK, "%s: PTS %u y %llu.%06llu SOF %u.%06llu "
706 "(x1 %u x2 %u y1 %u y2 %u SOF offset %u)\n",
707 stream->dev->name, buf->pts,
708 y >> 16, div_u64((y & 0xffff) * 1000000, 65536),
709 sof >> 16, div_u64(((u64)sof & 0xffff) * 1000000LLU, 65536),
710 x1, x2, y1, y2, clock->sof_offset);
711
712 /* Second step, SOF to host clock conversion. */
713 x1 = (uvc_video_clock_host_sof(first) + 2048) << 16;
714 x2 = (uvc_video_clock_host_sof(last) + 2048) << 16;
715 if (x2 < x1)
716 x2 += 2048 << 16;
717 if (x1 == x2)
718 goto done;
719
720 y1 = NSEC_PER_SEC;
721 y2 = (u32)ktime_to_ns(ktime_sub(last->host_time, first->host_time)) + y1;
722
723 /* Interpolated and host SOF timestamps can wrap around at slightly
724 * different times. Handle this by adding or removing 2048 to or from
725 * the computed SOF value to keep it close to the SOF samples mean
726 * value.
727 */
728 mean = (x1 + x2) / 2;
729 if (mean - (1024 << 16) > sof)
730 sof += 2048 << 16;
731 else if (sof > mean + (1024 << 16))
732 sof -= 2048 << 16;
733
734 y = (u64)(y2 - y1) * (u64)sof + (u64)y1 * (u64)x2
735 - (u64)y2 * (u64)x1;
736 y = div_u64(y, x2 - x1);
737
738 timestamp = ktime_to_ns(first->host_time) + y - y1;
739
740 uvc_trace(UVC_TRACE_CLOCK, "%s: SOF %u.%06llu y %llu ts %llu "
741 "buf ts %llu (x1 %u/%u/%u x2 %u/%u/%u y1 %u y2 %u)\n",
742 stream->dev->name,
743 sof >> 16, div_u64(((u64)sof & 0xffff) * 1000000LLU, 65536),
744 y, timestamp, vbuf->vb2_buf.timestamp,
745 x1, first->host_sof, first->dev_sof,
746 x2, last->host_sof, last->dev_sof, y1, y2);
747
748 /* Update the V4L2 buffer. */
749 vbuf->vb2_buf.timestamp = timestamp;
750
751 done:
752 spin_unlock_irqrestore(&clock->lock, flags);
753 }
754
755 /* ------------------------------------------------------------------------
756 * Stream statistics
757 */
758
759 static void uvc_video_stats_decode(struct uvc_streaming *stream,
760 const u8 *data, int len)
761 {
762 unsigned int header_size;
763 bool has_pts = false;
764 bool has_scr = false;
765 u16 uninitialized_var(scr_sof);
766 u32 uninitialized_var(scr_stc);
767 u32 uninitialized_var(pts);
768
769 if (stream->stats.stream.nb_frames == 0 &&
770 stream->stats.frame.nb_packets == 0)
771 stream->stats.stream.start_ts = ktime_get();
772
773 switch (data[1] & (UVC_STREAM_PTS | UVC_STREAM_SCR)) {
774 case UVC_STREAM_PTS | UVC_STREAM_SCR:
775 header_size = 12;
776 has_pts = true;
777 has_scr = true;
778 break;
779 case UVC_STREAM_PTS:
780 header_size = 6;
781 has_pts = true;
782 break;
783 case UVC_STREAM_SCR:
784 header_size = 8;
785 has_scr = true;
786 break;
787 default:
788 header_size = 2;
789 break;
790 }
791
792 /* Check for invalid headers. */
793 if (len < header_size || data[0] < header_size) {
794 stream->stats.frame.nb_invalid++;
795 return;
796 }
797
798 /* Extract the timestamps. */
799 if (has_pts)
800 pts = get_unaligned_le32(&data[2]);
801
802 if (has_scr) {
803 scr_stc = get_unaligned_le32(&data[header_size - 6]);
804 scr_sof = get_unaligned_le16(&data[header_size - 2]);
805 }
806
807 /* Is PTS constant through the whole frame ? */
808 if (has_pts && stream->stats.frame.nb_pts) {
809 if (stream->stats.frame.pts != pts) {
810 stream->stats.frame.nb_pts_diffs++;
811 stream->stats.frame.last_pts_diff =
812 stream->stats.frame.nb_packets;
813 }
814 }
815
816 if (has_pts) {
817 stream->stats.frame.nb_pts++;
818 stream->stats.frame.pts = pts;
819 }
820
821 /* Do all frames have a PTS in their first non-empty packet, or before
822 * their first empty packet ?
823 */
824 if (stream->stats.frame.size == 0) {
825 if (len > header_size)
826 stream->stats.frame.has_initial_pts = has_pts;
827 if (len == header_size && has_pts)
828 stream->stats.frame.has_early_pts = true;
829 }
830
831 /* Do the SCR.STC and SCR.SOF fields vary through the frame ? */
832 if (has_scr && stream->stats.frame.nb_scr) {
833 if (stream->stats.frame.scr_stc != scr_stc)
834 stream->stats.frame.nb_scr_diffs++;
835 }
836
837 if (has_scr) {
838 /* Expand the SOF counter to 32 bits and store its value. */
839 if (stream->stats.stream.nb_frames > 0 ||
840 stream->stats.frame.nb_scr > 0)
841 stream->stats.stream.scr_sof_count +=
842 (scr_sof - stream->stats.stream.scr_sof) % 2048;
843 stream->stats.stream.scr_sof = scr_sof;
844
845 stream->stats.frame.nb_scr++;
846 stream->stats.frame.scr_stc = scr_stc;
847 stream->stats.frame.scr_sof = scr_sof;
848
849 if (scr_sof < stream->stats.stream.min_sof)
850 stream->stats.stream.min_sof = scr_sof;
851 if (scr_sof > stream->stats.stream.max_sof)
852 stream->stats.stream.max_sof = scr_sof;
853 }
854
855 /* Record the first non-empty packet number. */
856 if (stream->stats.frame.size == 0 && len > header_size)
857 stream->stats.frame.first_data = stream->stats.frame.nb_packets;
858
859 /* Update the frame size. */
860 stream->stats.frame.size += len - header_size;
861
862 /* Update the packets counters. */
863 stream->stats.frame.nb_packets++;
864 if (len <= header_size)
865 stream->stats.frame.nb_empty++;
866
867 if (data[1] & UVC_STREAM_ERR)
868 stream->stats.frame.nb_errors++;
869 }
870
871 static void uvc_video_stats_update(struct uvc_streaming *stream)
872 {
873 struct uvc_stats_frame *frame = &stream->stats.frame;
874
875 uvc_trace(UVC_TRACE_STATS, "frame %u stats: %u/%u/%u packets, "
876 "%u/%u/%u pts (%searly %sinitial), %u/%u scr, "
877 "last pts/stc/sof %u/%u/%u\n",
878 stream->sequence, frame->first_data,
879 frame->nb_packets - frame->nb_empty, frame->nb_packets,
880 frame->nb_pts_diffs, frame->last_pts_diff, frame->nb_pts,
881 frame->has_early_pts ? "" : "!",
882 frame->has_initial_pts ? "" : "!",
883 frame->nb_scr_diffs, frame->nb_scr,
884 frame->pts, frame->scr_stc, frame->scr_sof);
885
886 stream->stats.stream.nb_frames++;
887 stream->stats.stream.nb_packets += stream->stats.frame.nb_packets;
888 stream->stats.stream.nb_empty += stream->stats.frame.nb_empty;
889 stream->stats.stream.nb_errors += stream->stats.frame.nb_errors;
890 stream->stats.stream.nb_invalid += stream->stats.frame.nb_invalid;
891
892 if (frame->has_early_pts)
893 stream->stats.stream.nb_pts_early++;
894 if (frame->has_initial_pts)
895 stream->stats.stream.nb_pts_initial++;
896 if (frame->last_pts_diff <= frame->first_data)
897 stream->stats.stream.nb_pts_constant++;
898 if (frame->nb_scr >= frame->nb_packets - frame->nb_empty)
899 stream->stats.stream.nb_scr_count_ok++;
900 if (frame->nb_scr_diffs + 1 == frame->nb_scr)
901 stream->stats.stream.nb_scr_diffs_ok++;
902
903 memset(&stream->stats.frame, 0, sizeof(stream->stats.frame));
904 }
905
906 size_t uvc_video_stats_dump(struct uvc_streaming *stream, char *buf,
907 size_t size)
908 {
909 unsigned int scr_sof_freq;
910 unsigned int duration;
911 size_t count = 0;
912
913 /* Compute the SCR.SOF frequency estimate. At the nominal 1kHz SOF
914 * frequency this will not overflow for more than an hour of capture.
915 */
916 duration = ktime_ms_delta(stream->stats.stream.stop_ts,
917 stream->stats.stream.start_ts);
918 if (duration != 0)
919 scr_sof_freq = stream->stats.stream.scr_sof_count * 1000
920 / duration;
921 else
922 scr_sof_freq = 0;
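	/*
	 * For instance (illustrative numbers only), 59940 SCR.SOF increments
	 * over a 60000 ms capture yield 59940 * 1000 / 60000 = 999, reported
	 * below as "freq 0.999 kHz".
	 */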
923
924 count += scnprintf(buf + count, size - count,
925 "frames: %u\npackets: %u\nempty: %u\n"
926 "errors: %u\ninvalid: %u\n",
927 stream->stats.stream.nb_frames,
928 stream->stats.stream.nb_packets,
929 stream->stats.stream.nb_empty,
930 stream->stats.stream.nb_errors,
931 stream->stats.stream.nb_invalid);
932 count += scnprintf(buf + count, size - count,
933 "pts: %u early, %u initial, %u ok\n",
934 stream->stats.stream.nb_pts_early,
935 stream->stats.stream.nb_pts_initial,
936 stream->stats.stream.nb_pts_constant);
937 count += scnprintf(buf + count, size - count,
938 "scr: %u count ok, %u diff ok\n",
939 stream->stats.stream.nb_scr_count_ok,
940 stream->stats.stream.nb_scr_diffs_ok);
941 count += scnprintf(buf + count, size - count,
942 "sof: %u <= sof <= %u, freq %u.%03u kHz\n",
943 stream->stats.stream.min_sof,
944 stream->stats.stream.max_sof,
945 scr_sof_freq / 1000, scr_sof_freq % 1000);
946
947 return count;
948 }
949
950 static void uvc_video_stats_start(struct uvc_streaming *stream)
951 {
952 memset(&stream->stats, 0, sizeof(stream->stats));
953 stream->stats.stream.min_sof = 2048;
954 }
955
956 static void uvc_video_stats_stop(struct uvc_streaming *stream)
957 {
958 stream->stats.stream.stop_ts = ktime_get();
959 }
960
961 /* ------------------------------------------------------------------------
962 * Video codecs
963 */
964
965 /* Video payload decoding is handled by uvc_video_decode_start(),
966 * uvc_video_decode_data() and uvc_video_decode_end().
967 *
968 * uvc_video_decode_start is called with URB data at the start of a bulk or
969 * isochronous payload. It processes header data and returns the header size
970 * in bytes if successful. If an error occurs, it returns a negative error
971 * code. The following error codes have special meanings.
972 *
973 * - EAGAIN informs the caller that the current video buffer should be marked
974 * as done, and that the function should be called again with the same data
975 * and a new video buffer. This is used when end of frame conditions can be
976 * reliably detected at the beginning of the next frame only.
977 *
978 * If an error other than -EAGAIN is returned, the caller will drop the current
979 * payload. No call to uvc_video_decode_data and uvc_video_decode_end will be
980 * made until the next payload. -ENODATA can be used to drop the current
981 * payload if no other error code is appropriate.
982 *
983 * uvc_video_decode_data is called for every URB with URB data. It copies the
984 * data to the video buffer.
985 *
986 * uvc_video_decode_end is called with header data at the end of a bulk or
987 * isochronous payload. It performs any additional header data processing and
988 * returns 0 or a negative error code if an error occurred. As header data have
989 * already been processed by uvc_video_decode_start, this function isn't
990 * required to perform sanity checks a second time.
991 *
992 * For isochronous transfers where a payload is always transferred in a single
993 * URB, the three functions will be called in a row.
994 *
995 * To let the decoder process header data and update its internal state even
996 * when no video buffer is available, uvc_video_decode_start must be prepared
997 * to be called with a NULL buf parameter. uvc_video_decode_data and
998 * uvc_video_decode_end will never be called with a NULL buffer.
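 *
 * As a minimal sketch (modelled on uvc_video_decode_isoc() below, with a
 * hypothetical get_next_buffer() standing in for the real buffer hand-over),
 * a caller typically looks like:
 *
 *	do {
 *		ret = uvc_video_decode_start(stream, buf, mem, len);
 *		if (ret == -EAGAIN)
 *			buf = get_next_buffer();
 *	} while (ret == -EAGAIN);
 *
 *	if (ret >= 0) {
 *		uvc_video_decode_data(stream, buf, mem + ret, len - ret);
 *		uvc_video_decode_end(stream, buf, mem, len);
 *	}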
999 */
1000 static int uvc_video_decode_start(struct uvc_streaming *stream,
1001 struct uvc_buffer *buf, const u8 *data, int len)
1002 {
1003 u8 fid;
1004
1005 /* Sanity checks:
1006 * - packet must be at least 2 bytes long
1007 * - bHeaderLength value must be at least 2 bytes (see above)
1008 * - bHeaderLength value can't be larger than the packet size.
1009 */
1010 if (len < 2 || data[0] < 2 || data[0] > len) {
1011 stream->stats.frame.nb_invalid++;
1012 return -EINVAL;
1013 }
1014
1015 fid = data[1] & UVC_STREAM_FID;
1016
1017 /* Increase the sequence number regardless of any buffer states, so
1018 * that discontinuous sequence numbers always indicate lost frames.
1019 */
1020 if (stream->last_fid != fid) {
1021 stream->sequence++;
1022 if (stream->sequence)
1023 uvc_video_stats_update(stream);
1024 }
1025
1026 uvc_video_clock_decode(stream, buf, data, len);
1027 uvc_video_stats_decode(stream, data, len);
1028
1029 /* Store the payload FID bit and return immediately when the buffer is
1030 * NULL.
1031 */
1032 if (buf == NULL) {
1033 stream->last_fid = fid;
1034 return -ENODATA;
1035 }
1036
1037 /* Mark the buffer as bad if the error bit is set. */
1038 if (data[1] & UVC_STREAM_ERR) {
1039 uvc_trace(UVC_TRACE_FRAME, "Marking buffer as bad (error bit "
1040 "set).\n");
1041 buf->error = 1;
1042 }
1043
1044 /* Synchronize to the input stream by waiting for the FID bit to be
1045 * toggled when the buffer state is not UVC_BUF_STATE_ACTIVE.
1046 * stream->last_fid is initialized to -1, so the first isochronous
1047 * frame will always be in sync.
1048 *
1049 * If the device doesn't toggle the FID bit, invert stream->last_fid
1050 * when the EOF bit is set to force synchronisation on the next packet.
1051 */
1052 if (buf->state != UVC_BUF_STATE_ACTIVE) {
1053 if (fid == stream->last_fid) {
1054 uvc_trace(UVC_TRACE_FRAME, "Dropping payload (out of "
1055 "sync).\n");
1056 if ((stream->dev->quirks & UVC_QUIRK_STREAM_NO_FID) &&
1057 (data[1] & UVC_STREAM_EOF))
1058 stream->last_fid ^= UVC_STREAM_FID;
1059 return -ENODATA;
1060 }
1061
1062 buf->buf.field = V4L2_FIELD_NONE;
1063 buf->buf.sequence = stream->sequence;
1064 buf->buf.vb2_buf.timestamp = ktime_to_ns(uvc_video_get_time());
1065
1066 /* TODO: Handle PTS and SCR. */
1067 buf->state = UVC_BUF_STATE_ACTIVE;
1068 }
1069
1070 /* Mark the buffer as done if we're at the beginning of a new frame.
1071 * End of frame detection is better implemented by checking the EOF
1072 * bit (FID bit toggling is delayed by one frame compared to the EOF
1073 * bit), but some devices don't set the bit at end of frame (and the
1074 * last payload can be lost anyway). We thus must check if the FID has
1075 * been toggled.
1076 *
1077 * stream->last_fid is initialized to -1, so the first isochronous
1078 * frame will never trigger an end of frame detection.
1079 *
1080 * Empty buffers (bytesused == 0) don't trigger end of frame detection
1081 * as it doesn't make sense to return an empty buffer. This also
1082 * avoids detecting end of frame conditions at FID toggling if the
1083 * previous payload had the EOF bit set.
1084 */
1085 if (fid != stream->last_fid && buf->bytesused != 0) {
1086 uvc_trace(UVC_TRACE_FRAME, "Frame complete (FID bit "
1087 "toggled).\n");
1088 buf->state = UVC_BUF_STATE_READY;
1089 return -EAGAIN;
1090 }
1091
1092 stream->last_fid = fid;
1093
1094 return data[0];
1095 }
1096
1097 static void uvc_video_decode_data(struct uvc_streaming *stream,
1098 struct uvc_buffer *buf, const u8 *data, int len)
1099 {
1100 unsigned int maxlen, nbytes;
1101 void *mem;
1102
1103 if (len <= 0)
1104 return;
1105
1106 /* Copy the video data to the buffer. */
1107 maxlen = buf->length - buf->bytesused;
1108 mem = buf->mem + buf->bytesused;
1109 nbytes = min((unsigned int)len, maxlen);
1110 memcpy(mem, data, nbytes);
1111 buf->bytesused += nbytes;
1112
1113 /* Complete the current frame if the buffer size was exceeded. */
1114 if (len > maxlen) {
1115 uvc_trace(UVC_TRACE_FRAME, "Frame complete (overflow).\n");
1116 buf->error = 1;
1117 buf->state = UVC_BUF_STATE_READY;
1118 }
1119 }
1120
1121 static void uvc_video_decode_end(struct uvc_streaming *stream,
1122 struct uvc_buffer *buf, const u8 *data, int len)
1123 {
1124 /* Mark the buffer as done if the EOF marker is set. */
1125 if (data[1] & UVC_STREAM_EOF && buf->bytesused != 0) {
1126 uvc_trace(UVC_TRACE_FRAME, "Frame complete (EOF found).\n");
1127 if (data[0] == len)
1128 uvc_trace(UVC_TRACE_FRAME, "EOF in empty payload.\n");
1129 buf->state = UVC_BUF_STATE_READY;
1130 if (stream->dev->quirks & UVC_QUIRK_STREAM_NO_FID)
1131 stream->last_fid ^= UVC_STREAM_FID;
1132 }
1133 }
1134
1135 /* Video payload encoding is handled by uvc_video_encode_header() and
1136 * uvc_video_encode_data(). Only bulk transfers are currently supported.
1137 *
1138 * uvc_video_encode_header is called at the start of a payload. It adds header
1139 * data to the transfer buffer and returns the header size. As the only known
1140 * UVC output device transfers a whole frame in a single payload, the EOF bit
1141 * is always set in the header.
1142 *
1143 * uvc_video_encode_data is called for every URB and copies the data from the
1144 * video buffer to the transfer buffer.
1145 */
1146 static int uvc_video_encode_header(struct uvc_streaming *stream,
1147 struct uvc_buffer *buf, u8 *data, int len)
1148 {
1149 data[0] = 2; /* Header length */
1150 data[1] = UVC_STREAM_EOH | UVC_STREAM_EOF
1151 | (stream->last_fid & UVC_STREAM_FID);
1152 return 2;
1153 }
1154
1155 static int uvc_video_encode_data(struct uvc_streaming *stream,
1156 struct uvc_buffer *buf, u8 *data, int len)
1157 {
1158 struct uvc_video_queue *queue = &stream->queue;
1159 unsigned int nbytes;
1160 void *mem;
1161
1162 /* Copy video data to the URB buffer. */
1163 mem = buf->mem + queue->buf_used;
1164 nbytes = min((unsigned int)len, buf->bytesused - queue->buf_used);
1165 nbytes = min(stream->bulk.max_payload_size - stream->bulk.payload_size,
1166 nbytes);
1167 memcpy(data, mem, nbytes);
1168
1169 queue->buf_used += nbytes;
1170
1171 return nbytes;
1172 }
1173
1174 /* ------------------------------------------------------------------------
1175 * Metadata
1176 */
1177
1178 /*
1179 * In addition to the payload headers, we also want to provide the user with USB
1180 * Frame Numbers and system time values. The resulting buffer is thus composed
1181 * of blocks, containing a 64-bit timestamp in nanoseconds, a 16-bit USB Frame
1182 * Number, and a copy of the payload header.
1183 *
1184 * Ideally we want to capture all payload headers for each frame. However, their
1185 * number is unknown and unbounded. We thus drop headers that contain no vendor
1186 * data and that either contain no SCR value or an SCR value identical to the
1187 * previous header.
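 *
 * A sketch of one block as laid out in the metadata buffer, matching the
 * struct uvc_meta_buf fields used below:
 *
 *	__u64 ns;      host timestamp, in nanoseconds
 *	__u16 sof;     USB frame number
 *	__u8  length;  start of the copied payload header (bHeaderLength,
 *	__u8  flags;   bmHeaderInfo, then PTS/SCR/vendor bytes in buf[])
 *	__u8  buf[];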
1188 */
1189 static void uvc_video_decode_meta(struct uvc_streaming *stream,
1190 struct uvc_buffer *meta_buf,
1191 const u8 *mem, unsigned int length)
1192 {
1193 struct uvc_meta_buf *meta;
1194 size_t len_std = 2;
1195 bool has_pts, has_scr;
1196 unsigned long flags;
1197 unsigned int sof;
1198 ktime_t time;
1199 const u8 *scr;
1200
1201 if (!meta_buf || length == 2)
1202 return;
1203
1204 if (meta_buf->length - meta_buf->bytesused <
1205 length + sizeof(meta->ns) + sizeof(meta->sof)) {
1206 meta_buf->error = 1;
1207 return;
1208 }
1209
1210 has_pts = mem[1] & UVC_STREAM_PTS;
1211 has_scr = mem[1] & UVC_STREAM_SCR;
1212
1213 if (has_pts) {
1214 len_std += 4;
1215 scr = mem + 6;
1216 } else {
1217 scr = mem + 2;
1218 }
1219
1220 if (has_scr)
1221 len_std += 6;
1222
1223 if (stream->meta.format == V4L2_META_FMT_UVC)
1224 length = len_std;
1225
1226 if (length == len_std && (!has_scr ||
1227 !memcmp(scr, stream->clock.last_scr, 6)))
1228 return;
1229
1230 meta = (struct uvc_meta_buf *)((u8 *)meta_buf->mem + meta_buf->bytesused);
1231 local_irq_save(flags);
1232 time = uvc_video_get_time();
1233 sof = usb_get_current_frame_number(stream->dev->udev);
1234 local_irq_restore(flags);
1235 put_unaligned(ktime_to_ns(time), &meta->ns);
1236 put_unaligned(sof, &meta->sof);
1237
1238 if (has_scr)
1239 memcpy(stream->clock.last_scr, scr, 6);
1240
1241 memcpy(&meta->length, mem, length);
1242 meta_buf->bytesused += length + sizeof(meta->ns) + sizeof(meta->sof);
1243
1244 uvc_trace(UVC_TRACE_FRAME,
1245 "%s(): t-sys %lluns, SOF %u, len %u, flags 0x%x, PTS %u, STC %u frame SOF %u\n",
1246 __func__, ktime_to_ns(time), meta->sof, meta->length,
1247 meta->flags,
1248 has_pts ? *(u32 *)meta->buf : 0,
1249 has_scr ? *(u32 *)scr : 0,
1250 has_scr ? *(u32 *)(scr + 4) & 0x7ff : 0);
1251 }
1252
1253 /* ------------------------------------------------------------------------
1254 * URB handling
1255 */
1256
1257 /*
1258 * Set error flag for incomplete buffer.
1259 */
1260 static void uvc_video_validate_buffer(const struct uvc_streaming *stream,
1261 struct uvc_buffer *buf)
1262 {
1263 if (stream->ctrl.dwMaxVideoFrameSize != buf->bytesused &&
1264 !(stream->cur_format->flags & UVC_FMT_FLAG_COMPRESSED))
1265 buf->error = 1;
1266 }
1267
1268 /*
1269 * Completion handler for video URBs.
1270 */
1271
1272 static void uvc_video_next_buffers(struct uvc_streaming *stream,
1273 struct uvc_buffer **video_buf, struct uvc_buffer **meta_buf)
1274 {
1275 uvc_video_validate_buffer(stream, *video_buf);
1276
1277 if (*meta_buf) {
1278 struct vb2_v4l2_buffer *vb2_meta = &(*meta_buf)->buf;
1279 const struct vb2_v4l2_buffer *vb2_video = &(*video_buf)->buf;
1280
1281 vb2_meta->sequence = vb2_video->sequence;
1282 vb2_meta->field = vb2_video->field;
1283 vb2_meta->vb2_buf.timestamp = vb2_video->vb2_buf.timestamp;
1284
1285 (*meta_buf)->state = UVC_BUF_STATE_READY;
1286 if (!(*meta_buf)->error)
1287 (*meta_buf)->error = (*video_buf)->error;
1288 *meta_buf = uvc_queue_next_buffer(&stream->meta.queue,
1289 *meta_buf);
1290 }
1291 *video_buf = uvc_queue_next_buffer(&stream->queue, *video_buf);
1292 }
1293
1294 static void uvc_video_decode_isoc(struct urb *urb, struct uvc_streaming *stream,
1295 struct uvc_buffer *buf, struct uvc_buffer *meta_buf)
1296 {
1297 u8 *mem;
1298 int ret, i;
1299
1300 for (i = 0; i < urb->number_of_packets; ++i) {
1301 if (urb->iso_frame_desc[i].status < 0) {
1302 uvc_trace(UVC_TRACE_FRAME, "USB isochronous frame "
1303 "lost (%d).\n", urb->iso_frame_desc[i].status);
1304 /* Mark the buffer as faulty. */
1305 if (buf != NULL)
1306 buf->error = 1;
1307 continue;
1308 }
1309
1310 /* Decode the payload header. */
1311 mem = urb->transfer_buffer + urb->iso_frame_desc[i].offset;
1312 do {
1313 ret = uvc_video_decode_start(stream, buf, mem,
1314 urb->iso_frame_desc[i].actual_length);
1315 if (ret == -EAGAIN)
1316 uvc_video_next_buffers(stream, &buf, &meta_buf);
1317 } while (ret == -EAGAIN);
1318
1319 if (ret < 0)
1320 continue;
1321
1322 uvc_video_decode_meta(stream, meta_buf, mem, ret);
1323
1324 /* Decode the payload data. */
1325 uvc_video_decode_data(stream, buf, mem + ret,
1326 urb->iso_frame_desc[i].actual_length - ret);
1327
1328 /* Process the header again. */
1329 uvc_video_decode_end(stream, buf, mem,
1330 urb->iso_frame_desc[i].actual_length);
1331
1332 if (buf->state == UVC_BUF_STATE_READY)
1333 uvc_video_next_buffers(stream, &buf, &meta_buf);
1334 }
1335 }
1336
1337 static void uvc_video_decode_bulk(struct urb *urb, struct uvc_streaming *stream,
1338 struct uvc_buffer *buf, struct uvc_buffer *meta_buf)
1339 {
1340 u8 *mem;
1341 int len, ret;
1342
1343 /*
1344 * Ignore ZLPs if they're not part of a frame, otherwise process them
1345 * to trigger the end of payload detection.
1346 */
1347 if (urb->actual_length == 0 && stream->bulk.header_size == 0)
1348 return;
1349
1350 mem = urb->transfer_buffer;
1351 len = urb->actual_length;
1352 stream->bulk.payload_size += len;
1353
1354 /* If the URB is the first of its payload, decode and save the
1355 * header.
1356 */
1357 if (stream->bulk.header_size == 0 && !stream->bulk.skip_payload) {
1358 do {
1359 ret = uvc_video_decode_start(stream, buf, mem, len);
1360 if (ret == -EAGAIN)
1361 uvc_video_next_buffers(stream, &buf, &meta_buf);
1362 } while (ret == -EAGAIN);
1363
1364 /* If an error occurred skip the rest of the payload. */
1365 if (ret < 0 || buf == NULL) {
1366 stream->bulk.skip_payload = 1;
1367 } else {
1368 memcpy(stream->bulk.header, mem, ret);
1369 stream->bulk.header_size = ret;
1370
1371 uvc_video_decode_meta(stream, meta_buf, mem, ret);
1372
1373 mem += ret;
1374 len -= ret;
1375 }
1376 }
1377
1378 /* The buffer queue might have been cancelled while a bulk transfer
1379 * was in progress, so we can reach here with buf equal to NULL. Make
1380 * sure buf is never dereferenced if NULL.
1381 */
1382
1383 /* Process video data. */
1384 if (!stream->bulk.skip_payload && buf != NULL)
1385 uvc_video_decode_data(stream, buf, mem, len);
1386
1387 /* Detect the payload end by a URB smaller than the maximum size (or
1388 * a payload size equal to the maximum) and process the header again.
1389 */
1390 if (urb->actual_length < urb->transfer_buffer_length ||
1391 stream->bulk.payload_size >= stream->bulk.max_payload_size) {
1392 if (!stream->bulk.skip_payload && buf != NULL) {
1393 uvc_video_decode_end(stream, buf, stream->bulk.header,
1394 stream->bulk.payload_size);
1395 if (buf->state == UVC_BUF_STATE_READY)
1396 uvc_video_next_buffers(stream, &buf, &meta_buf);
1397 }
1398
1399 stream->bulk.header_size = 0;
1400 stream->bulk.skip_payload = 0;
1401 stream->bulk.payload_size = 0;
1402 }
1403 }
1404
1405 static void uvc_video_encode_bulk(struct urb *urb, struct uvc_streaming *stream,
1406 struct uvc_buffer *buf, struct uvc_buffer *meta_buf)
1407 {
1408 u8 *mem = urb->transfer_buffer;
1409 int len = stream->urb_size, ret;
1410
1411 if (buf == NULL) {
1412 urb->transfer_buffer_length = 0;
1413 return;
1414 }
1415
1416 /* If the URB is the first of its payload, add the header. */
1417 if (stream->bulk.header_size == 0) {
1418 ret = uvc_video_encode_header(stream, buf, mem, len);
1419 stream->bulk.header_size = ret;
1420 stream->bulk.payload_size += ret;
1421 mem += ret;
1422 len -= ret;
1423 }
1424
1425 /* Process video data. */
1426 ret = uvc_video_encode_data(stream, buf, mem, len);
1427
1428 stream->bulk.payload_size += ret;
1429 len -= ret;
1430
1431 if (buf->bytesused == stream->queue.buf_used ||
1432 stream->bulk.payload_size == stream->bulk.max_payload_size) {
1433 if (buf->bytesused == stream->queue.buf_used) {
1434 stream->queue.buf_used = 0;
1435 buf->state = UVC_BUF_STATE_READY;
1436 buf->buf.sequence = ++stream->sequence;
1437 uvc_queue_next_buffer(&stream->queue, buf);
1438 stream->last_fid ^= UVC_STREAM_FID;
1439 }
1440
1441 stream->bulk.header_size = 0;
1442 stream->bulk.payload_size = 0;
1443 }
1444
1445 urb->transfer_buffer_length = stream->urb_size - len;
1446 }
1447
1448 static void uvc_video_complete(struct urb *urb)
1449 {
1450 struct uvc_streaming *stream = urb->context;
1451 struct uvc_video_queue *queue = &stream->queue;
1452 struct uvc_video_queue *qmeta = &stream->meta.queue;
1453 struct vb2_queue *vb2_qmeta = stream->meta.vdev.queue;
1454 struct uvc_buffer *buf = NULL;
1455 struct uvc_buffer *buf_meta = NULL;
1456 unsigned long flags;
1457 int ret;
1458
1459 switch (urb->status) {
1460 case 0:
1461 break;
1462
1463 default:
1464 uvc_printk(KERN_WARNING, "Non-zero status (%d) in video "
1465 "completion handler.\n", urb->status);
1466 /* fall through */
1467 case -ENOENT: /* usb_kill_urb() called. */
1468 if (stream->frozen)
1469 return;
1470 /* fall through */
1471 case -ECONNRESET: /* usb_unlink_urb() called. */
1472 case -ESHUTDOWN: /* The endpoint is being disabled. */
1473 uvc_queue_cancel(queue, urb->status == -ESHUTDOWN);
1474 if (vb2_qmeta)
1475 uvc_queue_cancel(qmeta, urb->status == -ESHUTDOWN);
1476 return;
1477 }
1478
1479 spin_lock_irqsave(&queue->irqlock, flags);
1480 if (!list_empty(&queue->irqqueue))
1481 buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
1482 queue);
1483 spin_unlock_irqrestore(&queue->irqlock, flags);
1484
1485 if (vb2_qmeta) {
1486 spin_lock_irqsave(&qmeta->irqlock, flags);
1487 if (!list_empty(&qmeta->irqqueue))
1488 buf_meta = list_first_entry(&qmeta->irqqueue,
1489 struct uvc_buffer, queue);
1490 spin_unlock_irqrestore(&qmeta->irqlock, flags);
1491 }
1492
1493 stream->decode(urb, stream, buf, buf_meta);
1494
1495 if ((ret = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
1496 uvc_printk(KERN_ERR, "Failed to resubmit video URB (%d).\n",
1497 ret);
1498 }
1499 }
1500
1501 /*
1502 * Free transfer buffers.
1503 */
1504 static void uvc_free_urb_buffers(struct uvc_streaming *stream)
1505 {
1506 unsigned int i;
1507
1508 for (i = 0; i < UVC_URBS; ++i) {
1509 if (stream->urb_buffer[i]) {
1510 #ifndef CONFIG_DMA_NONCOHERENT
1511 usb_free_coherent(stream->dev->udev, stream->urb_size,
1512 stream->urb_buffer[i], stream->urb_dma[i]);
1513 #else
1514 kfree(stream->urb_buffer[i]);
1515 #endif
1516 stream->urb_buffer[i] = NULL;
1517 }
1518 }
1519
1520 stream->urb_size = 0;
1521 }
1522
1523 /*
1524 * Allocate transfer buffers. This function can be called with buffers
1525 * already allocated when resuming from suspend, in which case it will
1526 * return without touching the buffers.
1527 *
1528 * Limit the buffer size to UVC_MAX_PACKETS bulk/isochronous packets. If the
1529 * system is too low on memory try successively smaller numbers of packets
1530 * until allocation succeeds.
1531 *
1532 * Return the number of allocated packets on success or 0 when out of memory.
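 *
 * For example (illustrative numbers only), size = 614400 and psize = 3072
 * give DIV_ROUND_UP(614400, 3072) = 200 packets, clamped to UVC_MAX_PACKETS;
 * if allocating UVC_URBS buffers of npackets * psize bytes fails, npackets is
 * halved and the allocation retried.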
1533 */
1534 static int uvc_alloc_urb_buffers(struct uvc_streaming *stream,
1535 unsigned int size, unsigned int psize, gfp_t gfp_flags)
1536 {
1537 unsigned int npackets;
1538 unsigned int i;
1539
1540 /* Buffers are already allocated, bail out. */
1541 if (stream->urb_size)
1542 return stream->urb_size / psize;
1543
1544 /* Compute the number of packets. Bulk endpoints might transfer UVC
1545 * payloads across multiple URBs.
1546 */
1547 npackets = DIV_ROUND_UP(size, psize);
1548 if (npackets > UVC_MAX_PACKETS)
1549 npackets = UVC_MAX_PACKETS;
1550
1551 /* Retry allocations until one succeeds. */
1552 for (; npackets > 1; npackets /= 2) {
1553 for (i = 0; i < UVC_URBS; ++i) {
1554 stream->urb_size = psize * npackets;
1555 #ifndef CONFIG_DMA_NONCOHERENT
1556 stream->urb_buffer[i] = usb_alloc_coherent(
1557 stream->dev->udev, stream->urb_size,
1558 gfp_flags | __GFP_NOWARN, &stream->urb_dma[i]);
1559 #else
1560 stream->urb_buffer[i] =
1561 kmalloc(stream->urb_size, gfp_flags | __GFP_NOWARN);
1562 #endif
1563 if (!stream->urb_buffer[i]) {
1564 uvc_free_urb_buffers(stream);
1565 break;
1566 }
1567 }
1568
1569 if (i == UVC_URBS) {
1570 uvc_trace(UVC_TRACE_VIDEO, "Allocated %u URB buffers "
1571 "of %ux%u bytes each.\n", UVC_URBS, npackets,
1572 psize);
1573 return npackets;
1574 }
1575 }
1576
1577 uvc_trace(UVC_TRACE_VIDEO, "Failed to allocate URB buffers (%u bytes "
1578 "per packet).\n", psize);
1579 return 0;
1580 }
1581
1582 /*
1583 * Uninitialize isochronous/bulk URBs and free transfer buffers.
1584 */
1585 static void uvc_uninit_video(struct uvc_streaming *stream, int free_buffers)
1586 {
1587 struct urb *urb;
1588 unsigned int i;
1589
1590 uvc_video_stats_stop(stream);
1591
1592 for (i = 0; i < UVC_URBS; ++i) {
1593 urb = stream->urb[i];
1594 if (urb == NULL)
1595 continue;
1596
1597 usb_kill_urb(urb);
1598 usb_free_urb(urb);
1599 stream->urb[i] = NULL;
1600 }
1601
1602 if (free_buffers)
1603 uvc_free_urb_buffers(stream);
1604 }
1605
1606 /*
1607 * Compute the maximum number of bytes per interval for an endpoint.
1608 */
1609 static unsigned int uvc_endpoint_max_bpi(struct usb_device *dev,
1610 struct usb_host_endpoint *ep)
1611 {
1612 u16 psize;
1613 u16 mult;
1614
1615 switch (dev->speed) {
1616 case USB_SPEED_SUPER:
1617 case USB_SPEED_SUPER_PLUS:
1618 return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
1619 case USB_SPEED_HIGH:
1620 psize = usb_endpoint_maxp(&ep->desc);
1621 mult = usb_endpoint_maxp_mult(&ep->desc);
1622 return psize * mult;
1623 case USB_SPEED_WIRELESS:
1624 psize = usb_endpoint_maxp(&ep->desc);
1625 return psize;
1626 default:
1627 psize = usb_endpoint_maxp(&ep->desc);
1628 return psize;
1629 }
1630 }
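
/*
 * For example (illustrative numbers only), a high-speed isochronous endpoint
 * with wMaxPacketSize 0x1400 has a 1024 byte packet size and a multiplier of
 * 3 (two additional transaction opportunities per micro-frame), giving 3072
 * bytes per interval.
 */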
1631
1632 /*
1633 * Initialize isochronous URBs and allocate transfer buffers. The packet size
1634 * is given by the endpoint.
1635 */
1636 static int uvc_init_video_isoc(struct uvc_streaming *stream,
1637 struct usb_host_endpoint *ep, gfp_t gfp_flags)
1638 {
1639 struct urb *urb;
1640 unsigned int npackets, i, j;
1641 u16 psize;
1642 u32 size;
1643
1644 psize = uvc_endpoint_max_bpi(stream->dev->udev, ep);
1645 size = stream->ctrl.dwMaxVideoFrameSize;
1646
1647 npackets = uvc_alloc_urb_buffers(stream, size, psize, gfp_flags);
1648 if (npackets == 0)
1649 return -ENOMEM;
1650
1651 size = npackets * psize;
1652
1653 for (i = 0; i < UVC_URBS; ++i) {
1654 urb = usb_alloc_urb(npackets, gfp_flags);
1655 if (urb == NULL) {
1656 uvc_uninit_video(stream, 1);
1657 return -ENOMEM;
1658 }
1659
1660 urb->dev = stream->dev->udev;
1661 urb->context = stream;
1662 urb->pipe = usb_rcvisocpipe(stream->dev->udev,
1663 ep->desc.bEndpointAddress);
1664 #ifndef CONFIG_DMA_NONCOHERENT
1665 urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
1666 urb->transfer_dma = stream->urb_dma[i];
1667 #else
1668 urb->transfer_flags = URB_ISO_ASAP;
1669 #endif
1670 urb->interval = ep->desc.bInterval;
1671 urb->transfer_buffer = stream->urb_buffer[i];
1672 urb->complete = uvc_video_complete;
1673 urb->number_of_packets = npackets;
1674 urb->transfer_buffer_length = size;
1675
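		/* Split the transfer buffer into npackets fixed-size
		 * isochronous packet descriptors.
		 */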
1676 for (j = 0; j < npackets; ++j) {
1677 urb->iso_frame_desc[j].offset = j * psize;
1678 urb->iso_frame_desc[j].length = psize;
1679 }
1680
1681 stream->urb[i] = urb;
1682 }
1683
1684 return 0;
1685 }
1686
1687 /*
1688 * Initialize bulk URBs and allocate transfer buffers. The packet size is
1689 * given by the endpoint.
1690 */
1691 static int uvc_init_video_bulk(struct uvc_streaming *stream,
1692 struct usb_host_endpoint *ep, gfp_t gfp_flags)
1693 {
1694 struct urb *urb;
1695 unsigned int npackets, pipe, i;
1696 u16 psize;
1697 u32 size;
1698
1699 psize = usb_endpoint_maxp(&ep->desc);
1700 size = stream->ctrl.dwMaxPayloadTransferSize;
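	/* Record the maximum payload size; the bulk payload handlers use it
	 * to detect the end of a payload.
	 */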
1701 stream->bulk.max_payload_size = size;
1702
1703 npackets = uvc_alloc_urb_buffers(stream, size, psize, gfp_flags);
1704 if (npackets == 0)
1705 return -ENOMEM;
1706
1707 size = npackets * psize;
1708
1709 if (usb_endpoint_dir_in(&ep->desc))
1710 pipe = usb_rcvbulkpipe(stream->dev->udev,
1711 ep->desc.bEndpointAddress);
1712 else
1713 pipe = usb_sndbulkpipe(stream->dev->udev,
1714 ep->desc.bEndpointAddress);
1715
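	/* Video output devices submit their URBs empty; the completion
	 * handler fills them with payload data before resubmission.
	 */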
1716 if (stream->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
1717 size = 0;
1718
1719 for (i = 0; i < UVC_URBS; ++i) {
1720 urb = usb_alloc_urb(0, gfp_flags);
1721 if (urb == NULL) {
1722 uvc_uninit_video(stream, 1);
1723 return -ENOMEM;
1724 }
1725
1726 usb_fill_bulk_urb(urb, stream->dev->udev, pipe,
1727 stream->urb_buffer[i], size, uvc_video_complete,
1728 stream);
1729 #ifndef CONFIG_DMA_NONCOHERENT
1730 urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
1731 urb->transfer_dma = stream->urb_dma[i];
1732 #endif
1733
1734 stream->urb[i] = urb;
1735 }
1736
1737 return 0;
1738 }
1739
1740 /*
1741 * Initialize isochronous/bulk URBs and allocate transfer buffers.
1742 */
1743 static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags)
1744 {
1745 struct usb_interface *intf = stream->intf;
1746 struct usb_host_endpoint *ep;
1747 unsigned int i;
1748 int ret;
1749
1750 stream->sequence = -1;
1751 stream->last_fid = -1;
1752 stream->bulk.header_size = 0;
1753 stream->bulk.skip_payload = 0;
1754 stream->bulk.payload_size = 0;
1755
1756 uvc_video_stats_start(stream);
1757
1758 if (intf->num_altsetting > 1) {
1759 struct usb_host_endpoint *best_ep = NULL;
1760 unsigned int best_psize = UINT_MAX;
1761 unsigned int bandwidth;
1762 unsigned int uninitialized_var(altsetting);
1763 int intfnum = stream->intfnum;
1764
1765 /* Isochronous endpoint, select the alternate setting. */
1766 bandwidth = stream->ctrl.dwMaxPayloadTransferSize;
1767
1768 if (bandwidth == 0) {
1769 uvc_trace(UVC_TRACE_VIDEO, "Device requested null "
1770 "bandwidth, defaulting to lowest.\n");
1771 bandwidth = 1;
1772 } else {
1773 uvc_trace(UVC_TRACE_VIDEO, "Device requested %u "
1774 "B/frame bandwidth.\n", bandwidth);
1775 }
1776
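		/* Pick the alternate setting whose endpoint offers the smallest
		 * packet size that still satisfies the requested bandwidth,
		 * leaving as much bus bandwidth as possible for other devices.
		 */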
1777 for (i = 0; i < intf->num_altsetting; ++i) {
1778 struct usb_host_interface *alts;
1779 unsigned int psize;
1780
1781 alts = &intf->altsetting[i];
1782 ep = uvc_find_endpoint(alts,
1783 stream->header.bEndpointAddress);
1784 if (ep == NULL)
1785 continue;
1786
1787 /* Check if the bandwidth is high enough. */
1788 psize = uvc_endpoint_max_bpi(stream->dev->udev, ep);
1789 if (psize >= bandwidth && psize <= best_psize) {
1790 altsetting = alts->desc.bAlternateSetting;
1791 best_psize = psize;
1792 best_ep = ep;
1793 }
1794 }
1795
1796 if (best_ep == NULL) {
1797 uvc_trace(UVC_TRACE_VIDEO, "No fast enough alt setting "
1798 "for requested bandwidth.\n");
1799 return -EIO;
1800 }
1801
1802 uvc_trace(UVC_TRACE_VIDEO, "Selecting alternate setting %u "
1803 "(%u B/frame bandwidth).\n", altsetting, best_psize);
1804
1805 ret = usb_set_interface(stream->dev->udev, intfnum, altsetting);
1806 if (ret < 0)
1807 return ret;
1808
1809 ret = uvc_init_video_isoc(stream, best_ep, gfp_flags);
1810 } else {
1811 /* Bulk endpoint, proceed to URB initialization. */
1812 ep = uvc_find_endpoint(&intf->altsetting[0],
1813 stream->header.bEndpointAddress);
1814 if (ep == NULL)
1815 return -EIO;
1816
1817 ret = uvc_init_video_bulk(stream, ep, gfp_flags);
1818 }
1819
1820 if (ret < 0)
1821 return ret;
1822
1823 /* Submit the URBs. */
1824 for (i = 0; i < UVC_URBS; ++i) {
1825 ret = usb_submit_urb(stream->urb[i], gfp_flags);
1826 if (ret < 0) {
1827 uvc_printk(KERN_ERR, "Failed to submit URB %u "
1828 "(%d).\n", i, ret);
1829 uvc_uninit_video(stream, 1);
1830 return ret;
1831 }
1832 }
1833
1834 	/* The Logitech C920 temporarily forgets that it should not be adjusting
1835 	 * Exposure Absolute during initialization, so restore the controls to their stored values.
1836 	 */
1837 if (stream->dev->quirks & UVC_QUIRK_RESTORE_CTRLS_ON_INIT)
1838 uvc_ctrl_restore_values(stream->dev);
1839
1840 return 0;
1841 }
1842
1843 /* --------------------------------------------------------------------------
1844 * Suspend/resume
1845 */
1846
1847 /*
1848 * Stop streaming without disabling the video queue.
1849 *
1850 * To let userspace applications resume without trouble, we must not touch the
1851 * video buffers in any way. We mark the device as frozen to make sure the URB
1852 * completion handler won't try to cancel the queue when we kill the URBs.
1853 */
1854 int uvc_video_suspend(struct uvc_streaming *stream)
1855 {
1856 if (!uvc_queue_streaming(&stream->queue))
1857 return 0;
1858
1859 stream->frozen = 1;
1860 uvc_uninit_video(stream, 0);
1861 usb_set_interface(stream->dev->udev, stream->intfnum, 0);
1862 return 0;
1863 }
1864
1865 /*
1866 * Reconfigure the video interface and restart streaming if it was enabled
1867 * before suspend.
1868 *
1869 * If an error occurs, disable the video queue. This will wake all pending
1870 * buffers, making sure userspace applications are notified of the problem
1871 * instead of waiting forever.
1872 */
1873 int uvc_video_resume(struct uvc_streaming *stream, int reset)
1874 {
1875 int ret;
1876
1877 /* If the bus has been reset on resume, set the alternate setting to 0.
1878 * This should be the default value, but some devices crash or otherwise
1879 * misbehave if they don't receive a SET_INTERFACE request before any
1880 * other video control request.
1881 */
1882 if (reset)
1883 usb_set_interface(stream->dev->udev, stream->intfnum, 0);
1884
1885 stream->frozen = 0;
1886
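	/* Reset the stream clock so that the device/host timestamp
	 * correlation is rebuilt from scratch once streaming resumes.
	 */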
1887 uvc_video_clock_reset(stream);
1888
1889 if (!uvc_queue_streaming(&stream->queue))
1890 return 0;
1891
1892 ret = uvc_commit_video(stream, &stream->ctrl);
1893 if (ret < 0)
1894 return ret;
1895
1896 return uvc_init_video(stream, GFP_NOIO);
1897 }
1898
1899 /* ------------------------------------------------------------------------
1900 * Video device
1901 */
1902
1903 /*
1904 * Initialize the UVC video device by switching to alternate setting 0 and
1905  * retrieving the default format.
1906 *
1907 * Some cameras (namely the Fuji Finepix) set the format and frame
1908 * indexes to zero. The UVC standard doesn't clearly make this a spec
1909 * violation, so try to silently fix the values if possible.
1910 *
1911 * This function is called before registering the device with V4L.
1912 */
1913 int uvc_video_init(struct uvc_streaming *stream)
1914 {
1915 struct uvc_streaming_control *probe = &stream->ctrl;
1916 struct uvc_format *format = NULL;
1917 struct uvc_frame *frame = NULL;
1918 unsigned int i;
1919 int ret;
1920
1921 if (stream->nformats == 0) {
1922 uvc_printk(KERN_INFO, "No supported video formats found.\n");
1923 return -EINVAL;
1924 }
1925
1926 atomic_set(&stream->active, 0);
1927
1928 /* Alternate setting 0 should be the default, yet the XBox Live Vision
1929 * Cam (and possibly other devices) crash or otherwise misbehave if
1930 * they don't receive a SET_INTERFACE request before any other video
1931 * control request.
1932 */
1933 usb_set_interface(stream->dev->udev, stream->intfnum, 0);
1934
1935 /* Set the streaming probe control with default streaming parameters
1936  * retrieved from the device. Webcams that don't support GET_DEF
1937 * requests on the probe control will just keep their current streaming
1938 * parameters.
1939 */
1940 if (uvc_get_video_ctrl(stream, probe, 1, UVC_GET_DEF) == 0)
1941 uvc_set_video_ctrl(stream, probe, 1);
1942
1943 /* Initialize the streaming parameters with the probe control current
1944 * value. This makes sure SET_CUR requests on the streaming commit
1945 * control will always use values retrieved from a successful GET_CUR
1946 * request on the probe control, as required by the UVC specification.
1947 */
1948 ret = uvc_get_video_ctrl(stream, probe, 1, UVC_GET_CUR);
1949 if (ret < 0)
1950 return ret;
1951
1952 /* Check if the default format descriptor exists. Use the first
1953 * available format otherwise.
1954 */
1955 for (i = stream->nformats; i > 0; --i) {
1956 format = &stream->format[i-1];
1957 if (format->index == probe->bFormatIndex)
1958 break;
1959 }
1960
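	/* If no format matched the probe result, the loop above leaves format
	 * pointing at the first format, so it is never NULL here (nformats has
	 * already been checked to be non-zero).
	 */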
1961 if (format->nframes == 0) {
1962 uvc_printk(KERN_INFO, "No frame descriptor found for the "
1963 "default format.\n");
1964 return -EINVAL;
1965 }
1966
1967 /* Zero bFrameIndex might be correct. Stream-based formats (including
1968 * MPEG-2 TS and DV) do not support frames but have a dummy frame
1969 * descriptor with bFrameIndex set to zero. If the default frame
1970 * descriptor is not found, use the first available frame.
1971 */
1972 for (i = format->nframes; i > 0; --i) {
1973 frame = &format->frame[i-1];
1974 if (frame->bFrameIndex == probe->bFrameIndex)
1975 break;
1976 }
1977
1978 probe->bFormatIndex = format->index;
1979 probe->bFrameIndex = frame->bFrameIndex;
1980
1981 stream->def_format = format;
1982 stream->cur_format = format;
1983 stream->cur_frame = frame;
1984
1985 /* Select the video decoding function */
1986 if (stream->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
1987 if (stream->dev->quirks & UVC_QUIRK_BUILTIN_ISIGHT)
1988 stream->decode = uvc_video_decode_isight;
1989 else if (stream->intf->num_altsetting > 1)
1990 stream->decode = uvc_video_decode_isoc;
1991 else
1992 stream->decode = uvc_video_decode_bulk;
1993 } else {
1994 if (stream->intf->num_altsetting == 1)
1995 stream->decode = uvc_video_encode_bulk;
1996 else {
1997 uvc_printk(KERN_INFO, "Isochronous endpoints are not "
1998 "supported for video output devices.\n");
1999 return -EINVAL;
2000 }
2001 }
2002
2003 return 0;
2004 }
2005
2006 /*
2007 * Enable or disable the video stream.
2008 */
2009 int uvc_video_enable(struct uvc_streaming *stream, int enable)
2010 {
2011 int ret;
2012
2013 if (!enable) {
2014 uvc_uninit_video(stream, 1);
2015 if (stream->intf->num_altsetting > 1) {
2016 usb_set_interface(stream->dev->udev,
2017 stream->intfnum, 0);
2018 } else {
2019 /* UVC doesn't specify how to inform a bulk-based device
2020 * when the video stream is stopped. Windows sends a
2021 * CLEAR_FEATURE(HALT) request to the video streaming
2022 			 * bulk endpoint; mimic the same behaviour.
2023 */
2024 unsigned int epnum = stream->header.bEndpointAddress
2025 & USB_ENDPOINT_NUMBER_MASK;
2026 unsigned int dir = stream->header.bEndpointAddress
2027 & USB_ENDPOINT_DIR_MASK;
2028 unsigned int pipe;
2029
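			/* Build the pipe manually and OR the direction bit back
			 * in so that usb_clear_halt() targets the streaming
			 * endpoint whether it is an IN or an OUT endpoint.
			 */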
2030 pipe = usb_sndbulkpipe(stream->dev->udev, epnum) | dir;
2031 usb_clear_halt(stream->dev->udev, pipe);
2032 }
2033
2034 uvc_video_clock_cleanup(stream);
2035 return 0;
2036 }
2037
2038 ret = uvc_video_clock_init(stream);
2039 if (ret < 0)
2040 return ret;
2041
2042 /* Commit the streaming parameters. */
2043 ret = uvc_commit_video(stream, &stream->ctrl);
2044 if (ret < 0)
2045 goto error_commit;
2046
2047 ret = uvc_init_video(stream, GFP_KERNEL);
2048 if (ret < 0)
2049 goto error_video;
2050
2051 return 0;
2052
2053 error_video:
2054 usb_set_interface(stream->dev->udev, stream->intfnum, 0);
2055 error_commit:
2056 uvc_video_clock_cleanup(stream);
2057
2058 return ret;
2059 }
2060