// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Video for Linux Two
 *
 * A generic video device interface for the LINUX operating system
 * using a set of device structures/vectors for low level operations.
 *
 * This file replaces the videodev.c file that comes with the
 * regular kernel distribution.
 *
 * Author: Bill Dirks <bill@thedirks.org>
 *         based on code by Alan Cox, <alan@cymru.net>
 */

/*
 * Video capture interface for Linux
 *
 * A generic video device interface for the LINUX operating system
 * using a set of device structures/vectors for low level operations.
 *
 * Author: Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 */

/*
 * Video4linux 1/2 integration by Justin Schoeman
 * <justin@suntiger.ee.up.ac.za>
 * 2.4 PROCFS support ported from 2.4 kernels by
 * Iñaki García Etxebarria <garetxe@euskalnet.net>
 * Makefile fix by "W. Michael Petullo" <mike@flyn.org>
 * 2.4 devfs support ported from 2.4 kernels by
 * Dan Merillat <dan@merillat.org>
 * Added Gerd Knorr's v4l1 enhancements (Justin Schoeman)
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>

#include <linux/videodev2.h>

/*
 *
 *      V 4 L 2   D R I V E R   H E L P E R   A P I
 *
 */

/*
 * Video Standard Operations (contributed by Michael Schimek)
 */

/* Helper functions for control handling */

/* Fill in a struct v4l2_queryctrl */
int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _step, s32 _def)
{
        const char *name;
        s64 min = _min;
        s64 max = _max;
        u64 step = _step;
        s64 def = _def;

        v4l2_ctrl_fill(qctrl->id, &name, &qctrl->type,
                       &min, &max, &step, &def, &qctrl->flags);

        if (name == NULL)
                return -EINVAL;

        qctrl->minimum = min;
        qctrl->maximum = max;
        qctrl->step = step;
        qctrl->default_value = def;
        qctrl->reserved[0] = qctrl->reserved[1] = 0;
        strscpy(qctrl->name, name, sizeof(qctrl->name));
        return 0;
}
EXPORT_SYMBOL(v4l2_ctrl_query_fill);
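
/*
 * Illustrative use (editor's sketch, not part of this file): a driver's
 * vidioc_queryctrl handler can delegate the name/range fill-in for a
 * standard control ID.  The handler name and the bounds below are
 * assumptions for illustration only.
 *
 *      static int mydrv_queryctrl(struct file *file, void *fh,
 *                                 struct v4l2_queryctrl *qc)
 *      {
 *              switch (qc->id) {
 *              case V4L2_CID_BRIGHTNESS:
 *                      // min, max, step, default for a hypothetical device
 *                      return v4l2_ctrl_query_fill(qc, 0, 255, 1, 128);
 *              default:
 *                      return -EINVAL;
 *              }
 *      }
 */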

/* Clamp x to be between min and max, aligned to a multiple of 2^align.  min
 * and max don't have to be aligned, but there must be at least one valid
 * value.  E.g., min=17,max=31,align=4 is not allowed as there are no multiples
 * of 16 between 17 and 31. */
static unsigned int clamp_align(unsigned int x, unsigned int min,
                                unsigned int max, unsigned int align)
{
        /* Bits that must be zero to be aligned */
        unsigned int mask = ~((1 << align) - 1);

        /* Clamp to aligned min and max */
        x = clamp(x, (min + ~mask) & mask, max & mask);

        /* Round to nearest aligned value */
        if (align)
                x = (x + (1 << (align - 1))) & mask;

        return x;
}
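
/*
 * Worked example (editor's note): with align = 4 the helper snaps to
 * multiples of 16, so clamp_align(19, 8, 64, 4) first clamps 19 to the
 * aligned range [16, 64] and then rounds to the nearest multiple of 16,
 * returning 16; clamp_align(25, 8, 64, 4) returns 32.
 */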

static unsigned int clamp_roundup(unsigned int x, unsigned int min,
                                  unsigned int max, unsigned int alignment)
{
        x = clamp(x, min, max);
        if (alignment)
                x = round_up(x, alignment);

        return x;
}

void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
                           unsigned int walign,
                           u32 *h, unsigned int hmin, unsigned int hmax,
                           unsigned int halign, unsigned int salign)
{
        *w = clamp_align(*w, wmin, wmax, walign);
        *h = clamp_align(*h, hmin, hmax, halign);

        /* Usually we don't need to align the size and are done now. */
        if (!salign)
                return;

        /* How much alignment do we have? */
        walign = __ffs(*w);
        halign = __ffs(*h);
        /* Enough to satisfy the image alignment? */
        if (walign + halign < salign) {
                /* Max walign where there is still a valid width */
                unsigned int wmaxa = __fls(wmax ^ (wmin - 1));
                /* Max halign where there is still a valid height */
                unsigned int hmaxa = __fls(hmax ^ (hmin - 1));

                /* up the smaller alignment until we have enough */
                do {
                        if (halign >= hmaxa ||
                            (walign <= halign && walign < wmaxa)) {
                                *w = clamp_align(*w, wmin, wmax, walign + 1);
                                walign = __ffs(*w);
                        } else {
                                *h = clamp_align(*h, hmin, hmax, halign + 1);
                                halign = __ffs(*h);
                        }
                } while (halign + walign < salign);
        }
}
EXPORT_SYMBOL_GPL(v4l_bound_align_image);
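
/*
 * Illustrative use (editor's sketch): a try_fmt handler can bound a
 * requested capture size to hardware limits.  The limits below are
 * assumptions, not values taken from this file:
 *
 *      // width 48..4096 on multiples of 16 (walign = 4),
 *      // height 32..2160 on multiples of 8 (halign = 3),
 *      // no additional constraint on the total size (salign = 0)
 *      v4l_bound_align_image(&pix->width, 48, 4096, 4,
 *                            &pix->height, 32, 2160, 3, 0);
 */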

const void *
__v4l2_find_nearest_size(const void *array, size_t array_size,
                         size_t entry_size, size_t width_offset,
                         size_t height_offset, s32 width, s32 height)
{
        u32 error, min_error = U32_MAX;
        const void *best = NULL;
        unsigned int i;

        if (!array)
                return NULL;

        for (i = 0; i < array_size; i++, array += entry_size) {
                const u32 *entry_width = array + width_offset;
                const u32 *entry_height = array + height_offset;

                error = abs(*entry_width - width) + abs(*entry_height - height);
                if (error > min_error)
                        continue;

                min_error = error;
                best = array;
                if (!error)
                        break;
        }

        return best;
}
EXPORT_SYMBOL_GPL(__v4l2_find_nearest_size);
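
/*
 * Illustrative use (editor's sketch): drivers normally reach this helper
 * through the v4l2_find_nearest_size() macro in media/v4l2-common.h,
 * passing an array of entries that carry u32 width/height fields.  The
 * table below is an assumption for illustration only:
 *
 *      static const struct v4l2_frmsize_discrete sizes[] = {
 *              {  640,  480 },
 *              { 1280,  720 },
 *              { 1920, 1080 },
 *      };
 *
 *      const struct v4l2_frmsize_discrete *best =
 *              v4l2_find_nearest_size(sizes, ARRAY_SIZE(sizes),
 *                                     width, height,
 *                                     pix->width, pix->height);
 */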

int v4l2_g_parm_cap(struct video_device *vdev,
                    struct v4l2_subdev *sd, struct v4l2_streamparm *a)
{
        struct v4l2_subdev_frame_interval ival = { 0 };
        int ret;

        if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
            a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
                return -EINVAL;

        if (vdev->device_caps & V4L2_CAP_READWRITE)
                a->parm.capture.readbuffers = 2;
        if (v4l2_subdev_has_op(sd, video, g_frame_interval))
                a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
        ret = v4l2_subdev_call(sd, video, g_frame_interval, &ival);
        if (!ret)
                a->parm.capture.timeperframe = ival.interval;
        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_g_parm_cap);

int v4l2_s_parm_cap(struct video_device *vdev,
                    struct v4l2_subdev *sd, struct v4l2_streamparm *a)
{
        struct v4l2_subdev_frame_interval ival = {
                .interval = a->parm.capture.timeperframe
        };
        int ret;

        if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
            a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
                return -EINVAL;

        memset(&a->parm, 0, sizeof(a->parm));
        if (vdev->device_caps & V4L2_CAP_READWRITE)
                a->parm.capture.readbuffers = 2;
        else
                a->parm.capture.readbuffers = 0;

        if (v4l2_subdev_has_op(sd, video, g_frame_interval))
                a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
        ret = v4l2_subdev_call(sd, video, s_frame_interval, &ival);
        if (!ret)
                a->parm.capture.timeperframe = ival.interval;
        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_s_parm_cap);
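
/*
 * Illustrative use (editor's sketch): bridge drivers that forward frame
 * interval handling to a sub-device can wire these helpers into their
 * ioctl ops.  The driver structure and field names below are assumptions
 * for illustration:
 *
 *      static int mydrv_g_parm(struct file *file, void *fh,
 *                              struct v4l2_streamparm *a)
 *      {
 *              struct mydrv *priv = video_drvdata(file);
 *
 *              return v4l2_g_parm_cap(&priv->vdev, priv->sensor_sd, a);
 *      }
 *
 * and similarly v4l2_s_parm_cap() from the vidioc_s_parm handler.
 */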

const struct v4l2_format_info *v4l2_format_info(u32 format)
{
        static const struct v4l2_format_info formats[] = {
                /* RGB formats */
                { .format = V4L2_PIX_FMT_BGR24, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_RGB24, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_HSV24, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_BGR32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_XBGR32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_BGRX32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_RGB32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_XRGB32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_RGBX32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_HSV32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_ARGB32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_RGBA32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_ABGR32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_BGRA32, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_GREY, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },

                /* YUV packed formats */
                { .format = V4L2_PIX_FMT_YUYV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_YVYU, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_UYVY, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_VYUY, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 },

                /* YUV planar formats */
                { .format = V4L2_PIX_FMT_NV12, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_NV21, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_NV16, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_NV61, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_NV24, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_NV42, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 },

                { .format = V4L2_PIX_FMT_YUV410, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
                { .format = V4L2_PIX_FMT_YVU410, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 },
                { .format = V4L2_PIX_FMT_YUV411P, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_YUV420, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_YVU420, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_YUV422P, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },

                /* YUV planar formats, non contiguous variant */
                { .format = V4L2_PIX_FMT_YUV420M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_YVU420M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_YUV422M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_YVU422M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_YUV444M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_YVU444M, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 1, .vdiv = 1 },

                { .format = V4L2_PIX_FMT_NV12M, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_NV21M, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
                { .format = V4L2_PIX_FMT_NV16M, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_NV61M, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 },

                /* Bayer RGB formats */
                { .format = V4L2_PIX_FMT_SBGGR8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGBRG8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGRBG8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SRGGB8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SBGGR10, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGBRG10, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGRBG10, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SRGGB10, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SBGGR10ALAW8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGBRG10ALAW8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGRBG10ALAW8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SRGGB10ALAW8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SBGGR10DPCM8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGBRG10DPCM8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGRBG10DPCM8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SRGGB10DPCM8, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SBGGR12, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGBRG12, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SGRBG12, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
                { .format = V4L2_PIX_FMT_SRGGB12, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
        };
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(formats); ++i)
                if (formats[i].format == format)
                        return &formats[i];
        return NULL;
}
EXPORT_SYMBOL(v4l2_format_info);
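
/*
 * Illustrative use (editor's sketch): the returned descriptor can be used
 * to derive plane sizes by hand.  For V4L2_PIX_FMT_NV12 it reports one
 * memory plane, two component planes, bpp = {1, 2} and 2x2 chroma
 * subsampling, so a WxH image needs roughly W*H + 2*(W/2)*(H/2) bytes:
 *
 *      const struct v4l2_format_info *info = v4l2_format_info(V4L2_PIX_FMT_NV12);
 *
 *      if (info)
 *              luma_size = info->bpp[0] * width * height;
 */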

static inline unsigned int v4l2_format_block_width(const struct v4l2_format_info *info, int plane)
{
        if (!info->block_w[plane])
                return 1;
        return info->block_w[plane];
}

static inline unsigned int v4l2_format_block_height(const struct v4l2_format_info *info, int plane)
{
        if (!info->block_h[plane])
                return 1;
        return info->block_h[plane];
}

void v4l2_apply_frmsize_constraints(u32 *width, u32 *height,
                                    const struct v4l2_frmsize_stepwise *frmsize)
{
        if (!frmsize)
                return;

        /*
         * Clamp width/height to meet min/max constraints and round it up to
         * macroblock alignment.
         */
        *width = clamp_roundup(*width, frmsize->min_width, frmsize->max_width,
                               frmsize->step_width);
        *height = clamp_roundup(*height, frmsize->min_height, frmsize->max_height,
                                frmsize->step_height);
}
EXPORT_SYMBOL_GPL(v4l2_apply_frmsize_constraints);
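
/*
 * Illustrative use (editor's sketch): a codec driver can keep a
 * v4l2_frmsize_stepwise table describing its limits and apply it while
 * negotiating a format; the values below are assumptions:
 *
 *      static const struct v4l2_frmsize_stepwise mydrv_frmsize = {
 *              .min_width = 64,  .max_width = 3840,  .step_width = 16,
 *              .min_height = 64, .max_height = 2160, .step_height = 16,
 *      };
 *
 *      v4l2_apply_frmsize_constraints(&pix->width, &pix->height,
 *                                     &mydrv_frmsize);
 */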

int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt,
                        u32 pixelformat, u32 width, u32 height)
{
        const struct v4l2_format_info *info;
        struct v4l2_plane_pix_format *plane;
        int i;

        info = v4l2_format_info(pixelformat);
        if (!info)
                return -EINVAL;

        pixfmt->width = width;
        pixfmt->height = height;
        pixfmt->pixelformat = pixelformat;
        pixfmt->num_planes = info->mem_planes;

        if (info->mem_planes == 1) {
                plane = &pixfmt->plane_fmt[0];
                plane->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0];
                plane->sizeimage = 0;

                for (i = 0; i < info->comp_planes; i++) {
                        unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
                        unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
                        unsigned int aligned_width;
                        unsigned int aligned_height;

                        aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
                        aligned_height = ALIGN(height, v4l2_format_block_height(info, i));

                        plane->sizeimage += info->bpp[i] *
                                DIV_ROUND_UP(aligned_width, hdiv) *
                                DIV_ROUND_UP(aligned_height, vdiv);
                }
        } else {
                for (i = 0; i < info->comp_planes; i++) {
                        unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
                        unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
                        unsigned int aligned_width;
                        unsigned int aligned_height;

                        aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
                        aligned_height = ALIGN(height, v4l2_format_block_height(info, i));

                        plane = &pixfmt->plane_fmt[i];
                        plane->bytesperline =
                                info->bpp[i] * DIV_ROUND_UP(aligned_width, hdiv);
                        plane->sizeimage =
                                plane->bytesperline * DIV_ROUND_UP(aligned_height, vdiv);
                }
        }
        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt_mp);
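
/*
 * Illustrative use (editor's sketch): after clamping width/height, a
 * multi-planar try_fmt/s_fmt handler can let this helper compute
 * bytesperline and sizeimage for every plane:
 *
 *      struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
 *
 *      return v4l2_fill_pixfmt_mp(pix_mp, pix_mp->pixelformat,
 *                                 pix_mp->width, pix_mp->height);
 */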

int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
                     u32 width, u32 height)
{
        const struct v4l2_format_info *info;
        int i;

        info = v4l2_format_info(pixelformat);
        if (!info)
                return -EINVAL;

        /* Single planar API cannot be used for multi plane formats. */
        if (info->mem_planes > 1)
                return -EINVAL;

        pixfmt->width = width;
        pixfmt->height = height;
        pixfmt->pixelformat = pixelformat;
        pixfmt->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0];
        pixfmt->sizeimage = 0;

        for (i = 0; i < info->comp_planes; i++) {
                unsigned int hdiv = (i == 0) ? 1 : info->hdiv;
                unsigned int vdiv = (i == 0) ? 1 : info->vdiv;
                unsigned int aligned_width;
                unsigned int aligned_height;

                aligned_width = ALIGN(width, v4l2_format_block_width(info, i));
                aligned_height = ALIGN(height, v4l2_format_block_height(info, i));

                pixfmt->sizeimage += info->bpp[i] *
                        DIV_ROUND_UP(aligned_width, hdiv) *
                        DIV_ROUND_UP(aligned_height, vdiv);
        }
        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt);
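
/*
 * Illustrative use (editor's sketch): the single-planar counterpart,
 * e.g. from a vidioc_try_fmt_vid_cap handler; the bounds below are
 * assumptions:
 *
 *      struct v4l2_pix_format *pix = &f->fmt.pix;
 *
 *      pix->width = clamp(pix->width, 48U, 4096U);
 *      pix->height = clamp(pix->height, 32U, 2160U);
 *      return v4l2_fill_pixfmt(pix, pix->pixelformat,
 *                              pix->width, pix->height);
 */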