// SPDX-License-Identifier: GPL-2.0-only
/*
 * ispvideo.c
 *
 * TI OMAP3 ISP - Generic video node
 *
 * Copyright (C) 2009-2010 Nokia Corporation
 *
 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	     Sakari Ailus <sakari.ailus@iki.fi>
 */

#include <asm/cacheflush.h>
#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mc.h>
#include <media/videobuf2-dma-contig.h>

#include "ispvideo.h"
#include "isp.h"


/* -----------------------------------------------------------------------------
 * Helper functions
 */

/*
 * NOTE: When adding new media bus codes, always remember to add
 * corresponding in-memory formats to the table below!!!
 */
static struct isp_format_info formats[] = {
	{ MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
	  MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_GREY, 8, 1, },
	{ MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y10_1X10,
	  MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_Y10, 10, 2, },
	{ MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y10_1X10,
	  MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_Y12, 12, 2, },
	{ MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
	  MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR8, 8, 1, },
	{ MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
	  MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG8, 8, 1, },
	{ MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
	  MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG8, 8, 1, },
	{ MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
	  MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB8, 8, 1, },
	{ MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
	  MEDIA_BUS_FMT_SBGGR10_1X10, 0,
	  V4L2_PIX_FMT_SBGGR10DPCM8, 8, 1, },
	{ MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
	  MEDIA_BUS_FMT_SGBRG10_1X10, 0,
	  V4L2_PIX_FMT_SGBRG10DPCM8, 8, 1, },
	{ MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
	  MEDIA_BUS_FMT_SGRBG10_1X10, 0,
	  V4L2_PIX_FMT_SGRBG10DPCM8, 8, 1, },
	{ MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
	  MEDIA_BUS_FMT_SRGGB10_1X10, 0,
	  V4L2_PIX_FMT_SRGGB10DPCM8, 8, 1, },
	{ MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR10_1X10,
	  MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR10, 10, 2, },
	{ MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10,
	  MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG10, 10, 2, },
	{ MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10,
	  MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG10, 10, 2, },
	{ MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10,
	  MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB10, 10, 2, },
	{ MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR10_1X10,
	  MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR12, 12, 2, },
	{ MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG10_1X10,
	  MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG12, 12, 2, },
	{ MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG10_1X10,
	  MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG12, 12, 2, },
	{ MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB10_1X10,
	  MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB12, 12, 2, },
	{ MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_UYVY8_1X16,
	  MEDIA_BUS_FMT_UYVY8_1X16, 0,
	  V4L2_PIX_FMT_UYVY, 16, 2, },
	{ MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_YUYV8_1X16,
	  MEDIA_BUS_FMT_YUYV8_1X16, 0,
	  V4L2_PIX_FMT_YUYV, 16, 2, },
	{ MEDIA_BUS_FMT_UYVY8_2X8, MEDIA_BUS_FMT_UYVY8_2X8,
	  MEDIA_BUS_FMT_UYVY8_2X8, 0,
	  V4L2_PIX_FMT_UYVY, 8, 2, },
	{ MEDIA_BUS_FMT_YUYV8_2X8, MEDIA_BUS_FMT_YUYV8_2X8,
	  MEDIA_BUS_FMT_YUYV8_2X8, 0,
	  V4L2_PIX_FMT_YUYV, 8, 2, },
	/* Empty entry to catch the unsupported pixel code (0) used by the CCDC
	 * module and avoid NULL pointer dereferences.
	 */
	{ 0, }
};

const struct isp_format_info *omap3isp_video_format_info(u32 code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
		if (formats[i].code == code)
			return &formats[i];
	}

	return NULL;
}
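
/*
 * Illustrative sketch (not part of the driver): a caller such as an ISP
 * module can use the table above through omap3isp_video_format_info() to
 * map a media bus code to its memory format description, e.g.:
 *
 *	const struct isp_format_info *info;
 *
 *	info = omap3isp_video_format_info(MEDIA_BUS_FMT_SGRBG10_1X10);
 *	if (info)
 *		pr_debug("fourcc %08x, %u bytes per pixel\n",
 *			 info->pixelformat, info->bpp);
 *
 * For the 10-bit Bayer entry this would report V4L2_PIX_FMT_SGRBG10 and a
 * bpp value of 2 (each sample is stored in two bytes in memory).
 */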

/*
 * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
 * @video: ISP video instance
 * @mbus: v4l2_mbus_framefmt format (input)
 * @pix: v4l2_pix_format format (output)
 *
 * Fill the output pix structure with information from the input mbus format.
 * The bytesperline and sizeimage fields are computed from the requested bytes
 * per line value in the pix format and information from the video instance.
 *
 * Return the number of padding bytes at end of line.
 */
static unsigned int isp_video_mbus_to_pix(const struct isp_video *video,
					  const struct v4l2_mbus_framefmt *mbus,
					  struct v4l2_pix_format *pix)
{
	unsigned int bpl = pix->bytesperline;
	unsigned int min_bpl;
	unsigned int i;

	memset(pix, 0, sizeof(*pix));
	pix->width = mbus->width;
	pix->height = mbus->height;

	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
		if (formats[i].code == mbus->code)
			break;
	}

	if (WARN_ON(i == ARRAY_SIZE(formats)))
		return 0;

	min_bpl = pix->width * formats[i].bpp;

	/* Clamp the requested bytes per line value. If the maximum bytes per
	 * line value is zero, the module doesn't support user configurable line
	 * sizes. Override the requested value with the minimum in that case.
	 */
	if (video->bpl_max)
		bpl = clamp(bpl, min_bpl, video->bpl_max);
	else
		bpl = min_bpl;

	if (!video->bpl_zero_padding || bpl != min_bpl)
		bpl = ALIGN(bpl, video->bpl_alignment);

	pix->pixelformat = formats[i].pixelformat;
	pix->bytesperline = bpl;
	pix->sizeimage = pix->bytesperline * pix->height;
	pix->colorspace = mbus->colorspace;
	pix->field = mbus->field;

	return bpl - min_bpl;
}
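
/*
 * Worked example (illustrative only, assuming a video node with a 32-byte
 * bytesperline alignment and a sufficiently large bpl_max): for a 2048
 * pixel wide 10-bit Bayer frame the minimum line size is
 * 2048 * 2 = 4096 bytes. A userspace request of bytesperline = 4100 is
 * first clamped to [4096, bpl_max] and then aligned up to 4128 bytes, so
 * the function returns 4128 - 4096 = 32 bytes of end-of-line padding and
 * sizeimage becomes 4128 * height.
 */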

static void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix,
				  struct v4l2_mbus_framefmt *mbus)
{
	unsigned int i;

	memset(mbus, 0, sizeof(*mbus));
	mbus->width = pix->width;
	mbus->height = pix->height;

	/* Skip the last format in the loop so that it will be selected if no
	 * match is found.
	 */
	for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
		if (formats[i].pixelformat == pix->pixelformat)
			break;
	}

	mbus->code = formats[i].code;
	mbus->colorspace = pix->colorspace;
	mbus->field = pix->field;
}

static struct v4l2_subdev *
isp_video_remote_subdev(struct isp_video *video, u32 *pad)
{
	struct media_pad *remote;

	remote = media_entity_remote_pad(&video->pad);

	if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
		return NULL;

	if (pad)
		*pad = remote->index;

	return media_entity_to_v4l2_subdev(remote->entity);
}

/*
 * Walk the media graph to enumerate the entities in the pipeline and find the
 * ISP video instance at the far end of the pipeline.
 */
static int isp_video_get_graph_data(struct isp_video *video,
				    struct isp_pipeline *pipe)
{
	struct media_graph graph;
	struct media_entity *entity = &video->video.entity;
	struct media_device *mdev = entity->graph_obj.mdev;
	struct isp_video *far_end = NULL;
	int ret;

	mutex_lock(&mdev->graph_mutex);
	ret = media_graph_walk_init(&graph, mdev);
	if (ret) {
		mutex_unlock(&mdev->graph_mutex);
		return ret;
	}

	media_graph_walk_start(&graph, entity);

	while ((entity = media_graph_walk_next(&graph))) {
		struct isp_video *__video;

		media_entity_enum_set(&pipe->ent_enum, entity);

		if (far_end != NULL)
			continue;

		if (entity == &video->video.entity)
			continue;

		if (!is_media_entity_v4l2_video_device(entity))
			continue;

		__video = to_isp_video(media_entity_to_video_device(entity));
		if (__video->type != video->type)
			far_end = __video;
	}

	mutex_unlock(&mdev->graph_mutex);

	media_graph_walk_cleanup(&graph);

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		pipe->input = far_end;
		pipe->output = video;
	} else {
		if (far_end == NULL)
			return -EPIPE;

		pipe->input = video;
		pipe->output = far_end;
	}

	return 0;
}
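
/*
 * In practice (illustrative summary): for a capture video node fed by a
 * sensor there is no other video node in the graph, so far_end stays NULL
 * and pipe->input ends up NULL, marking the pipeline as sensor-to-memory.
 * For a memory-to-memory pipeline the walk finds the video node of the
 * opposite type, which becomes pipe->input for capture nodes or
 * pipe->output for output nodes.
 */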

static int
__isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
{
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	u32 pad;
	int ret;

	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	fmt.pad = pad;
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;

	mutex_lock(&video->mutex);
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	mutex_unlock(&video->mutex);

	if (ret)
		return ret;

	format->type = video->type;
	return isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
}

static int
isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
{
	struct v4l2_format format;
	int ret;

	memcpy(&format, &vfh->format, sizeof(format));
	ret = __isp_video_get_format(video, &format);
	if (ret < 0)
		return ret;

	if (vfh->format.fmt.pix.pixelformat != format.fmt.pix.pixelformat ||
	    vfh->format.fmt.pix.height != format.fmt.pix.height ||
	    vfh->format.fmt.pix.width != format.fmt.pix.width ||
	    vfh->format.fmt.pix.bytesperline != format.fmt.pix.bytesperline ||
	    vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage ||
	    vfh->format.fmt.pix.field != format.fmt.pix.field)
		return -EINVAL;

	return 0;
}

/* -----------------------------------------------------------------------------
 * Video queue operations
 */

static int isp_video_queue_setup(struct vb2_queue *queue,
				 unsigned int *count, unsigned int *num_planes,
				 unsigned int sizes[], struct device *alloc_devs[])
{
	struct isp_video_fh *vfh = vb2_get_drv_priv(queue);
	struct isp_video *video = vfh->video;

	*num_planes = 1;

	sizes[0] = vfh->format.fmt.pix.sizeimage;
	if (sizes[0] == 0)
		return -EINVAL;

	*count = min(*count, video->capture_mem / PAGE_ALIGN(sizes[0]));

	return 0;
}
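
/*
 * Illustrative numbers (not taken from the driver): a 4128-byte line and
 * 1536 lines give a sizeimage of 6340608 bytes, which PAGE_ALIGN() leaves
 * unchanged on 4 KiB pages. If capture_mem were configured to 48 MiB, the
 * requested buffer count would be clamped to 50331648 / 6340608 = 7
 * buffers.
 */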

static int isp_video_buffer_prepare(struct vb2_buffer *buf)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(buf);
	struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue);
	struct isp_buffer *buffer = to_isp_buffer(vbuf);
	struct isp_video *video = vfh->video;
	dma_addr_t addr;

	/* Refuse to prepare the buffer if the video node has registered an
	 * error. We don't need to take any lock here as the operation is
	 * inherently racy. The authoritative check will be performed in the
	 * queue handler, which can't return an error; this check is just a
	 * best effort to notify userspace as early as possible.
	 */
	if (unlikely(video->error))
		return -EIO;

	addr = vb2_dma_contig_plane_dma_addr(buf, 0);
	if (!IS_ALIGNED(addr, 32)) {
		dev_dbg(video->isp->dev,
			"Buffer address must be aligned to a 32-byte boundary.\n");
		return -EINVAL;
	}

	vb2_set_plane_payload(&buffer->vb.vb2_buf, 0,
			      vfh->format.fmt.pix.sizeimage);
	buffer->dma = addr;

	return 0;
}
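
/*
 * Note (illustrative): buffers allocated through the vb2-dma-contig MMAP
 * path are page aligned and always pass the 32-byte check above. The check
 * mainly catches USERPTR buffers whose user-space address is not suitably
 * aligned, since the resulting DMA address inherits that misalignment.
 */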

/*
 * isp_video_buffer_queue - Add buffer to streaming queue
 * @buf: Video buffer
 *
 * In memory-to-memory mode, start streaming on the pipeline if buffers are
 * queued on both the input and the output, if the pipeline isn't already busy.
 * If the pipeline is busy, it will be restarted in the output module interrupt
 * handler.
 */
static void isp_video_buffer_queue(struct vb2_buffer *buf)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(buf);
	struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue);
	struct isp_buffer *buffer = to_isp_buffer(vbuf);
	struct isp_video *video = vfh->video;
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	unsigned long flags;
	unsigned int empty;
	unsigned int start;

	spin_lock_irqsave(&video->irqlock, flags);

	if (unlikely(video->error)) {
		vb2_buffer_done(&buffer->vb.vb2_buf, VB2_BUF_STATE_ERROR);
		spin_unlock_irqrestore(&video->irqlock, flags);
		return;
	}

	empty = list_empty(&video->dmaqueue);
	list_add_tail(&buffer->irqlist, &video->dmaqueue);

	spin_unlock_irqrestore(&video->irqlock, flags);

	if (empty) {
		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT;
		else
			state = ISP_PIPELINE_QUEUE_INPUT;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state |= state;
		video->ops->queue(video, buffer);
		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;

		start = isp_pipeline_ready(pipe);
		if (start)
			pipe->state |= ISP_PIPELINE_STREAM;
		spin_unlock_irqrestore(&pipe->lock, flags);

		if (start)
			omap3isp_pipeline_set_stream(pipe,
						ISP_PIPELINE_STREAM_SINGLESHOT);
	}
}
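
/*
 * Put differently (illustrative summary): in memory-to-memory mode the
 * hardware only runs in single-shot bursts. Each side that queues a buffer
 * on an empty DMA queue sets its ISP_PIPELINE_QUEUE_* bit, and once
 * isp_pipeline_ready() reports that both sides are streaming and have a
 * buffer available, the pipeline is kicked with
 * ISP_PIPELINE_STREAM_SINGLESHOT to process exactly one frame.
 */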

/*
 * omap3isp_video_return_buffers - Return all queued buffers to videobuf2
 * @video: ISP video object
 * @state: new state for the returned buffers
 *
 * Return all buffers queued on the video node to videobuf2 in the given state.
 * The buffer state should be VB2_BUF_STATE_QUEUED if called due to an error
 * when starting the stream, or VB2_BUF_STATE_ERROR otherwise.
 *
 * The function must be called with the video irqlock held.
 */
static void omap3isp_video_return_buffers(struct isp_video *video,
					  enum vb2_buffer_state state)
{
	while (!list_empty(&video->dmaqueue)) {
		struct isp_buffer *buf;

		buf = list_first_entry(&video->dmaqueue,
				       struct isp_buffer, irqlist);
		list_del(&buf->irqlist);
		vb2_buffer_done(&buf->vb.vb2_buf, state);
	}
}

static int isp_video_start_streaming(struct vb2_queue *queue,
				     unsigned int count)
{
	struct isp_video_fh *vfh = vb2_get_drv_priv(queue);
	struct isp_video *video = vfh->video;
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	unsigned long flags;
	int ret;

	/* In sensor-to-memory mode, the stream can be started synchronously
	 * with the stream on command. In memory-to-memory mode, it will be
	 * started when buffers are queued on both the input and output.
	 */
	if (pipe->input)
		return 0;

	ret = omap3isp_pipeline_set_stream(pipe,
					   ISP_PIPELINE_STREAM_CONTINUOUS);
	if (ret < 0) {
		spin_lock_irqsave(&video->irqlock, flags);
		omap3isp_video_return_buffers(video, VB2_BUF_STATE_QUEUED);
		spin_unlock_irqrestore(&video->irqlock, flags);
		return ret;
	}

	spin_lock_irqsave(&video->irqlock, flags);
	if (list_empty(&video->dmaqueue))
		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
	spin_unlock_irqrestore(&video->irqlock, flags);

	return 0;
}

static const struct vb2_ops isp_video_queue_ops = {
	.queue_setup = isp_video_queue_setup,
	.buf_prepare = isp_video_buffer_prepare,
	.buf_queue = isp_video_buffer_queue,
	.start_streaming = isp_video_start_streaming,
};

/*
 * omap3isp_video_buffer_next - Complete the current buffer and return the next
 * @video: ISP video object
 *
 * Remove the current video buffer from the DMA queue and fill its timestamp and
 * field count before handing it back to videobuf2.
 *
 * For capture video nodes the buffer state is set to VB2_BUF_STATE_DONE if no
 * error has been flagged in the pipeline, or to VB2_BUF_STATE_ERROR otherwise.
 * For video output nodes the buffer state is always set to VB2_BUF_STATE_DONE.
 *
 * The DMA queue is expected to contain at least one buffer.
 *
 * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
 * empty.
 */
struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum vb2_buffer_state vb_state;
	struct isp_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&video->irqlock, flags);
	if (WARN_ON(list_empty(&video->dmaqueue))) {
		spin_unlock_irqrestore(&video->irqlock, flags);
		return NULL;
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_buffer,
			       irqlist);
	list_del(&buf->irqlist);
	spin_unlock_irqrestore(&video->irqlock, flags);

	buf->vb.vb2_buf.timestamp = ktime_get_ns();

	/* Do frame number propagation only if this is the output video node.
	 * Frame number either comes from the CSI receivers or it gets
	 * incremented here if H3A is not active.
	 * Note: There is no guarantee that the output buffer will finish
	 * first, so the input number might lag behind by 1 in some cases.
	 */
	if (video == pipe->output && !pipe->do_propagation)
		buf->vb.sequence =
			atomic_inc_return(&pipe->frame_number);
	else
		buf->vb.sequence = atomic_read(&pipe->frame_number);

	if (pipe->field != V4L2_FIELD_NONE)
		buf->vb.sequence /= 2;

	buf->vb.field = pipe->field;

	/* Report pipeline errors to userspace on the capture device side. */
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
		vb_state = VB2_BUF_STATE_ERROR;
		pipe->error = false;
	} else {
		vb_state = VB2_BUF_STATE_DONE;
	}

	vb2_buffer_done(&buf->vb.vb2_buf, vb_state);

	spin_lock_irqsave(&video->irqlock, flags);

	if (list_empty(&video->dmaqueue)) {
		enum isp_pipeline_state state;

		spin_unlock_irqrestore(&video->irqlock, flags);

		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT
			      | ISP_PIPELINE_STREAM;
		else
			state = ISP_PIPELINE_QUEUE_INPUT
			      | ISP_PIPELINE_STREAM;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state &= ~state;
		if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS)
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
		spin_unlock_irqrestore(&pipe->lock, flags);
		return NULL;
	}

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
		spin_lock(&pipe->lock);
		pipe->state &= ~ISP_PIPELINE_STREAM;
		spin_unlock(&pipe->lock);
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_buffer,
			       irqlist);

	spin_unlock_irqrestore(&video->irqlock, flags);

	return buf;
}

/*
 * omap3isp_video_cancel_stream - Cancel stream on a video node
 * @video: ISP video object
 *
 * Cancelling a stream returns all buffers queued on the video node to
 * videobuf2 in the error state and makes sure no new buffer can be queued.
 */
void omap3isp_video_cancel_stream(struct isp_video *video)
{
	unsigned long flags;

	spin_lock_irqsave(&video->irqlock, flags);
	omap3isp_video_return_buffers(video, VB2_BUF_STATE_ERROR);
	video->error = true;
	spin_unlock_irqrestore(&video->irqlock, flags);
}

/*
 * omap3isp_video_resume - Perform resume operation on the buffers
 * @video: ISP video object
 * @continuous: Pipeline is in single shot mode if 0 or continuous mode otherwise
 *
 * This function is intended to be used in suspend/resume scenarios. It
 * requests the video queue layer to discard buffers marked as DONE if the
 * pipeline is in continuous mode, and requests the ISP modules to requeue
 * the ACTIVE buffer if there is one.
 */
void omap3isp_video_resume(struct isp_video *video, int continuous)
{
	struct isp_buffer *buf = NULL;

	if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		mutex_lock(&video->queue_lock);
		vb2_discard_done(video->queue);
		mutex_unlock(&video->queue_lock);
	}

	if (!list_empty(&video->dmaqueue)) {
		buf = list_first_entry(&video->dmaqueue,
				       struct isp_buffer, irqlist);
		video->ops->queue(video, buf);
		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
	} else {
		if (continuous)
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
	}
}

/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

static int
isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
	struct isp_video *video = video_drvdata(file);

	strscpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver));
	strscpy(cap->card, video->video.name, sizeof(cap->card));
	strscpy(cap->bus_info, "media", sizeof(cap->bus_info));

	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
		| V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;

	return 0;
}

static int
isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (format->type != video->type)
		return -EINVAL;

	mutex_lock(&video->mutex);
	*format = vfh->format;
	mutex_unlock(&video->mutex);

	return 0;
}

static int
isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	struct v4l2_mbus_framefmt fmt;

	if (format->type != video->type)
		return -EINVAL;

	/* Replace unsupported field orders with sane defaults. */
	switch (format->fmt.pix.field) {
	case V4L2_FIELD_NONE:
		/* Progressive is supported everywhere. */
		break;
	case V4L2_FIELD_ALTERNATE:
		/* ALTERNATE is not supported on output nodes. */
		if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
			format->fmt.pix.field = V4L2_FIELD_NONE;
		break;
	case V4L2_FIELD_INTERLACED:
		/* The ISP has no concept of video standard; select the
		 * top-bottom order when the unqualified interlaced order is
		 * requested.
		 */
		format->fmt.pix.field = V4L2_FIELD_INTERLACED_TB;
		/* Fall-through */
	case V4L2_FIELD_INTERLACED_TB:
	case V4L2_FIELD_INTERLACED_BT:
		/* Interlaced orders are only supported at the CCDC output. */
		if (video != &video->isp->isp_ccdc.video_out)
			format->fmt.pix.field = V4L2_FIELD_NONE;
		break;
	case V4L2_FIELD_TOP:
	case V4L2_FIELD_BOTTOM:
	case V4L2_FIELD_SEQ_TB:
	case V4L2_FIELD_SEQ_BT:
	default:
		/* All other field orders are currently unsupported, default to
		 * progressive.
		 */
		format->fmt.pix.field = V4L2_FIELD_NONE;
		break;
	}

	/* Fill the bytesperline and sizeimage fields by converting to media bus
	 * format and back to pixel format.
	 */
	isp_video_pix_to_mbus(&format->fmt.pix, &fmt);
	isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix);

	mutex_lock(&video->mutex);
	vfh->format = *format;
	mutex_unlock(&video->mutex);

	return 0;
}

static int
isp_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	u32 pad;
	int ret;

	if (format->type != video->type)
		return -EINVAL;

	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	isp_video_pix_to_mbus(&format->fmt.pix, &fmt.format);

	fmt.pad = pad;
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret)
		return ret == -ENOIOCTLCMD ? -ENOTTY : ret;

	isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
	return 0;
}

static int
isp_video_get_selection(struct file *file, void *fh, struct v4l2_selection *sel)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev_format format;
	struct v4l2_subdev *subdev;
	struct v4l2_subdev_selection sdsel = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.target = sel->target,
	};
	u32 pad;
	int ret;

	switch (sel->target) {
	case V4L2_SEL_TGT_CROP:
	case V4L2_SEL_TGT_CROP_BOUNDS:
	case V4L2_SEL_TGT_CROP_DEFAULT:
		if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
			return -EINVAL;
		break;
	case V4L2_SEL_TGT_COMPOSE:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	/* Try the get selection operation first and fall back to get format
	 * if it isn't implemented.
	 */
	sdsel.pad = pad;
	ret = v4l2_subdev_call(subdev, pad, get_selection, NULL, &sdsel);
	if (!ret)
		sel->r = sdsel.r;
	if (ret != -ENOIOCTLCMD)
		return ret;

	format.pad = pad;
	format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
	if (ret < 0)
		return ret == -ENOIOCTLCMD ? -ENOTTY : ret;

	sel->r.left = 0;
	sel->r.top = 0;
	sel->r.width = format.format.width;
	sel->r.height = format.format.height;

	return 0;
}

static int
isp_video_set_selection(struct file *file, void *fh, struct v4l2_selection *sel)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev *subdev;
	struct v4l2_subdev_selection sdsel = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.target = sel->target,
		.flags = sel->flags,
		.r = sel->r,
	};
	u32 pad;
	int ret;

	switch (sel->target) {
	case V4L2_SEL_TGT_CROP:
		if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
			return -EINVAL;
		break;
	case V4L2_SEL_TGT_COMPOSE:
		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	sdsel.pad = pad;
	mutex_lock(&video->mutex);
	ret = v4l2_subdev_call(subdev, pad, set_selection, NULL, &sdsel);
	mutex_unlock(&video->mutex);
	if (!ret)
		sel->r = sdsel.r;

	return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
}

static int
isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
	    video->type != a->type)
		return -EINVAL;

	memset(a, 0, sizeof(*a));
	a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
	a->parm.output.timeperframe = vfh->timeperframe;

	return 0;
}

static int
isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
	    video->type != a->type)
		return -EINVAL;

	if (a->parm.output.timeperframe.denominator == 0)
		a->parm.output.timeperframe.denominator = 1;

	vfh->timeperframe = a->parm.output.timeperframe;

	return 0;
}

static int
isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_reqbufs(&vfh->queue, rb);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int
isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_querybuf(&vfh->queue, b);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int
isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_qbuf(&vfh->queue, video->video.v4l2_dev->mdev, b);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int
isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	int ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_dqbuf(&vfh->queue, b, file->f_flags & O_NONBLOCK);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int isp_video_check_external_subdevs(struct isp_video *video,
					    struct isp_pipeline *pipe)
{
	struct isp_device *isp = video->isp;
	struct media_entity *ents[] = {
		&isp->isp_csi2a.subdev.entity,
		&isp->isp_csi2c.subdev.entity,
		&isp->isp_ccp2.subdev.entity,
		&isp->isp_ccdc.subdev.entity
	};
	struct media_pad *source_pad;
	struct media_entity *source = NULL;
	struct media_entity *sink;
	struct v4l2_subdev_format fmt;
	struct v4l2_ext_controls ctrls;
	struct v4l2_ext_control ctrl;
	unsigned int i;
	int ret;

	/* Memory-to-memory pipelines have no external subdev. */
	if (pipe->input != NULL)
		return 0;

	for (i = 0; i < ARRAY_SIZE(ents); i++) {
		/* Is the entity part of the pipeline? */
		if (!media_entity_enum_test(&pipe->ent_enum, ents[i]))
			continue;

		/* ISP entities always have their sink pad at index 0. Find the source. */
		source_pad = media_entity_remote_pad(&ents[i]->pads[0]);
		if (source_pad == NULL)
			continue;

		source = source_pad->entity;
		sink = ents[i];
		break;
	}

	if (!source) {
		dev_warn(isp->dev, "can't find source, failing now\n");
		return -EINVAL;
	}

	if (!is_media_entity_v4l2_subdev(source))
		return 0;

	pipe->external = media_entity_to_v4l2_subdev(source);

	fmt.pad = source_pad->index;
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(media_entity_to_v4l2_subdev(sink),
			       pad, get_fmt, NULL, &fmt);
	if (unlikely(ret < 0)) {
		dev_warn(isp->dev, "get_fmt returned an error\n");
		return ret;
	}

	pipe->external_width =
		omap3isp_video_format_info(fmt.format.code)->width;

	memset(&ctrls, 0, sizeof(ctrls));
	memset(&ctrl, 0, sizeof(ctrl));

	ctrl.id = V4L2_CID_PIXEL_RATE;

	ctrls.count = 1;
	ctrls.controls = &ctrl;
	ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, &video->video,
			       NULL, &ctrls);
	if (ret < 0) {
		dev_warn(isp->dev, "no pixel rate control in subdev %s\n",
			 pipe->external->name);
		return ret;
	}

	pipe->external_rate = ctrl.value64;

	if (media_entity_enum_test(&pipe->ent_enum,
				   &isp->isp_ccdc.subdev.entity)) {
		unsigned int rate = UINT_MAX;
		/*
		 * Check that maximum allowed CCDC pixel rate isn't
		 * exceeded by the pixel rate.
		 */
		omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
		if (pipe->external_rate > rate)
			return -ENOSPC;
	}

	return 0;
}
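
/*
 * Example of the rate check above (hypothetical numbers): a sensor
 * advertising a V4L2_CID_PIXEL_RATE of 200 Mpixel/s is rejected with
 * -ENOSPC if omap3isp_ccdc_max_rate() reports that the CCDC can only
 * sustain a lower pixel rate; otherwise the advertised rate is kept in
 * pipe->external_rate for use when configuring the rest of the pipeline.
 */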

/*
 * Stream management
 *
 * Every ISP pipeline has a single input and a single output. The input can be
 * either a sensor or a video node. The output is always a video node.
 *
 * As every pipeline has an output video node, the ISP video object at the
 * pipeline output stores the pipeline state. It tracks the streaming state of
 * both the input and output, as well as the availability of buffers.
 *
 * In sensor-to-memory mode, frames are always available at the pipeline input.
 * Starting the sensor usually requires I2C transfers and must be done in
 * interruptible context. The pipeline is started and stopped synchronously
 * with the stream on/off commands. All modules in the pipeline will get their
 * subdev set stream handler called. The module at the end of the pipeline must
 * delay starting the hardware until buffers are available at its output.
 *
 * In memory-to-memory mode, starting/stopping the stream requires
 * synchronization between the input and output. ISP modules can't be stopped
 * in the middle of a frame, and at least some of the modules seem to become
 * busy as soon as they're started, even if they don't receive a frame start
 * event. For that reason frames need to be processed in single-shot mode. The
 * driver needs to wait until a frame is completely processed and written to
 * memory before restarting the pipeline for the next frame. Pipelined
 * processing might be possible but requires more testing.
 *
 * Stream start must be delayed until buffers are available at both the input
 * and output. The pipeline must be started in the videobuf2 queue callback with
 * the buffer queue spinlock held. The modules' subdev set stream operation must
 * not sleep.
 */
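
/*
 * For reference, a minimal (and simplified) userspace capture sequence that
 * exercises the stream start path described above might look like this,
 * assuming the pipeline links and subdev formats have already been
 * configured through the media controller API:
 *
 *	int fd = open("/dev/video2", O_RDWR);	// hypothetical node
 *	struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
 *	ioctl(fd, VIDIOC_G_FMT, &fmt);
 *	ioctl(fd, VIDIOC_S_FMT, &fmt);
 *	struct v4l2_requestbuffers rb = {
 *		.count = 4, .type = fmt.type, .memory = V4L2_MEMORY_MMAP,
 *	};
 *	ioctl(fd, VIDIOC_REQBUFS, &rb);
 *	// ... mmap() and VIDIOC_QBUF each buffer ...
 *	ioctl(fd, VIDIOC_STREAMON, &fmt.type);
 *
 * VIDIOC_STREAMON ends up in isp_video_streamon() below, which validates the
 * format against the connected subdev before starting the pipeline.
 */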
static int
isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	enum isp_pipeline_state state;
	struct isp_pipeline *pipe;
	unsigned long flags;
	int ret;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	/* Start streaming on the pipeline. No link touching an entity in the
	 * pipeline can be activated or deactivated once streaming is started.
	 */
	pipe = video->video.entity.pipe
	     ? to_isp_pipeline(&video->video.entity) : &video->pipe;

	ret = media_entity_enum_init(&pipe->ent_enum, &video->isp->media_dev);
	if (ret)
		goto err_enum_init;

	/* TODO: Implement PM QoS */
	pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
	pipe->max_rate = pipe->l3_ick;

	ret = media_pipeline_start(&video->video.entity, &pipe->pipe);
	if (ret < 0)
		goto err_pipeline_start;

	/* Verify that the currently configured format matches the output of
	 * the connected subdev.
	 */
	ret = isp_video_check_format(video, vfh);
	if (ret < 0)
		goto err_check_format;

	video->bpl_padding = ret;
	video->bpl_value = vfh->format.fmt.pix.bytesperline;

	ret = isp_video_get_graph_data(video, pipe);
	if (ret < 0)
		goto err_check_format;

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT;
	else
		state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT;

	ret = isp_video_check_external_subdevs(video, pipe);
	if (ret < 0)
		goto err_check_format;

	pipe->error = false;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~ISP_PIPELINE_STREAM;
	pipe->state |= state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Set the maximum time per frame as the value requested by userspace.
	 * This is a soft limit that can be overridden if the hardware doesn't
	 * support the requested limit.
	 */
	if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
		pipe->max_timeperframe = vfh->timeperframe;

	video->queue = &vfh->queue;
	INIT_LIST_HEAD(&video->dmaqueue);
	atomic_set(&pipe->frame_number, -1);
	pipe->field = vfh->format.fmt.pix.field;

	mutex_lock(&video->queue_lock);
	ret = vb2_streamon(&vfh->queue, type);
	mutex_unlock(&video->queue_lock);
	if (ret < 0)
		goto err_check_format;

	mutex_unlock(&video->stream_lock);

	return 0;

err_check_format:
	media_pipeline_stop(&video->video.entity);
err_pipeline_start:
	/* TODO: Implement PM QoS */
	/* The DMA queue must be emptied here, otherwise CCDC interrupts that
	 * get triggered the next time the CCDC is powered up will try to
	 * access buffers that might have been freed but are still present in
	 * the DMA queue. This can easily happen if the above
	 * omap3isp_pipeline_set_stream() call fails on a system with a
	 * free-running sensor.
	 */
	INIT_LIST_HEAD(&video->dmaqueue);
	video->queue = NULL;

	media_entity_enum_cleanup(&pipe->ent_enum);

err_enum_init:
	mutex_unlock(&video->stream_lock);

	return ret;
}

static int
isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	unsigned int streaming;
	unsigned long flags;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	/* Nothing to do if the queue isn't streaming. */
	mutex_lock(&video->queue_lock);
	streaming = vb2_is_streaming(&vfh->queue);
	mutex_unlock(&video->queue_lock);

	if (!streaming)
		goto done;

	/* Update the pipeline state. */
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		state = ISP_PIPELINE_STREAM_OUTPUT
		      | ISP_PIPELINE_QUEUE_OUTPUT;
	else
		state = ISP_PIPELINE_STREAM_INPUT
		      | ISP_PIPELINE_QUEUE_INPUT;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Stop the stream. */
	omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED);
	omap3isp_video_cancel_stream(video);

	mutex_lock(&video->queue_lock);
	vb2_streamoff(&vfh->queue, type);
	mutex_unlock(&video->queue_lock);
	video->queue = NULL;
	video->error = false;

	/* TODO: Implement PM QoS */
	media_pipeline_stop(&video->video.entity);

	media_entity_enum_cleanup(&pipe->ent_enum);

done:
	mutex_unlock(&video->stream_lock);
	return 0;
}

static int
isp_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
{
	if (input->index > 0)
		return -EINVAL;

	strscpy(input->name, "camera", sizeof(input->name));
	input->type = V4L2_INPUT_TYPE_CAMERA;

	return 0;
}

static int
isp_video_g_input(struct file *file, void *fh, unsigned int *input)
{
	*input = 0;

	return 0;
}

static int
isp_video_s_input(struct file *file, void *fh, unsigned int input)
{
	return input == 0 ? 0 : -EINVAL;
}

static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
	.vidioc_querycap		= isp_video_querycap,
	.vidioc_g_fmt_vid_cap		= isp_video_get_format,
	.vidioc_s_fmt_vid_cap		= isp_video_set_format,
	.vidioc_try_fmt_vid_cap		= isp_video_try_format,
	.vidioc_g_fmt_vid_out		= isp_video_get_format,
	.vidioc_s_fmt_vid_out		= isp_video_set_format,
	.vidioc_try_fmt_vid_out		= isp_video_try_format,
	.vidioc_g_selection		= isp_video_get_selection,
	.vidioc_s_selection		= isp_video_set_selection,
	.vidioc_g_parm			= isp_video_get_param,
	.vidioc_s_parm			= isp_video_set_param,
	.vidioc_reqbufs			= isp_video_reqbufs,
	.vidioc_querybuf		= isp_video_querybuf,
	.vidioc_qbuf			= isp_video_qbuf,
	.vidioc_dqbuf			= isp_video_dqbuf,
	.vidioc_streamon		= isp_video_streamon,
	.vidioc_streamoff		= isp_video_streamoff,
	.vidioc_enum_input		= isp_video_enum_input,
	.vidioc_g_input			= isp_video_g_input,
	.vidioc_s_input			= isp_video_s_input,
};

/* -----------------------------------------------------------------------------
 * V4L2 file operations
 */

static int isp_video_open(struct file *file)
{
	struct isp_video *video = video_drvdata(file);
	struct isp_video_fh *handle;
	struct vb2_queue *queue;
	int ret = 0;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (handle == NULL)
		return -ENOMEM;

	v4l2_fh_init(&handle->vfh, &video->video);
	v4l2_fh_add(&handle->vfh);

	/* If this is the first user, initialise the pipeline. */
	if (omap3isp_get(video->isp) == NULL) {
		ret = -EBUSY;
		goto done;
	}

	ret = v4l2_pipeline_pm_use(&video->video.entity, 1);
	if (ret < 0) {
		omap3isp_put(video->isp);
		goto done;
	}

	queue = &handle->queue;
	queue->type = video->type;
	queue->io_modes = VB2_MMAP | VB2_USERPTR;
	queue->drv_priv = handle;
	queue->ops = &isp_video_queue_ops;
	queue->mem_ops = &vb2_dma_contig_memops;
	queue->buf_struct_size = sizeof(struct isp_buffer);
	queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	queue->dev = video->isp->dev;

	ret = vb2_queue_init(&handle->queue);
	if (ret < 0) {
		omap3isp_put(video->isp);
		goto done;
	}

	memset(&handle->format, 0, sizeof(handle->format));
	handle->format.type = video->type;
	handle->timeperframe.denominator = 1;

	handle->video = video;
	file->private_data = &handle->vfh;

done:
	if (ret < 0) {
		v4l2_fh_del(&handle->vfh);
		v4l2_fh_exit(&handle->vfh);
		kfree(handle);
	}

	return ret;
}

static int isp_video_release(struct file *file)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_fh *vfh = file->private_data;
	struct isp_video_fh *handle = to_isp_video_fh(vfh);

	/* Disable streaming and free the buffers queue resources. */
	isp_video_streamoff(file, vfh, video->type);

	mutex_lock(&video->queue_lock);
	vb2_queue_release(&handle->queue);
	mutex_unlock(&video->queue_lock);

	v4l2_pipeline_pm_use(&video->video.entity, 0);

	/* Release the file handle. */
	v4l2_fh_del(vfh);
	v4l2_fh_exit(vfh);
	kfree(handle);
	file->private_data = NULL;

	omap3isp_put(video->isp);

	return 0;
}

static __poll_t isp_video_poll(struct file *file, poll_table *wait)
{
	struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
	struct isp_video *video = video_drvdata(file);
	__poll_t ret;

	mutex_lock(&video->queue_lock);
	ret = vb2_poll(&vfh->queue, file, wait);
	mutex_unlock(&video->queue_lock);

	return ret;
}

static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);

	return vb2_mmap(&vfh->queue, vma);
}

static const struct v4l2_file_operations isp_video_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = isp_video_open,
	.release = isp_video_release,
	.poll = isp_video_poll,
	.mmap = isp_video_mmap,
};

/* -----------------------------------------------------------------------------
 * ISP video core
 */

static const struct isp_video_operations isp_video_dummy_ops = {
};

int omap3isp_video_init(struct isp_video *video, const char *name)
{
	const char *direction;
	int ret;

	switch (video->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		direction = "output";
		video->pad.flags = MEDIA_PAD_FL_SINK
				   | MEDIA_PAD_FL_MUST_CONNECT;
		break;
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		direction = "input";
		video->pad.flags = MEDIA_PAD_FL_SOURCE
				   | MEDIA_PAD_FL_MUST_CONNECT;
		video->video.vfl_dir = VFL_DIR_TX;
		break;

	default:
		return -EINVAL;
	}

	ret = media_entity_pads_init(&video->video.entity, 1, &video->pad);
	if (ret < 0)
		return ret;

	mutex_init(&video->mutex);
	atomic_set(&video->active, 0);

	spin_lock_init(&video->pipe.lock);
	mutex_init(&video->stream_lock);
	mutex_init(&video->queue_lock);
	spin_lock_init(&video->irqlock);

	/* Initialize the video device. */
	if (video->ops == NULL)
		video->ops = &isp_video_dummy_ops;

	video->video.fops = &isp_video_fops;
	snprintf(video->video.name, sizeof(video->video.name),
		 "OMAP3 ISP %s %s", name, direction);
	video->video.vfl_type = VFL_TYPE_GRABBER;
	video->video.release = video_device_release_empty;
	video->video.ioctl_ops = &isp_video_ioctl_ops;
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		video->video.device_caps = V4L2_CAP_VIDEO_CAPTURE
					 | V4L2_CAP_STREAMING;
	else
		video->video.device_caps = V4L2_CAP_VIDEO_OUTPUT
					 | V4L2_CAP_STREAMING;

	video->pipe.stream_state = ISP_PIPELINE_STREAM_STOPPED;

	video_set_drvdata(&video->video, video);

	return 0;
}
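
/*
 * Usage sketch (illustrative, not a definitive implementation): an ISP
 * module embedding a struct isp_video would typically fill in the fields
 * consumed above and by the format helpers before calling
 * omap3isp_video_init(), then register the node once the V4L2 device
 * exists, for example:
 *
 *	video->isp = isp;
 *	video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	video->ops = &my_video_ops;		// hypothetical ops structure
 *	video->capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
 *	video->bpl_alignment = 32;
 *	ret = omap3isp_video_init(video, "MYMODULE");
 *	...
 *	ret = omap3isp_video_register(video, &isp->v4l2_dev);
 *
 * The exact field values (capture_mem, bpl_alignment, ops) are module
 * specific; the ones shown here are only placeholders.
 */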

void omap3isp_video_cleanup(struct isp_video *video)
{
	media_entity_cleanup(&video->video.entity);
	mutex_destroy(&video->queue_lock);
	mutex_destroy(&video->stream_lock);
	mutex_destroy(&video->mutex);
}

int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
{
	int ret;

	video->video.v4l2_dev = vdev;

	ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
	if (ret < 0)
		dev_err(video->isp->dev,
			"%s: could not register video device (%d)\n",
			__func__, ret);

	return ret;
}

void omap3isp_video_unregister(struct isp_video *video)
{
	video_unregister_device(&video->video);
}