// SPDX-License-Identifier: GPL-2.0
/*
 * sh-mobile VEU mem2mem driver
 *
 * Copyright (C) 2012 Renesas Electronics Corporation
 * Author: Guennadi Liakhovetski, <g.liakhovetski@gmx.de>
 * Copyright (C) 2008 Magnus Damm
 */

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/videodev2.h>

#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-image-sizes.h>
#include <media/videobuf2-dma-contig.h>

#define VEU_STR 0x00 /* start register */
#define VEU_SWR 0x10 /* src: line length */
#define VEU_SSR 0x14 /* src: image size */
#define VEU_SAYR 0x18 /* src: y/rgb plane address */
#define VEU_SACR 0x1c /* src: c plane address */
#define VEU_BSSR 0x20 /* bundle mode register */
#define VEU_EDWR 0x30 /* dst: line length */
#define VEU_DAYR 0x34 /* dst: y/rgb plane address */
#define VEU_DACR 0x38 /* dst: c plane address */
#define VEU_TRCR 0x50 /* transform control */
#define VEU_RFCR 0x54 /* resize scale */
#define VEU_RFSR 0x58 /* resize clip */
#define VEU_ENHR 0x5c /* enhance */
#define VEU_FMCR 0x70 /* filter mode */
#define VEU_VTCR 0x74 /* lowpass vertical */
#define VEU_HTCR 0x78 /* lowpass horizontal */
#define VEU_APCR 0x80 /* color match */
#define VEU_ECCR 0x84 /* color replace */
#define VEU_AFXR 0x90 /* fixed mode */
#define VEU_SWPR 0x94 /* swap */
#define VEU_EIER 0xa0 /* interrupt mask */
#define VEU_EVTR 0xa4 /* interrupt event */
#define VEU_STAR 0xb0 /* status */
#define VEU_BSRR 0xb4 /* reset */

#define VEU_MCR00 0x200 /* color conversion matrix coefficient 00 */
#define VEU_MCR01 0x204 /* color conversion matrix coefficient 01 */
#define VEU_MCR02 0x208 /* color conversion matrix coefficient 02 */
#define VEU_MCR10 0x20c /* color conversion matrix coefficient 10 */
#define VEU_MCR11 0x210 /* color conversion matrix coefficient 11 */
#define VEU_MCR12 0x214 /* color conversion matrix coefficient 12 */
#define VEU_MCR20 0x218 /* color conversion matrix coefficient 20 */
#define VEU_MCR21 0x21c /* color conversion matrix coefficient 21 */
#define VEU_MCR22 0x220 /* color conversion matrix coefficient 22 */
#define VEU_COFFR 0x224 /* color conversion offset */
#define VEU_CBR   0x228 /* color conversion clip */
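/*
 * The color conversion matrix registers (VEU_MCR*, VEU_COFFR, VEU_CBR) are
 * only written by this driver on the VEU2H variant, which sh_veu_probe()
 * detects by its 0x22c-byte register window (see sh_veu_configure()).
 */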

/*
 * 4092x4092 max size is the normal case. In some cases it can be reduced to
 * 2048x2048, in other cases it can be 4092x8188 or even 8188x8188.
 */
#define MAX_W 4092
#define MAX_H 4092
#define MIN_W 8
#define MIN_H 8
#define ALIGN_W 4

/* 3 buffers of 2048 x 1536 - 3 megapixels @ 16bpp */
#define VIDEO_MEM_LIMIT ALIGN(2048 * 1536 * 2 * 3, 1024 * 1024)

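/*
 * Number of conversions per mem2mem job: the threaded interrupt handler calls
 * v4l2_m2m_job_finish() once 'xaction' reaches this count (see sh_veu_bh()).
 */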
#define MEM2MEM_DEF_TRANSLEN 1

struct sh_veu_dev;

struct sh_veu_file {
	struct sh_veu_dev *veu_dev;
	bool cfg_needed;
};

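/*
 * Format description: 'ydepth' is the bit depth of the Y (or RGB) plane and
 * determines bytesperline, while 'depth' counts all planes, so sizeimage is
 * computed as bytesperline * height * depth / ydepth throughout the driver.
 */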
struct sh_veu_format {
	char *name;
	u32 fourcc;
	unsigned int depth;
	unsigned int ydepth;
};

/* video data format */
struct sh_veu_vfmt {
	struct v4l2_rect		frame;
	unsigned int			bytesperline;
	unsigned int			offset_y;
	unsigned int			offset_c;
	const struct sh_veu_format	*fmt;
};

struct sh_veu_dev {
	struct v4l2_device v4l2_dev;
	struct video_device vdev;
	struct v4l2_m2m_dev *m2m_dev;
	struct device *dev;
	struct v4l2_m2m_ctx *m2m_ctx;
	struct sh_veu_vfmt vfmt_out;
	struct sh_veu_vfmt vfmt_in;
	/* Only single user per direction so far */
	struct sh_veu_file *capture;
	struct sh_veu_file *output;
	struct mutex fop_lock;
	void __iomem *base;
	spinlock_t lock;
	bool is_2h;
	unsigned int xaction;
	bool aborting;
};

enum sh_veu_fmt_idx {
	SH_VEU_FMT_NV12,
	SH_VEU_FMT_NV16,
	SH_VEU_FMT_NV24,
	SH_VEU_FMT_RGB332,
	SH_VEU_FMT_RGB444,
	SH_VEU_FMT_RGB565,
	SH_VEU_FMT_RGB666,
	SH_VEU_FMT_RGB24,
};

#define DEFAULT_IN_WIDTH	VGA_WIDTH
#define DEFAULT_IN_HEIGHT	VGA_HEIGHT
#define DEFAULT_IN_FMTIDX	SH_VEU_FMT_NV12
#define DEFAULT_OUT_WIDTH	VGA_WIDTH
#define DEFAULT_OUT_HEIGHT	VGA_HEIGHT
#define DEFAULT_OUT_FMTIDX	SH_VEU_FMT_RGB565

/*
 * Alignment: Y-plane should be 4-byte aligned for NV12 and NV16, and 8-byte
 * aligned for NV24.
 */
static const struct sh_veu_format sh_veu_fmt[] = {
	[SH_VEU_FMT_NV12]   = { .ydepth = 8, .depth = 12, .name = "NV12", .fourcc = V4L2_PIX_FMT_NV12 },
	[SH_VEU_FMT_NV16]   = { .ydepth = 8, .depth = 16, .name = "NV16", .fourcc = V4L2_PIX_FMT_NV16 },
	[SH_VEU_FMT_NV24]   = { .ydepth = 8, .depth = 24, .name = "NV24", .fourcc = V4L2_PIX_FMT_NV24 },
	[SH_VEU_FMT_RGB332] = { .ydepth = 8, .depth = 8, .name = "RGB332", .fourcc = V4L2_PIX_FMT_RGB332 },
	[SH_VEU_FMT_RGB444] = { .ydepth = 16, .depth = 16, .name = "RGB444", .fourcc = V4L2_PIX_FMT_RGB444 },
	[SH_VEU_FMT_RGB565] = { .ydepth = 16, .depth = 16, .name = "RGB565", .fourcc = V4L2_PIX_FMT_RGB565 },
	[SH_VEU_FMT_RGB666] = { .ydepth = 32, .depth = 32, .name = "BGR666", .fourcc = V4L2_PIX_FMT_BGR666 },
	[SH_VEU_FMT_RGB24]  = { .ydepth = 24, .depth = 24, .name = "RGB24", .fourcc = V4L2_PIX_FMT_RGB24 },
};

#define DEFAULT_IN_VFMT (struct sh_veu_vfmt){						\
	.frame = {									\
		.width = VGA_WIDTH,							\
		.height = VGA_HEIGHT,							\
	},										\
	.bytesperline = (VGA_WIDTH * sh_veu_fmt[DEFAULT_IN_FMTIDX].ydepth) >> 3,	\
	.fmt = &sh_veu_fmt[DEFAULT_IN_FMTIDX],						\
}

#define DEFAULT_OUT_VFMT (struct sh_veu_vfmt){						\
	.frame = {									\
		.width = VGA_WIDTH,							\
		.height = VGA_HEIGHT,							\
	},										\
	.bytesperline = (VGA_WIDTH * sh_veu_fmt[DEFAULT_OUT_FMTIDX].ydepth) >> 3,	\
	.fmt = &sh_veu_fmt[DEFAULT_OUT_FMTIDX],						\
}

/*
 * TODO: add support for further output formats:
 *	SH_VEU_FMT_NV12,
 *	SH_VEU_FMT_NV16,
 *	SH_VEU_FMT_NV24,
 *	SH_VEU_FMT_RGB332,
 *	SH_VEU_FMT_RGB444,
 *	SH_VEU_FMT_RGB666,
 *	SH_VEU_FMT_RGB24,
 */

static const int sh_veu_fmt_out[] = {
	SH_VEU_FMT_RGB565,
};

/*
 * TODO: add support for further input formats:
 *	SH_VEU_FMT_NV16,
 *	SH_VEU_FMT_NV24,
 *	SH_VEU_FMT_RGB565,
 *	SH_VEU_FMT_RGB666,
 *	SH_VEU_FMT_RGB24,
 */
static const int sh_veu_fmt_in[] = {
	SH_VEU_FMT_NV12,
};

static enum v4l2_colorspace sh_veu_4cc2cspace(u32 fourcc)
{
	switch (fourcc) {
	default:
		BUG();
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV24:
		return V4L2_COLORSPACE_SMPTE170M;
	case V4L2_PIX_FMT_RGB332:
	case V4L2_PIX_FMT_RGB444:
	case V4L2_PIX_FMT_RGB565:
	case V4L2_PIX_FMT_BGR666:
	case V4L2_PIX_FMT_RGB24:
		return V4L2_COLORSPACE_SRGB;
	}
}

static u32 sh_veu_reg_read(struct sh_veu_dev *veu, unsigned int reg)
{
	return ioread32(veu->base + reg);
}

static void sh_veu_reg_write(struct sh_veu_dev *veu, unsigned int reg,
			     u32 value)
{
	iowrite32(value, veu->base + reg);
}

		/* ========== mem2mem callbacks ========== */

static void sh_veu_job_abort(void *priv)
{
	struct sh_veu_dev *veu = priv;

	/* Will cancel the transaction in the next interrupt handler */
	veu->aborting = true;
}

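/*
 * Program the per-buffer plane addresses for one conversion and kick the VEU.
 * Geometry, scaling and formats have already been set up in
 * sh_veu_configure(), so only VEU_DAYR/DACR, VEU_SAYR/SACR and the start /
 * interrupt-enable bits are touched here.
 */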
static void sh_veu_process(struct sh_veu_dev *veu,
			   struct vb2_buffer *src_buf,
			   struct vb2_buffer *dst_buf)
{
	dma_addr_t addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);

	sh_veu_reg_write(veu, VEU_DAYR, addr + veu->vfmt_out.offset_y);
	sh_veu_reg_write(veu, VEU_DACR, veu->vfmt_out.offset_c ?
			 addr + veu->vfmt_out.offset_c : 0);
	dev_dbg(veu->dev, "%s(): dst base %lx, y: %x, c: %x\n", __func__,
		(unsigned long)addr,
		veu->vfmt_out.offset_y, veu->vfmt_out.offset_c);

	addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
	sh_veu_reg_write(veu, VEU_SAYR, addr + veu->vfmt_in.offset_y);
	sh_veu_reg_write(veu, VEU_SACR, veu->vfmt_in.offset_c ?
			 addr + veu->vfmt_in.offset_c : 0);
	dev_dbg(veu->dev, "%s(): src base %lx, y: %x, c: %x\n", __func__,
		(unsigned long)addr,
		veu->vfmt_in.offset_y, veu->vfmt_in.offset_c);

	sh_veu_reg_write(veu, VEU_STR, 1);

	sh_veu_reg_write(veu, VEU_EIER, 1); /* enable interrupt in VEU */
}

/*
 * sh_veu_device_run() - prepares and starts the device
 *
 * This will be called by the framework when it decides to schedule a particular
 * instance.
 */
static void sh_veu_device_run(void *priv)
{
	struct sh_veu_dev *veu = priv;
	struct vb2_buffer *src_buf, *dst_buf;

	src_buf = v4l2_m2m_next_src_buf(veu->m2m_ctx);
	dst_buf = v4l2_m2m_next_dst_buf(veu->m2m_ctx);

	if (src_buf && dst_buf)
		sh_veu_process(veu, src_buf, dst_buf);
}

		/* ========== video ioctls ========== */

static bool sh_veu_is_streamer(struct sh_veu_dev *veu, struct sh_veu_file *veu_file,
			       enum v4l2_buf_type type)
{
	return (type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
		veu_file == veu->capture) ||
		(type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
		 veu_file == veu->output);
}

static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq,
			     struct vb2_queue *dst_vq);

/*
 * It is not unusual to have video nodes open()ed multiple times. While some
 * V4L2 operations are non-intrusive, like querying formats and various
 * parameters, others, like setting formats, starting and stopping streaming,
 * queuing and dequeuing buffers, directly affect hardware configuration and /
 * or execution. This function verifies availability of the requested interface
 * and, if available, reserves it for the requesting user.
 */
static int sh_veu_stream_init(struct sh_veu_dev *veu, struct sh_veu_file *veu_file,
			      enum v4l2_buf_type type)
{
	struct sh_veu_file **stream;

	switch (type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		stream = &veu->capture;
		break;
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		stream = &veu->output;
		break;
	default:
		return -EINVAL;
	}

	if (*stream == veu_file)
		return 0;

	if (*stream)
		return -EBUSY;

	*stream = veu_file;

	return 0;
}

static int sh_veu_context_init(struct sh_veu_dev *veu)
{
	if (veu->m2m_ctx)
		return 0;

	veu->m2m_ctx = v4l2_m2m_ctx_init(veu->m2m_dev, veu,
					 sh_veu_queue_init);

	return PTR_ERR_OR_ZERO(veu->m2m_ctx);
}

static int sh_veu_querycap(struct file *file, void *priv,
			   struct v4l2_capability *cap)
{
	strlcpy(cap->driver, "sh-veu", sizeof(cap->driver));
	strlcpy(cap->card, "sh-mobile VEU", sizeof(cap->card));
	strlcpy(cap->bus_info, "platform:sh-veu", sizeof(cap->bus_info));
	cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;

	return 0;
}

static int sh_veu_enum_fmt(struct v4l2_fmtdesc *f, const int *fmt, int fmt_num)
{
	if (f->index >= fmt_num)
		return -EINVAL;

	strlcpy(f->description, sh_veu_fmt[fmt[f->index]].name, sizeof(f->description));
	f->pixelformat = sh_veu_fmt[fmt[f->index]].fourcc;
	return 0;
}

static int sh_veu_enum_fmt_vid_cap(struct file *file, void *priv,
				   struct v4l2_fmtdesc *f)
{
	return sh_veu_enum_fmt(f, sh_veu_fmt_out, ARRAY_SIZE(sh_veu_fmt_out));
}

static int sh_veu_enum_fmt_vid_out(struct file *file, void *priv,
				   struct v4l2_fmtdesc *f)
{
	return sh_veu_enum_fmt(f, sh_veu_fmt_in, ARRAY_SIZE(sh_veu_fmt_in));
}

static struct sh_veu_vfmt *sh_veu_get_vfmt(struct sh_veu_dev *veu,
					   enum v4l2_buf_type type)
{
	switch (type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		return &veu->vfmt_out;
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		return &veu->vfmt_in;
	default:
		return NULL;
	}
}

static int sh_veu_g_fmt(struct sh_veu_file *veu_file, struct v4l2_format *f)
{
	struct v4l2_pix_format *pix = &f->fmt.pix;
	struct sh_veu_dev *veu = veu_file->veu_dev;
	struct sh_veu_vfmt *vfmt;

	vfmt = sh_veu_get_vfmt(veu, f->type);

	pix->width		= vfmt->frame.width;
	pix->height		= vfmt->frame.height;
	pix->field		= V4L2_FIELD_NONE;
	pix->pixelformat	= vfmt->fmt->fourcc;
	pix->colorspace		= sh_veu_4cc2cspace(pix->pixelformat);
	pix->bytesperline	= vfmt->bytesperline;
	pix->sizeimage		= vfmt->bytesperline * pix->height *
		vfmt->fmt->depth / vfmt->fmt->ydepth;
	dev_dbg(veu->dev, "%s(): type: %d, size %u @ %ux%u, fmt %x\n", __func__,
		f->type, pix->sizeimage, pix->width, pix->height, pix->pixelformat);

	return 0;
}

static int sh_veu_g_fmt_vid_out(struct file *file, void *priv,
				struct v4l2_format *f)
{
	return sh_veu_g_fmt(priv, f);
}

static int sh_veu_g_fmt_vid_cap(struct file *file, void *priv,
				struct v4l2_format *f)
{
	return sh_veu_g_fmt(priv, f);
}

static int sh_veu_try_fmt(struct v4l2_format *f, const struct sh_veu_format *fmt)
{
	struct v4l2_pix_format *pix = &f->fmt.pix;
	unsigned int y_bytes_used;

	/*
	 * The V4L2 specification suggests that the driver should correct the
	 * format struct if any of the dimensions is unsupported
	 */
	switch (pix->field) {
	default:
	case V4L2_FIELD_ANY:
		pix->field = V4L2_FIELD_NONE;
		/* fall through: continue handling V4L2_FIELD_NONE */
	case V4L2_FIELD_NONE:
		break;
	}

	v4l_bound_align_image(&pix->width, MIN_W, MAX_W, ALIGN_W,
			      &pix->height, MIN_H, MAX_H, 0, 0);

	y_bytes_used = (pix->width * fmt->ydepth) >> 3;

	if (pix->bytesperline < y_bytes_used)
		pix->bytesperline = y_bytes_used;
	pix->sizeimage = pix->height * pix->bytesperline * fmt->depth / fmt->ydepth;

	pix->pixelformat	= fmt->fourcc;
	pix->colorspace		= sh_veu_4cc2cspace(pix->pixelformat);

	pr_debug("%s(): type: %d, size %u\n", __func__, f->type, pix->sizeimage);

	return 0;
}

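/*
 * Note: this never returns NULL - an unknown fourcc falls back to the default
 * format for the queue type, so the NULL checks in the try_fmt callers below
 * are purely defensive.
 */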
static const struct sh_veu_format *sh_veu_find_fmt(const struct v4l2_format *f)
{
	const int *fmt;
	int i, n, dflt;

	pr_debug("%s(%d;%d)\n", __func__, f->type, f->fmt.pix.field);

	switch (f->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		fmt = sh_veu_fmt_out;
		n = ARRAY_SIZE(sh_veu_fmt_out);
		dflt = DEFAULT_OUT_FMTIDX;
		break;
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
	default:
		fmt = sh_veu_fmt_in;
		n = ARRAY_SIZE(sh_veu_fmt_in);
		dflt = DEFAULT_IN_FMTIDX;
		break;
	}

	for (i = 0; i < n; i++)
		if (sh_veu_fmt[fmt[i]].fourcc == f->fmt.pix.pixelformat)
			return &sh_veu_fmt[fmt[i]];

	return &sh_veu_fmt[dflt];
}

static int sh_veu_try_fmt_vid_cap(struct file *file, void *priv,
				  struct v4l2_format *f)
{
	const struct sh_veu_format *fmt;

	fmt = sh_veu_find_fmt(f);
	if (!fmt)
		/* wrong buffer type */
		return -EINVAL;

	return sh_veu_try_fmt(f, fmt);
}

static int sh_veu_try_fmt_vid_out(struct file *file, void *priv,
				  struct v4l2_format *f)
{
	const struct sh_veu_format *fmt;

	fmt = sh_veu_find_fmt(f);
	if (!fmt)
		/* wrong buffer type */
		return -EINVAL;

	return sh_veu_try_fmt(f, fmt);
}

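/*
 * Compute the byte offsets of the Y/RGB and C planes within the buffer. The
 * position offset is always derived from the destination (capture) format and
 * only becomes non-zero once CROP / COMPOSE support sets left/top; the C-plane
 * offset assumes a Y line length aligned to 16 pixels, matching the source
 * stride programmed in sh_veu_configure().
 */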
static void sh_veu_colour_offset(struct sh_veu_dev *veu, struct sh_veu_vfmt *vfmt)
{
	/* dst_left and dst_top validity will be verified in CROP / COMPOSE */
	unsigned int left = vfmt->frame.left & ~0x03;
	unsigned int top = vfmt->frame.top;
	dma_addr_t offset = (dma_addr_t)top * veu->vfmt_out.bytesperline +
			(((dma_addr_t)left * veu->vfmt_out.fmt->depth) >> 3);
	unsigned int y_line;

	vfmt->offset_y = offset;

	switch (vfmt->fmt->fourcc) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV24:
		y_line = ALIGN(vfmt->frame.width, 16);
		vfmt->offset_c = offset + y_line * vfmt->frame.height;
		break;
	case V4L2_PIX_FMT_RGB332:
	case V4L2_PIX_FMT_RGB444:
	case V4L2_PIX_FMT_RGB565:
	case V4L2_PIX_FMT_BGR666:
	case V4L2_PIX_FMT_RGB24:
		vfmt->offset_c = 0;
		break;
	default:
		BUG();
	}
}

static int sh_veu_s_fmt(struct sh_veu_file *veu_file, struct v4l2_format *f)
{
	struct v4l2_pix_format *pix = &f->fmt.pix;
	struct sh_veu_dev *veu = veu_file->veu_dev;
	struct sh_veu_vfmt *vfmt;
	struct vb2_queue *vq;
	int ret = sh_veu_context_init(veu);
	if (ret < 0)
		return ret;

	vq = v4l2_m2m_get_vq(veu->m2m_ctx, f->type);
	if (!vq)
		return -EINVAL;

	if (vb2_is_busy(vq)) {
		v4l2_err(&veu_file->veu_dev->v4l2_dev, "%s queue busy\n", __func__);
		return -EBUSY;
	}

	vfmt = sh_veu_get_vfmt(veu, f->type);
	/* called after try_fmt(), hence vfmt != NULL. Implicit BUG_ON() below */

	vfmt->fmt		= sh_veu_find_fmt(f);
	/* vfmt->fmt != NULL following the same argument as above */
	vfmt->frame.width	= pix->width;
	vfmt->frame.height	= pix->height;
	vfmt->bytesperline	= pix->bytesperline;

	sh_veu_colour_offset(veu, vfmt);

	/*
	 * We could also verify and require configuration only if any parameters
	 * actually have changed, but it is unlikely that the user requests the
	 * same configuration several times without closing the device.
	 */
	veu_file->cfg_needed = true;

	dev_dbg(veu->dev,
		"Setting format for type %d, wxh: %dx%d, fmt: %x\n",
		f->type, pix->width, pix->height, vfmt->fmt->fourcc);

	return 0;
}

static int sh_veu_s_fmt_vid_cap(struct file *file, void *priv,
				struct v4l2_format *f)
{
	int ret = sh_veu_try_fmt_vid_cap(file, priv, f);
	if (ret)
		return ret;

	return sh_veu_s_fmt(priv, f);
}

static int sh_veu_s_fmt_vid_out(struct file *file, void *priv,
				struct v4l2_format *f)
{
	int ret = sh_veu_try_fmt_vid_out(file, priv, f);
	if (ret)
		return ret;

	return sh_veu_s_fmt(priv, f);
}

static int sh_veu_reqbufs(struct file *file, void *priv,
			  struct v4l2_requestbuffers *reqbufs)
{
	struct sh_veu_file *veu_file = priv;
	struct sh_veu_dev *veu = veu_file->veu_dev;
	int ret = sh_veu_context_init(veu);
	if (ret < 0)
		return ret;

	ret = sh_veu_stream_init(veu, veu_file, reqbufs->type);
	if (ret < 0)
		return ret;

	return v4l2_m2m_reqbufs(file, veu->m2m_ctx, reqbufs);
}

static int sh_veu_querybuf(struct file *file, void *priv,
			   struct v4l2_buffer *buf)
{
	struct sh_veu_file *veu_file = priv;

	if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type))
		return -EBUSY;

	return v4l2_m2m_querybuf(file, veu_file->veu_dev->m2m_ctx, buf);
}

static int sh_veu_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
	struct sh_veu_file *veu_file = priv;

	dev_dbg(veu_file->veu_dev->dev, "%s(%d)\n", __func__, buf->type);
	if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type))
		return -EBUSY;

	return v4l2_m2m_qbuf(file, veu_file->veu_dev->m2m_ctx, buf);
}

static int sh_veu_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
	struct sh_veu_file *veu_file = priv;

	dev_dbg(veu_file->veu_dev->dev, "%s(%d)\n", __func__, buf->type);
	if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type))
		return -EBUSY;

	return v4l2_m2m_dqbuf(file, veu_file->veu_dev->m2m_ctx, buf);
}

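/*
 * Compute the resize factor as a 4.12 fixed-point value: 'mant' ends up in
 * bits 15..12 and 'frac' in bits 11..0 of VEU_RFCR, while 'rep' goes to bits
 * 15..12 of VEU_RFSR (see sh_veu_scale_v() / sh_veu_scale_h()).
 */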
static void sh_veu_calc_scale(struct sh_veu_dev *veu,
			      int size_in, int size_out, int crop_out,
			      u32 *mant, u32 *frac, u32 *rep)
{
	u32 fixpoint;

	/* calculate FRAC and MANT */
	*rep = *mant = *frac = 0;

	if (size_in == size_out) {
		if (crop_out != size_out)
			*mant = 1; /* needed for cropping */
		return;
	}

	/* VEU2H special upscale */
	if (veu->is_2h && size_out > size_in) {
		fixpoint = (4096 * size_in) / size_out;
		*mant = fixpoint / 4096;
		*frac = (fixpoint - (*mant * 4096)) & ~0x07;

		switch (*frac) {
		case 0x800:
			*rep = 1;
			break;
		case 0x400:
			*rep = 3;
			break;
		case 0x200:
			*rep = 7;
			break;
		}
		if (*rep)
			return;
	}

	fixpoint = (4096 * (size_in - 1)) / (size_out + 1);
	*mant = fixpoint / 4096;
	*frac = fixpoint - (*mant * 4096);

	if (*frac & 0x07) {
		/*
		 * FIXME: do we really have to round down twice in the
		 * up-scaling case?
		 */
		*frac &= ~0x07;
		if (size_out > size_in)
			*frac -= 8; /* round down if scaling up */
		else
			*frac += 8; /* round up if scaling down */
	}
}

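/*
 * The vertical scale and clip values live in the upper halfwords of VEU_RFCR
 * and VEU_RFSR, the horizontal ones in the lower halfwords; both helpers
 * return the resulting source extent, aligned to 4.
 */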
static unsigned long sh_veu_scale_v(struct sh_veu_dev *veu,
				    int size_in, int size_out, int crop_out)
{
	u32 mant, frac, value, rep;

	sh_veu_calc_scale(veu, size_in, size_out, crop_out, &mant, &frac, &rep);

	/* set scale */
	value = (sh_veu_reg_read(veu, VEU_RFCR) & ~0xffff0000) |
		(((mant << 12) | frac) << 16);

	sh_veu_reg_write(veu, VEU_RFCR, value);

	/* set clip */
	value = (sh_veu_reg_read(veu, VEU_RFSR) & ~0xffff0000) |
		(((rep << 12) | crop_out) << 16);

	sh_veu_reg_write(veu, VEU_RFSR, value);

	return ALIGN((size_in * crop_out) / size_out, 4);
}

static unsigned long sh_veu_scale_h(struct sh_veu_dev *veu,
				    int size_in, int size_out, int crop_out)
{
	u32 mant, frac, value, rep;

	sh_veu_calc_scale(veu, size_in, size_out, crop_out, &mant, &frac, &rep);

	/* set scale */
	value = (sh_veu_reg_read(veu, VEU_RFCR) & ~0xffff) |
		(mant << 12) | frac;

	sh_veu_reg_write(veu, VEU_RFCR, value);

	/* set clip */
	value = (sh_veu_reg_read(veu, VEU_RFSR) & ~0xffff) |
		(rep << 12) | crop_out;

	sh_veu_reg_write(veu, VEU_RFSR, value);

	return ALIGN((size_in * crop_out) / size_out, 4);
}

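/*
 * One-off hardware setup for a transaction: reset the VEU and program strides,
 * sizes, scaling and the transform/swap control registers. Called from
 * STREAMON whenever the configuration has changed (cfg_needed). The
 * hardware-specific constants written to VEU_SWPR/VEU_TRCR and the VEU2H
 * color conversion matrix are left as magic values here.
 */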
static void sh_veu_configure(struct sh_veu_dev *veu)
{
	u32 src_width, src_stride, src_height;
	u32 dst_width, dst_stride, dst_height;
	u32 real_w, real_h;

	/* reset VEU */
	sh_veu_reg_write(veu, VEU_BSRR, 0x100);

	src_width = veu->vfmt_in.frame.width;
	src_height = veu->vfmt_in.frame.height;
	src_stride = ALIGN(veu->vfmt_in.frame.width, 16);

	dst_width = real_w = veu->vfmt_out.frame.width;
	dst_height = real_h = veu->vfmt_out.frame.height;
	/* The datasheet is unclear on whether this is always a byte count */
	dst_stride = veu->vfmt_out.bytesperline;

	/*
	 * So far real_w == dst_width && real_h == dst_height, but it wasn't
	 * necessarily the case in the original vidix driver, so it may change
	 * here in the future too.
	 */
	src_width = sh_veu_scale_h(veu, src_width, real_w, dst_width);
	src_height = sh_veu_scale_v(veu, src_height, real_h, dst_height);

	sh_veu_reg_write(veu, VEU_SWR, src_stride);
	sh_veu_reg_write(veu, VEU_SSR, src_width | (src_height << 16));
	sh_veu_reg_write(veu, VEU_BSSR, 0); /* not using bundle mode */

	sh_veu_reg_write(veu, VEU_EDWR, dst_stride);
	sh_veu_reg_write(veu, VEU_DACR, 0); /* unused for RGB */

	sh_veu_reg_write(veu, VEU_SWPR, 0x67);
	sh_veu_reg_write(veu, VEU_TRCR, (6 << 16) | (0 << 14) | 2 | 4);

	if (veu->is_2h) {
		sh_veu_reg_write(veu, VEU_MCR00, 0x0cc5);
		sh_veu_reg_write(veu, VEU_MCR01, 0x0950);
		sh_veu_reg_write(veu, VEU_MCR02, 0x0000);

		sh_veu_reg_write(veu, VEU_MCR10, 0x397f);
		sh_veu_reg_write(veu, VEU_MCR11, 0x0950);
		sh_veu_reg_write(veu, VEU_MCR12, 0x3ccd);

		sh_veu_reg_write(veu, VEU_MCR20, 0x0000);
		sh_veu_reg_write(veu, VEU_MCR21, 0x0950);
		sh_veu_reg_write(veu, VEU_MCR22, 0x1023);

		sh_veu_reg_write(veu, VEU_COFFR, 0x00800010);
	}
}

static int sh_veu_streamon(struct file *file, void *priv,
			   enum v4l2_buf_type type)
{
	struct sh_veu_file *veu_file = priv;

	if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, type))
		return -EBUSY;

	if (veu_file->cfg_needed) {
		struct sh_veu_dev *veu = veu_file->veu_dev;
		veu_file->cfg_needed = false;
		sh_veu_configure(veu_file->veu_dev);
		veu->xaction = 0;
		veu->aborting = false;
	}

	return v4l2_m2m_streamon(file, veu_file->veu_dev->m2m_ctx, type);
}

static int sh_veu_streamoff(struct file *file, void *priv,
			    enum v4l2_buf_type type)
{
	struct sh_veu_file *veu_file = priv;

	if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, type))
		return -EBUSY;

	return v4l2_m2m_streamoff(file, veu_file->veu_dev->m2m_ctx, type);
}

static const struct v4l2_ioctl_ops sh_veu_ioctl_ops = {
	.vidioc_querycap	= sh_veu_querycap,

	.vidioc_enum_fmt_vid_cap = sh_veu_enum_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap	= sh_veu_g_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap	= sh_veu_try_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap	= sh_veu_s_fmt_vid_cap,

	.vidioc_enum_fmt_vid_out = sh_veu_enum_fmt_vid_out,
	.vidioc_g_fmt_vid_out	= sh_veu_g_fmt_vid_out,
	.vidioc_try_fmt_vid_out	= sh_veu_try_fmt_vid_out,
	.vidioc_s_fmt_vid_out	= sh_veu_s_fmt_vid_out,

	.vidioc_reqbufs		= sh_veu_reqbufs,
	.vidioc_querybuf	= sh_veu_querybuf,

	.vidioc_qbuf		= sh_veu_qbuf,
	.vidioc_dqbuf		= sh_veu_dqbuf,

	.vidioc_streamon	= sh_veu_streamon,
	.vidioc_streamoff	= sh_veu_streamoff,
};

		/* ========== Queue operations ========== */

static int sh_veu_queue_setup(struct vb2_queue *vq,
			      unsigned int *nbuffers, unsigned int *nplanes,
			      unsigned int sizes[], struct device *alloc_devs[])
{
	struct sh_veu_dev *veu = vb2_get_drv_priv(vq);
	struct sh_veu_vfmt *vfmt = sh_veu_get_vfmt(veu, vq->type);
	unsigned int count = *nbuffers;
	unsigned int size = vfmt->bytesperline * vfmt->frame.height *
		vfmt->fmt->depth / vfmt->fmt->ydepth;

	if (count < 2)
		*nbuffers = count = 2;

	if (size * count > VIDEO_MEM_LIMIT) {
		count = VIDEO_MEM_LIMIT / size;
		*nbuffers = count;
	}

	if (*nplanes)
		return sizes[0] < size ? -EINVAL : 0;

	*nplanes = 1;
	sizes[0] = size;

	dev_dbg(veu->dev, "get %d buffer(s) of size %d each.\n", count, size);

	return 0;
}

static int sh_veu_buf_prepare(struct vb2_buffer *vb)
{
	struct sh_veu_dev *veu = vb2_get_drv_priv(vb->vb2_queue);
	struct sh_veu_vfmt *vfmt;
	unsigned int sizeimage;

	vfmt = sh_veu_get_vfmt(veu, vb->vb2_queue->type);
	sizeimage = vfmt->bytesperline * vfmt->frame.height *
		vfmt->fmt->depth / vfmt->fmt->ydepth;

	if (vb2_plane_size(vb, 0) < sizeimage) {
		dev_dbg(veu->dev, "%s data will not fit into plane (%lu < %u)\n",
			__func__, vb2_plane_size(vb, 0), sizeimage);
		return -EINVAL;
	}

	vb2_set_plane_payload(vb, 0, sizeimage);

	return 0;
}

static void sh_veu_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct sh_veu_dev *veu = vb2_get_drv_priv(vb->vb2_queue);
	dev_dbg(veu->dev, "%s(%d)\n", __func__, vb->type);
	v4l2_m2m_buf_queue(veu->m2m_ctx, vbuf);
}

static const struct vb2_ops sh_veu_qops = {
	.queue_setup	 = sh_veu_queue_setup,
	.buf_prepare	 = sh_veu_buf_prepare,
	.buf_queue	 = sh_veu_buf_queue,
	.wait_prepare	 = vb2_ops_wait_prepare,
	.wait_finish	 = vb2_ops_wait_finish,
};

static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq,
			     struct vb2_queue *dst_vq)
{
	struct sh_veu_dev *veu = priv;
	int ret;

	memset(src_vq, 0, sizeof(*src_vq));
	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
	src_vq->drv_priv = veu;
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	src_vq->ops = &sh_veu_qops;
	src_vq->mem_ops = &vb2_dma_contig_memops;
	src_vq->lock = &veu->fop_lock;
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->dev = veu->v4l2_dev.dev;

	ret = vb2_queue_init(src_vq);
	if (ret < 0)
		return ret;

	memset(dst_vq, 0, sizeof(*dst_vq));
	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
	dst_vq->drv_priv = veu;
	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	dst_vq->ops = &sh_veu_qops;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	dst_vq->lock = &veu->fop_lock;
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->dev = veu->v4l2_dev.dev;

	return vb2_queue_init(dst_vq);
}

		/* ========== File operations ========== */

static int sh_veu_open(struct file *file)
{
	struct sh_veu_dev *veu = video_drvdata(file);
	struct sh_veu_file *veu_file;

	veu_file = kzalloc(sizeof(*veu_file), GFP_KERNEL);
	if (!veu_file)
		return -ENOMEM;

	veu_file->veu_dev = veu;
	veu_file->cfg_needed = true;

	file->private_data = veu_file;

	pm_runtime_get_sync(veu->dev);

	dev_dbg(veu->dev, "Created instance %p\n", veu_file);

	return 0;
}

static int sh_veu_release(struct file *file)
{
	struct sh_veu_dev *veu = video_drvdata(file);
	struct sh_veu_file *veu_file = file->private_data;

	dev_dbg(veu->dev, "Releasing instance %p\n", veu_file);

	if (veu_file == veu->capture) {
		veu->capture = NULL;
		vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE));
	}

	if (veu_file == veu->output) {
		veu->output = NULL;
		vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT));
	}

	if (!veu->output && !veu->capture && veu->m2m_ctx) {
		v4l2_m2m_ctx_release(veu->m2m_ctx);
		veu->m2m_ctx = NULL;
	}

	pm_runtime_put(veu->dev);

	kfree(veu_file);

	return 0;
}

static __poll_t sh_veu_poll(struct file *file,
				struct poll_table_struct *wait)
{
	struct sh_veu_file *veu_file = file->private_data;

	return v4l2_m2m_poll(file, veu_file->veu_dev->m2m_ctx, wait);
}

static int sh_veu_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct sh_veu_file *veu_file = file->private_data;

	return v4l2_m2m_mmap(file, veu_file->veu_dev->m2m_ctx, vma);
}

static const struct v4l2_file_operations sh_veu_fops = {
	.owner		= THIS_MODULE,
	.open		= sh_veu_open,
	.release	= sh_veu_release,
	.poll		= sh_veu_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= sh_veu_mmap,
};

static const struct video_device sh_veu_videodev = {
	.name		= "sh-veu",
	.fops		= &sh_veu_fops,
	.ioctl_ops	= &sh_veu_ioctl_ops,
	.minor		= -1,
	.release	= video_device_release_empty,
	.vfl_dir	= VFL_DIR_M2M,
};

static const struct v4l2_m2m_ops sh_veu_m2m_ops = {
	.device_run	= sh_veu_device_run,
	.job_abort	= sh_veu_job_abort,
};

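/*
 * Threaded half of the VEU interrupt: either finish the mem2mem job (after
 * MEM2MEM_DEF_TRANSLEN conversions or when aborting) or immediately schedule
 * the next conversion of the same transaction.
 */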
static irqreturn_t sh_veu_bh(int irq, void *dev_id)
{
	struct sh_veu_dev *veu = dev_id;

	if (veu->xaction == MEM2MEM_DEF_TRANSLEN || veu->aborting) {
		v4l2_m2m_job_finish(veu->m2m_dev, veu->m2m_ctx);
		veu->xaction = 0;
	} else {
		sh_veu_device_run(veu);
	}

	return IRQ_HANDLED;
}

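/*
 * Hard interrupt handler: acknowledge the event, stop the VEU, copy timestamp
 * data and mark the current source/destination buffers done, then wake the
 * threaded handler.
 */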
static irqreturn_t sh_veu_isr(int irq, void *dev_id)
{
	struct sh_veu_dev *veu = dev_id;
	struct vb2_v4l2_buffer *dst;
	struct vb2_v4l2_buffer *src;
	u32 status = sh_veu_reg_read(veu, VEU_EVTR);

	/* bundle read mode not used */
	if (!(status & 1))
		return IRQ_NONE;

	/* disable interrupt in VEU */
	sh_veu_reg_write(veu, VEU_EIER, 0);
	/* halt operation */
	sh_veu_reg_write(veu, VEU_STR, 0);
	/* ack int, write 0 to clear bits */
	sh_veu_reg_write(veu, VEU_EVTR, status & ~1);

	/* conversion completed */
	dst = v4l2_m2m_dst_buf_remove(veu->m2m_ctx);
	src = v4l2_m2m_src_buf_remove(veu->m2m_ctx);
	if (!src || !dst)
		return IRQ_NONE;

	dst->vb2_buf.timestamp = src->vb2_buf.timestamp;
	dst->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	dst->flags |=
		src->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	dst->timecode = src->timecode;

	spin_lock(&veu->lock);
	v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
	v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
	spin_unlock(&veu->lock);

	veu->xaction++;

	return IRQ_WAKE_THREAD;
}

static int sh_veu_probe(struct platform_device *pdev)
{
	struct sh_veu_dev *veu;
	struct resource *reg_res;
	struct video_device *vdev;
	int irq, ret;

	reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);

	if (!reg_res || irq <= 0) {
		dev_err(&pdev->dev, "Insufficient VEU platform information.\n");
		return -ENODEV;
	}

	veu = devm_kzalloc(&pdev->dev, sizeof(*veu), GFP_KERNEL);
	if (!veu)
		return -ENOMEM;

	veu->is_2h = resource_size(reg_res) == 0x22c;

	veu->base = devm_ioremap_resource(&pdev->dev, reg_res);
	if (IS_ERR(veu->base))
		return PTR_ERR(veu->base);

	ret = devm_request_threaded_irq(&pdev->dev, irq, sh_veu_isr, sh_veu_bh,
					0, "veu", veu);
	if (ret < 0)
		return ret;

	ret = v4l2_device_register(&pdev->dev, &veu->v4l2_dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "Error registering v4l2 device\n");
		return ret;
	}

	vdev = &veu->vdev;

	*vdev = sh_veu_videodev;
	vdev->v4l2_dev = &veu->v4l2_dev;
	spin_lock_init(&veu->lock);
	mutex_init(&veu->fop_lock);
	vdev->lock = &veu->fop_lock;

	video_set_drvdata(vdev, veu);

	veu->dev	= &pdev->dev;
	veu->vfmt_out	= DEFAULT_OUT_VFMT;
	veu->vfmt_in	= DEFAULT_IN_VFMT;

	veu->m2m_dev = v4l2_m2m_init(&sh_veu_m2m_ops);
	if (IS_ERR(veu->m2m_dev)) {
		ret = PTR_ERR(veu->m2m_dev);
		v4l2_err(&veu->v4l2_dev, "Failed to init mem2mem device: %d\n", ret);
		goto em2minit;
	}

	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
	pm_runtime_suspend(&pdev->dev);
	if (ret < 0)
		goto evidreg;

	return ret;

evidreg:
	pm_runtime_disable(&pdev->dev);
	v4l2_m2m_release(veu->m2m_dev);
em2minit:
	v4l2_device_unregister(&veu->v4l2_dev);
	return ret;
}

static int sh_veu_remove(struct platform_device *pdev)
{
	struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
	struct sh_veu_dev *veu = container_of(v4l2_dev,
					      struct sh_veu_dev, v4l2_dev);

	video_unregister_device(&veu->vdev);
	pm_runtime_disable(&pdev->dev);
	v4l2_m2m_release(veu->m2m_dev);
	v4l2_device_unregister(&veu->v4l2_dev);

	return 0;
}

static struct platform_driver __refdata sh_veu_pdrv = {
	.remove		= sh_veu_remove,
	.driver		= {
		.name	= "sh_veu",
	},
};

module_platform_driver_probe(sh_veu_pdrv, sh_veu_probe);

MODULE_DESCRIPTION("sh-mobile VEU mem2mem driver");
MODULE_AUTHOR("Guennadi Liakhovetski, <g.liakhovetski@gmx.de>");
MODULE_LICENSE("GPL v2");