/*
 * Copyright (c) 2011 - 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Samsung EXYNOS5 SoC series G-Scaler driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 2 of the License,
 * or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/clk.h>

#include <media/v4l2-ioctl.h>

#include "gsc-core.h"

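/*
 * gsc_m2m_ctx_stop_req - request the currently running job to stop
 *
 * If the hardware is busy and @ctx is the active context, set the
 * GSC_CTX_STOP_REQ flag and wait until the interrupt handler clears it,
 * or until GSC_SHUTDOWN_TIMEOUT expires.
 */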
static int gsc_m2m_ctx_stop_req(struct gsc_ctx *ctx)
{
	struct gsc_ctx *curr_ctx;
	struct gsc_dev *gsc = ctx->gsc_dev;
	int ret;

	curr_ctx = v4l2_m2m_get_curr_priv(gsc->m2m.m2m_dev);
	if (!gsc_m2m_pending(gsc) || (curr_ctx != ctx))
		return 0;

	gsc_ctx_state_lock_set(GSC_CTX_STOP_REQ, ctx);
	ret = wait_event_timeout(gsc->irq_queue,
			!gsc_ctx_state_is_set(GSC_CTX_STOP_REQ, ctx),
			GSC_SHUTDOWN_TIMEOUT);

	return ret == 0 ? -ETIMEDOUT : ret;
}

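/*
 * __gsc_m2m_job_abort - abort the job owned by @ctx
 *
 * If the stop request times out, or the context was already marked for
 * abort, clear the pending flags and finish the job in the error state.
 */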
static void __gsc_m2m_job_abort(struct gsc_ctx *ctx)
{
	int ret;

	ret = gsc_m2m_ctx_stop_req(ctx);
	if ((ret == -ETIMEDOUT) || (ctx->state & GSC_CTX_ABORT)) {
		gsc_ctx_state_lock_clear(GSC_CTX_STOP_REQ | GSC_CTX_ABORT, ctx);
		gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
	}
}

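/* Power up the G-Scaler via runtime PM before streaming starts. */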
static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct gsc_ctx *ctx = q->drv_priv;
	int ret;

	ret = pm_runtime_get_sync(&ctx->gsc_dev->pdev->dev);
	return ret > 0 ? 0 : ret;
}

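/* Return all ready source and destination buffers to vb2 in the error state. */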
static void __gsc_m2m_cleanup_queue(struct gsc_ctx *ctx)
{
	struct vb2_v4l2_buffer *src_vb, *dst_vb;

	while (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) > 0) {
		src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
	}

	while (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) > 0) {
		dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
	}
}

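/* Abort the running job, drain both queues and drop the runtime PM reference. */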
static void gsc_m2m_stop_streaming(struct vb2_queue *q)
{
	struct gsc_ctx *ctx = q->drv_priv;

	__gsc_m2m_job_abort(ctx);

	__gsc_m2m_cleanup_queue(ctx);

	pm_runtime_put(&ctx->gsc_dev->pdev->dev);
}

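/*
 * gsc_m2m_job_finish - complete the current transaction
 *
 * Copy the timestamp and timestamp-source flags from the source buffer to
 * the destination buffer, mark both buffers done with @vb_state and notify
 * the m2m framework that the job has finished.
 */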
void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state)
{
	struct vb2_v4l2_buffer *src_vb, *dst_vb;

	if (!ctx || !ctx->m2m_ctx)
		return;

	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

	if (src_vb && dst_vb) {
		dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
		dst_vb->timecode = src_vb->timecode;
		dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
		dst_vb->flags |= src_vb->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

		v4l2_m2m_buf_done(src_vb, vb_state);
		v4l2_m2m_buf_done(dst_vb, vb_state);

		v4l2_m2m_job_finish(ctx->gsc_dev->m2m.m2m_dev,
				    ctx->m2m_ctx);
	}
}

static void gsc_m2m_job_abort(void *priv)
{
	__gsc_m2m_job_abort((struct gsc_ctx *)priv);
}

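/* Resolve the DMA addresses of the next source and destination buffers. */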
static int gsc_get_bufs(struct gsc_ctx *ctx)
{
	struct gsc_frame *s_frame, *d_frame;
	struct vb2_v4l2_buffer *src_vb, *dst_vb;
	int ret;

	s_frame = &ctx->s_frame;
	d_frame = &ctx->d_frame;

	src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	ret = gsc_prepare_addr(ctx, &src_vb->vb2_buf, s_frame, &s_frame->addr);
	if (ret)
		return ret;

	dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
	ret = gsc_prepare_addr(ctx, &dst_vb->vb2_buf, d_frame, &d_frame->addr);
	if (ret)
		return ret;

	dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;

	return 0;
}

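/*
 * gsc_m2m_device_run - program the hardware and start a new transaction
 *
 * Called by the m2m framework when a job is ready. Buffer addresses are
 * always updated; the scaler, format, rotation and alpha settings are
 * reprogrammed only when GSC_PARAMS is set. Everything is done under
 * gsc->slock with interrupts disabled.
 */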
static void gsc_m2m_device_run(void *priv)
{
	struct gsc_ctx *ctx = priv;
	struct gsc_dev *gsc;
	unsigned long flags;
	int ret;
	bool is_set = false;

	if (WARN(!ctx, "null hardware context\n"))
		return;

	gsc = ctx->gsc_dev;
	spin_lock_irqsave(&gsc->slock, flags);

	set_bit(ST_M2M_PEND, &gsc->state);

	/* Reconfigure hardware if the context has changed. */
	if (gsc->m2m.ctx != ctx) {
		pr_debug("gsc->m2m.ctx = 0x%p, current_ctx = 0x%p",
				gsc->m2m.ctx, ctx);
		ctx->state |= GSC_PARAMS;
		gsc->m2m.ctx = ctx;
	}

	is_set = ctx->state & GSC_CTX_STOP_REQ;
	if (is_set) {
		ctx->state &= ~GSC_CTX_STOP_REQ;
		ctx->state |= GSC_CTX_ABORT;
		wake_up(&gsc->irq_queue);
		goto put_device;
	}

	ret = gsc_get_bufs(ctx);
	if (ret) {
		pr_err("Wrong address");
		goto put_device;
	}

	gsc_set_prefbuf(gsc, &ctx->s_frame);
	gsc_hw_set_input_addr(gsc, &ctx->s_frame.addr, GSC_M2M_BUF_NUM);
	gsc_hw_set_output_addr(gsc, &ctx->d_frame.addr, GSC_M2M_BUF_NUM);

	if (ctx->state & GSC_PARAMS) {
		gsc_hw_set_input_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
		gsc_hw_set_output_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
		gsc_hw_set_frm_done_irq_mask(gsc, false);
		gsc_hw_set_gsc_irq_enable(gsc, true);

		if (gsc_set_scaler_info(ctx)) {
			pr_err("Scaler setup error");
			goto put_device;
		}

		gsc_hw_set_input_path(ctx);
		gsc_hw_set_in_size(ctx);
		gsc_hw_set_in_image_format(ctx);

		gsc_hw_set_output_path(ctx);
		gsc_hw_set_out_size(ctx);
		gsc_hw_set_out_image_format(ctx);

		gsc_hw_set_prescaler(ctx);
		gsc_hw_set_mainscaler(ctx);
		gsc_hw_set_rotation(ctx);
		gsc_hw_set_global_alpha(ctx);
	}

	/* update shadow registers */
	gsc_hw_set_sfr_update(ctx);

	ctx->state &= ~GSC_PARAMS;
	gsc_hw_enable_control(gsc, true);

	spin_unlock_irqrestore(&gsc->slock, flags);
	return;

put_device:
	ctx->state &= ~GSC_PARAMS;
	spin_unlock_irqrestore(&gsc->slock, flags);
}

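/* Report the plane count and per-plane sizes of the currently set format. */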
static int gsc_m2m_queue_setup(struct vb2_queue *vq,
			unsigned int *num_buffers, unsigned int *num_planes,
			unsigned int sizes[], struct device *alloc_devs[])
{
	struct gsc_ctx *ctx = vb2_get_drv_priv(vq);
	struct gsc_frame *frame;
	int i;

	frame = ctx_get_frame(ctx, vq->type);
	if (IS_ERR(frame))
		return PTR_ERR(frame);

	if (!frame->fmt)
		return -EINVAL;

	*num_planes = frame->fmt->num_planes;
	for (i = 0; i < frame->fmt->num_planes; i++)
		sizes[i] = frame->payload[i];
	return 0;
}

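/* Set the expected payload on each plane of a capture buffer. */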
static int gsc_m2m_buf_prepare(struct vb2_buffer *vb)
{
	struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct gsc_frame *frame;
	int i;

	frame = ctx_get_frame(ctx, vb->vb2_queue->type);
	if (IS_ERR(frame))
		return PTR_ERR(frame);

	if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
		for (i = 0; i < frame->fmt->num_planes; i++)
			vb2_set_plane_payload(vb, i, frame->payload[i]);
	}

	return 0;
}

static void gsc_m2m_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	pr_debug("ctx: %p, ctx->state: 0x%x", ctx, ctx->state);

	if (ctx->m2m_ctx)
		v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
}

static const struct vb2_ops gsc_m2m_qops = {
	.queue_setup	 = gsc_m2m_queue_setup,
	.buf_prepare	 = gsc_m2m_buf_prepare,
	.buf_queue	 = gsc_m2m_buf_queue,
	.wait_prepare	 = vb2_ops_wait_prepare,
	.wait_finish	 = vb2_ops_wait_finish,
	.stop_streaming	 = gsc_m2m_stop_streaming,
	.start_streaming = gsc_m2m_start_streaming,
};

static int gsc_m2m_querycap(struct file *file, void *fh,
			   struct v4l2_capability *cap)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	struct gsc_dev *gsc = ctx->gsc_dev;

	strlcpy(cap->driver, GSC_MODULE_NAME, sizeof(cap->driver));
	strlcpy(cap->card, GSC_MODULE_NAME " gscaler", sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
		 dev_name(&gsc->pdev->dev));
	cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
	return 0;
}

static int gsc_m2m_enum_fmt_mplane(struct file *file, void *priv,
				struct v4l2_fmtdesc *f)
{
	return gsc_enum_fmt_mplane(f);
}

static int gsc_m2m_g_fmt_mplane(struct file *file, void *fh,
			     struct v4l2_format *f)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);

	return gsc_g_fmt_mplane(ctx, f);
}

static int gsc_m2m_try_fmt_mplane(struct file *file, void *fh,
				  struct v4l2_format *f)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);

	return gsc_try_fmt_mplane(ctx, f);
}

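/*
 * gsc_m2m_s_fmt_mplane - set the format on the source or destination frame
 *
 * The format is validated with the try handler first and rejected with
 * -EBUSY if the corresponding queue is already streaming.
 */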
static int gsc_m2m_s_fmt_mplane(struct file *file, void *fh,
				 struct v4l2_format *f)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	struct vb2_queue *vq;
	struct gsc_frame *frame;
	struct v4l2_pix_format_mplane *pix;
	int i, ret = 0;

	ret = gsc_m2m_try_fmt_mplane(file, fh, f);
	if (ret)
		return ret;

	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);

	if (vb2_is_streaming(vq)) {
		pr_err("queue (%d) busy", f->type);
		return -EBUSY;
	}

	if (V4L2_TYPE_IS_OUTPUT(f->type))
		frame = &ctx->s_frame;
	else
		frame = &ctx->d_frame;

	pix = &f->fmt.pix_mp;
	frame->fmt = find_fmt(&pix->pixelformat, NULL, 0);
	frame->colorspace = pix->colorspace;
	if (!frame->fmt)
		return -EINVAL;

	for (i = 0; i < frame->fmt->num_planes; i++)
		frame->payload[i] = pix->plane_fmt[i].sizeimage;

	gsc_set_frame_size(frame, pix->width, pix->height);

	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		gsc_ctx_state_lock_set(GSC_PARAMS | GSC_DST_FMT, ctx);
	else
		gsc_ctx_state_lock_set(GSC_PARAMS | GSC_SRC_FMT, ctx);

	pr_debug("f_w: %d, f_h: %d", frame->f_width, frame->f_height);

	return 0;
}

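/* Limit the requested buffer count to the variant's per-queue maximum. */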
static int gsc_m2m_reqbufs(struct file *file, void *fh,
			  struct v4l2_requestbuffers *reqbufs)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	struct gsc_dev *gsc = ctx->gsc_dev;
	u32 max_cnt;

	max_cnt = (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ?
		gsc->variant->in_buf_cnt : gsc->variant->out_buf_cnt;
	if (reqbufs->count > max_cnt)
		return -EINVAL;

	return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
}

static int gsc_m2m_expbuf(struct file *file, void *fh,
				struct v4l2_exportbuffer *eb)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb);
}

static int gsc_m2m_querybuf(struct file *file, void *fh,
					struct v4l2_buffer *buf)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
}

static int gsc_m2m_qbuf(struct file *file, void *fh,
			  struct v4l2_buffer *buf)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
}

static int gsc_m2m_dqbuf(struct file *file, void *fh,
			   struct v4l2_buffer *buf)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
}

static int gsc_m2m_streamon(struct file *file, void *fh,
			   enum v4l2_buf_type type)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);

	/* The source and target color formats need to be set */
	if (V4L2_TYPE_IS_OUTPUT(type)) {
		if (!gsc_ctx_state_is_set(GSC_SRC_FMT, ctx))
			return -EINVAL;
	} else if (!gsc_ctx_state_is_set(GSC_DST_FMT, ctx)) {
		return -EINVAL;
	}

	return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
}

static int gsc_m2m_streamoff(struct file *file, void *fh,
			    enum v4l2_buf_type type)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
}

/* Return 1 if rectangle a is enclosed in rectangle b, or 0 otherwise. */
static int is_rectangle_enclosed(struct v4l2_rect *a, struct v4l2_rect *b)
{
	if (a->left < b->left || a->top < b->top)
		return 0;

	if (a->left + a->width > b->left + b->width)
		return 0;

	if (a->top + a->height > b->top + b->height)
		return 0;

	return 1;
}

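/* Report the full frame bounds or the currently set crop/compose rectangle. */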
static int gsc_m2m_g_selection(struct file *file, void *fh,
			struct v4l2_selection *s)
{
	struct gsc_frame *frame;
	struct gsc_ctx *ctx = fh_to_ctx(fh);

	if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
	    (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
		return -EINVAL;

	frame = ctx_get_frame(ctx, s->type);
	if (IS_ERR(frame))
		return PTR_ERR(frame);

	switch (s->target) {
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
	case V4L2_SEL_TGT_CROP_BOUNDS:
	case V4L2_SEL_TGT_CROP_DEFAULT:
		s->r.left = 0;
		s->r.top = 0;
		s->r.width = frame->f_width;
		s->r.height = frame->f_height;
		return 0;

	case V4L2_SEL_TGT_COMPOSE:
	case V4L2_SEL_TGT_CROP:
		s->r.left = frame->crop.left;
		s->r.top = frame->crop.top;
		s->r.width = frame->crop.width;
		s->r.height = frame->crop.height;
		return 0;
	}

	return -EINVAL;
}

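/*
 * gsc_m2m_s_selection - set a new crop or compose rectangle
 *
 * The requested rectangle is adjusted by gsc_try_crop() and, once both
 * source and destination formats are set, validated against the supported
 * scaling ratio range before being applied to the selected frame.
 */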
static int gsc_m2m_s_selection(struct file *file, void *fh,
				struct v4l2_selection *s)
{
	struct gsc_frame *frame;
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	struct v4l2_crop cr;
	struct gsc_variant *variant = ctx->gsc_dev->variant;
	int ret;

	cr.type = s->type;
	cr.c = s->r;

	if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
	    (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
		return -EINVAL;

	ret = gsc_try_crop(ctx, &cr);
	if (ret)
		return ret;

	if (s->flags & V4L2_SEL_FLAG_LE &&
	    !is_rectangle_enclosed(&cr.c, &s->r))
		return -ERANGE;

	if (s->flags & V4L2_SEL_FLAG_GE &&
	    !is_rectangle_enclosed(&s->r, &cr.c))
		return -ERANGE;

	s->r = cr.c;

	switch (s->target) {
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE:
		frame = &ctx->s_frame;
		break;

	case V4L2_SEL_TGT_CROP_BOUNDS:
	case V4L2_SEL_TGT_CROP:
	case V4L2_SEL_TGT_CROP_DEFAULT:
		frame = &ctx->d_frame;
		break;

	default:
		return -EINVAL;
	}

	/* Check to see if scaling ratio is within supported range */
	if (gsc_ctx_state_is_set(GSC_DST_FMT | GSC_SRC_FMT, ctx)) {
		if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
			ret = gsc_check_scaler_ratio(variant, cr.c.width,
				cr.c.height, ctx->d_frame.crop.width,
				ctx->d_frame.crop.height,
				ctx->gsc_ctrls.rotate->val, ctx->out_path);
		} else {
			ret = gsc_check_scaler_ratio(variant,
				ctx->s_frame.crop.width,
				ctx->s_frame.crop.height, cr.c.width,
				cr.c.height, ctx->gsc_ctrls.rotate->val,
				ctx->out_path);
		}

		if (ret) {
			pr_err("Out of scaler range");
			return -EINVAL;
		}
	}

	frame->crop = cr.c;

	gsc_ctx_state_lock_set(GSC_PARAMS, ctx);
	return 0;
}

static const struct v4l2_ioctl_ops gsc_m2m_ioctl_ops = {
	.vidioc_querycap		= gsc_m2m_querycap,
	.vidioc_enum_fmt_vid_cap_mplane	= gsc_m2m_enum_fmt_mplane,
	.vidioc_enum_fmt_vid_out_mplane	= gsc_m2m_enum_fmt_mplane,
	.vidioc_g_fmt_vid_cap_mplane	= gsc_m2m_g_fmt_mplane,
	.vidioc_g_fmt_vid_out_mplane	= gsc_m2m_g_fmt_mplane,
	.vidioc_try_fmt_vid_cap_mplane	= gsc_m2m_try_fmt_mplane,
	.vidioc_try_fmt_vid_out_mplane	= gsc_m2m_try_fmt_mplane,
	.vidioc_s_fmt_vid_cap_mplane	= gsc_m2m_s_fmt_mplane,
	.vidioc_s_fmt_vid_out_mplane	= gsc_m2m_s_fmt_mplane,
	.vidioc_reqbufs			= gsc_m2m_reqbufs,
	.vidioc_expbuf			= gsc_m2m_expbuf,
	.vidioc_querybuf		= gsc_m2m_querybuf,
	.vidioc_qbuf			= gsc_m2m_qbuf,
	.vidioc_dqbuf			= gsc_m2m_dqbuf,
	.vidioc_streamon		= gsc_m2m_streamon,
	.vidioc_streamoff		= gsc_m2m_streamoff,
	.vidioc_g_selection		= gsc_m2m_g_selection,
	.vidioc_s_selection		= gsc_m2m_s_selection
};

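/* Initialize the source and destination vb2 queues of a new context. */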
static int queue_init(void *priv, struct vb2_queue *src_vq,
			struct vb2_queue *dst_vq)
{
	struct gsc_ctx *ctx = priv;
	int ret;

	memset(src_vq, 0, sizeof(*src_vq));
	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	src_vq->drv_priv = ctx;
	src_vq->ops = &gsc_m2m_qops;
	src_vq->mem_ops = &vb2_dma_contig_memops;
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->lock = &ctx->gsc_dev->lock;
	src_vq->dev = &ctx->gsc_dev->pdev->dev;

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	memset(dst_vq, 0, sizeof(*dst_vq));
	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	dst_vq->drv_priv = ctx;
	dst_vq->ops = &gsc_m2m_qops;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->lock = &ctx->gsc_dev->lock;
	dst_vq->dev = &ctx->gsc_dev->pdev->dev;

	return vb2_queue_init(dst_vq);
}

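/*
 * gsc_m2m_open - create a new mem-to-mem context for the opened file
 *
 * Allocate the context, set up its controls, default formats and m2m
 * context, and mark the device as open on the first user.
 */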
static int gsc_m2m_open(struct file *file)
{
	struct gsc_dev *gsc = video_drvdata(file);
	struct gsc_ctx *ctx = NULL;
	int ret;

	pr_debug("pid: %d, state: 0x%lx", task_pid_nr(current), gsc->state);

	if (mutex_lock_interruptible(&gsc->lock))
		return -ERESTARTSYS;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		ret = -ENOMEM;
		goto unlock;
	}

	v4l2_fh_init(&ctx->fh, gsc->m2m.vfd);
	ret = gsc_ctrls_create(ctx);
	if (ret)
		goto error_fh;

	/* Use a separate control handler per file handle */
	ctx->fh.ctrl_handler = &ctx->ctrl_handler;
	file->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);

	ctx->gsc_dev = gsc;
	/* Default color format */
	ctx->s_frame.fmt = get_format(0);
	ctx->d_frame.fmt = get_format(0);
	/* Setup the device context for mem2mem mode. */
	ctx->state = GSC_CTX_M2M;
	ctx->flags = 0;
	ctx->in_path = GSC_DMA;
	ctx->out_path = GSC_DMA;

	ctx->m2m_ctx = v4l2_m2m_ctx_init(gsc->m2m.m2m_dev, ctx, queue_init);
	if (IS_ERR(ctx->m2m_ctx)) {
		pr_err("Failed to initialize m2m context");
		ret = PTR_ERR(ctx->m2m_ctx);
		goto error_ctrls;
	}

	if (gsc->m2m.refcnt++ == 0)
		set_bit(ST_M2M_OPEN, &gsc->state);

	pr_debug("gsc m2m driver is opened, ctx(0x%p)", ctx);

	mutex_unlock(&gsc->lock);
	return 0;

error_ctrls:
	gsc_ctrls_delete(ctx);
	v4l2_fh_del(&ctx->fh);
error_fh:
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);
unlock:
	mutex_unlock(&gsc->lock);
	return ret;
}

static int gsc_m2m_release(struct file *file)
{
	struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
	struct gsc_dev *gsc = ctx->gsc_dev;

	pr_debug("pid: %d, state: 0x%lx, refcnt= %d",
		task_pid_nr(current), gsc->state, gsc->m2m.refcnt);

	mutex_lock(&gsc->lock);

	v4l2_m2m_ctx_release(ctx->m2m_ctx);
	gsc_ctrls_delete(ctx);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);

	if (--gsc->m2m.refcnt <= 0)
		clear_bit(ST_M2M_OPEN, &gsc->state);
	kfree(ctx);

	mutex_unlock(&gsc->lock);
	return 0;
}

static __poll_t gsc_m2m_poll(struct file *file,
					struct poll_table_struct *wait)
{
	struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
	struct gsc_dev *gsc = ctx->gsc_dev;
	__poll_t ret;

	if (mutex_lock_interruptible(&gsc->lock))
		return EPOLLERR;

	ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
	mutex_unlock(&gsc->lock);

	return ret;
}

static int gsc_m2m_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
	struct gsc_dev *gsc = ctx->gsc_dev;
	int ret;

	if (mutex_lock_interruptible(&gsc->lock))
		return -ERESTARTSYS;

	ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
	mutex_unlock(&gsc->lock);

	return ret;
}

static const struct v4l2_file_operations gsc_m2m_fops = {
	.owner		= THIS_MODULE,
	.open		= gsc_m2m_open,
	.release	= gsc_m2m_release,
	.poll		= gsc_m2m_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= gsc_m2m_mmap,
};

static const struct v4l2_m2m_ops gsc_m2m_ops = {
	.device_run	= gsc_m2m_device_run,
	.job_abort	= gsc_m2m_job_abort,
};

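/* Register the mem-to-mem video device and the v4l2-m2m core instance. */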
int gsc_register_m2m_device(struct gsc_dev *gsc)
{
	struct platform_device *pdev;
	int ret;

	if (!gsc)
		return -ENODEV;

	pdev = gsc->pdev;

	gsc->vdev.fops		= &gsc_m2m_fops;
	gsc->vdev.ioctl_ops	= &gsc_m2m_ioctl_ops;
	gsc->vdev.release	= video_device_release_empty;
	gsc->vdev.lock		= &gsc->lock;
	gsc->vdev.vfl_dir	= VFL_DIR_M2M;
	gsc->vdev.v4l2_dev	= &gsc->v4l2_dev;
	snprintf(gsc->vdev.name, sizeof(gsc->vdev.name), "%s.%d:m2m",
					GSC_MODULE_NAME, gsc->id);

	video_set_drvdata(&gsc->vdev, gsc);

	gsc->m2m.vfd = &gsc->vdev;
	gsc->m2m.m2m_dev = v4l2_m2m_init(&gsc_m2m_ops);
	if (IS_ERR(gsc->m2m.m2m_dev)) {
		dev_err(&pdev->dev, "failed to initialize v4l2-m2m device\n");
		return PTR_ERR(gsc->m2m.m2m_dev);
	}

	ret = video_register_device(&gsc->vdev, VFL_TYPE_GRABBER, -1);
	if (ret) {
		dev_err(&pdev->dev,
			 "%s(): failed to register video device\n", __func__);
		goto err_m2m_release;
	}

	pr_debug("gsc m2m driver registered as /dev/video%d", gsc->vdev.num);
	return 0;

err_m2m_release:
	v4l2_m2m_release(gsc->m2m.m2m_dev);

	return ret;
}

void gsc_unregister_m2m_device(struct gsc_dev *gsc)
{
	if (gsc) {
		v4l2_m2m_release(gsc->m2m.m2m_dev);
		video_unregister_device(&gsc->vdev);
	}
}