// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2011 - 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Samsung EXYNOS5 SoC series G-Scaler driver
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/clk.h>

#include <media/v4l2-ioctl.h>

#include "gsc-core.h"

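/*
 * Ask the currently running m2m job to stop: set the STOP_REQ flag and
 * wait (with a timeout) for the interrupt handler to clear it.
 */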
static int gsc_m2m_ctx_stop_req(struct gsc_ctx *ctx)
{
	struct gsc_ctx *curr_ctx;
	struct gsc_dev *gsc = ctx->gsc_dev;
	int ret;

	curr_ctx = v4l2_m2m_get_curr_priv(gsc->m2m.m2m_dev);
	if (!gsc_m2m_pending(gsc) || (curr_ctx != ctx))
		return 0;

	gsc_ctx_state_lock_set(GSC_CTX_STOP_REQ, ctx);
	ret = wait_event_timeout(gsc->irq_queue,
			!gsc_ctx_state_is_set(GSC_CTX_STOP_REQ, ctx),
			GSC_SHUTDOWN_TIMEOUT);

	return ret == 0 ? -ETIMEDOUT : ret;
}

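/*
 * Abort the active job: if the stop request timed out or an abort is
 * already pending, clear the flags and finish the job with an error.
 */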
static void __gsc_m2m_job_abort(struct gsc_ctx *ctx)
{
	int ret;

	ret = gsc_m2m_ctx_stop_req(ctx);
	if ((ret == -ETIMEDOUT) || (ctx->state & GSC_CTX_ABORT)) {
		gsc_ctx_state_lock_clear(GSC_CTX_STOP_REQ | GSC_CTX_ABORT, ctx);
		gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
	}
}

static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct gsc_ctx *ctx = q->drv_priv;

	return pm_runtime_resume_and_get(&ctx->gsc_dev->pdev->dev);
}

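/* Return all queued source and destination buffers with the ERROR state. */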
static void __gsc_m2m_cleanup_queue(struct gsc_ctx *ctx)
{
	struct vb2_v4l2_buffer *src_vb, *dst_vb;

	while (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) > 0) {
		src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
		v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
	}

	while (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) > 0) {
		dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
		v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
	}
}

static void gsc_m2m_stop_streaming(struct vb2_queue *q)
{
	struct gsc_ctx *ctx = q->drv_priv;

	__gsc_m2m_job_abort(ctx);

	__gsc_m2m_cleanup_queue(ctx);

	pm_runtime_put(&ctx->gsc_dev->pdev->dev);
}

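/*
 * Complete the current job: propagate the timestamp, timecode and timestamp
 * source flags from the source to the destination buffer, mark both buffers
 * done and notify the m2m framework.
 */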
void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state)
{
	struct vb2_v4l2_buffer *src_vb, *dst_vb;

	if (!ctx || !ctx->m2m_ctx)
		return;

	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

	if (src_vb && dst_vb) {
		dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
		dst_vb->timecode = src_vb->timecode;
		dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
		dst_vb->flags |=
			src_vb->flags
			& V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

		v4l2_m2m_buf_done(src_vb, vb_state);
		v4l2_m2m_buf_done(dst_vb, vb_state);

		v4l2_m2m_job_finish(ctx->gsc_dev->m2m.m2m_dev,
				    ctx->m2m_ctx);
	}
}

static void gsc_m2m_job_abort(void *priv)
{
	__gsc_m2m_job_abort((struct gsc_ctx *)priv);
}

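/* Prepare the DMA addresses of the next source and destination buffers. */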
static int gsc_get_bufs(struct gsc_ctx *ctx)
{
	struct gsc_frame *s_frame, *d_frame;
	struct vb2_v4l2_buffer *src_vb, *dst_vb;
	int ret;

	s_frame = &ctx->s_frame;
	d_frame = &ctx->d_frame;

	src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	ret = gsc_prepare_addr(ctx, &src_vb->vb2_buf, s_frame, &s_frame->addr);
	if (ret)
		return ret;

	dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
	ret = gsc_prepare_addr(ctx, &dst_vb->vb2_buf, d_frame, &d_frame->addr);
	if (ret)
		return ret;

	dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;

	return 0;
}

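/*
 * Run one m2m job: program the buffer addresses and, if the context has
 * changed (GSC_PARAMS set), reconfigure the scaler, formats, rotation and
 * global alpha before kicking off the hardware.
 */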
static void gsc_m2m_device_run(void *priv)
{
	struct gsc_ctx *ctx = priv;
	struct gsc_dev *gsc;
	unsigned long flags;
	int ret;
	bool is_set = false;

	if (WARN(!ctx, "null hardware context\n"))
		return;

	gsc = ctx->gsc_dev;
	spin_lock_irqsave(&gsc->slock, flags);

	set_bit(ST_M2M_PEND, &gsc->state);

	/* Reconfigure hardware if the context has changed. */
	if (gsc->m2m.ctx != ctx) {
		pr_debug("gsc->m2m.ctx = 0x%p, current_ctx = 0x%p",
			 gsc->m2m.ctx, ctx);
		ctx->state |= GSC_PARAMS;
		gsc->m2m.ctx = ctx;
	}

	is_set = ctx->state & GSC_CTX_STOP_REQ;
	if (is_set) {
		ctx->state &= ~GSC_CTX_STOP_REQ;
		ctx->state |= GSC_CTX_ABORT;
		wake_up(&gsc->irq_queue);
		goto put_device;
	}

	ret = gsc_get_bufs(ctx);
	if (ret) {
		pr_err("Wrong address");
		goto put_device;
	}

	gsc_set_prefbuf(gsc, &ctx->s_frame);
	gsc_hw_set_input_addr(gsc, &ctx->s_frame.addr, GSC_M2M_BUF_NUM);
	gsc_hw_set_output_addr(gsc, &ctx->d_frame.addr, GSC_M2M_BUF_NUM);

	if (ctx->state & GSC_PARAMS) {
		gsc_hw_set_input_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
		gsc_hw_set_output_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
		gsc_hw_set_frm_done_irq_mask(gsc, false);
		gsc_hw_set_gsc_irq_enable(gsc, true);

		if (gsc_set_scaler_info(ctx)) {
			pr_err("Scaler setup error");
			goto put_device;
		}

		gsc_hw_set_input_path(ctx);
		gsc_hw_set_in_size(ctx);
		gsc_hw_set_in_image_format(ctx);

		gsc_hw_set_output_path(ctx);
		gsc_hw_set_out_size(ctx);
		gsc_hw_set_out_image_format(ctx);

		gsc_hw_set_prescaler(ctx);
		gsc_hw_set_mainscaler(ctx);
		gsc_hw_set_rotation(ctx);
		gsc_hw_set_global_alpha(ctx);
	}

	/* update shadow registers */
	gsc_hw_set_sfr_update(ctx);

	ctx->state &= ~GSC_PARAMS;
	gsc_hw_enable_control(gsc, true);

	spin_unlock_irqrestore(&gsc->slock, flags);
	return;

put_device:
	ctx->state &= ~GSC_PARAMS;
	spin_unlock_irqrestore(&gsc->slock, flags);
}

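/* Report the number of planes and per-plane sizes for the queue's format. */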
static int gsc_m2m_queue_setup(struct vb2_queue *vq,
			unsigned int *num_buffers, unsigned int *num_planes,
			unsigned int sizes[], struct device *alloc_devs[])
{
	struct gsc_ctx *ctx = vb2_get_drv_priv(vq);
	struct gsc_frame *frame;
	int i;

	frame = ctx_get_frame(ctx, vq->type);
	if (IS_ERR(frame))
		return PTR_ERR(frame);

	if (!frame->fmt)
		return -EINVAL;

	*num_planes = frame->fmt->num_planes;
	for (i = 0; i < frame->fmt->num_planes; i++)
		sizes[i] = frame->payload[i];
	return 0;
}

static int gsc_m2m_buf_prepare(struct vb2_buffer *vb)
{
	struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct gsc_frame *frame;
	int i;

	frame = ctx_get_frame(ctx, vb->vb2_queue->type);
	if (IS_ERR(frame))
		return PTR_ERR(frame);

	if (V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type)) {
		for (i = 0; i < frame->fmt->num_planes; i++)
			vb2_set_plane_payload(vb, i, frame->payload[i]);
	}

	return 0;
}

static void gsc_m2m_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	pr_debug("ctx: %p, ctx->state: 0x%x", ctx, ctx->state);

	if (ctx->m2m_ctx)
		v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
}

static const struct vb2_ops gsc_m2m_qops = {
	.queue_setup = gsc_m2m_queue_setup,
	.buf_prepare = gsc_m2m_buf_prepare,
	.buf_queue = gsc_m2m_buf_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.stop_streaming = gsc_m2m_stop_streaming,
	.start_streaming = gsc_m2m_start_streaming,
};

static int gsc_m2m_querycap(struct file *file, void *fh,
			    struct v4l2_capability *cap)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	struct gsc_dev *gsc = ctx->gsc_dev;

	strscpy(cap->driver, GSC_MODULE_NAME, sizeof(cap->driver));
	strscpy(cap->card, GSC_MODULE_NAME " gscaler", sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
		 dev_name(&gsc->pdev->dev));
	return 0;
}

static int gsc_m2m_enum_fmt(struct file *file, void *priv,
			    struct v4l2_fmtdesc *f)
{
	return gsc_enum_fmt(f);
}

static int gsc_m2m_g_fmt_mplane(struct file *file, void *fh,
				struct v4l2_format *f)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);

	return gsc_g_fmt_mplane(ctx, f);
}

static int gsc_m2m_try_fmt_mplane(struct file *file, void *fh,
				  struct v4l2_format *f)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);

	return gsc_try_fmt_mplane(ctx, f);
}

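/*
 * Set the format on the source or destination queue after validating it
 * with try_fmt; rejected with -EBUSY while the queue is streaming.
 */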
static int gsc_m2m_s_fmt_mplane(struct file *file, void *fh,
				struct v4l2_format *f)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	struct vb2_queue *vq;
	struct gsc_frame *frame;
	struct v4l2_pix_format_mplane *pix;
	int i, ret = 0;

	ret = gsc_m2m_try_fmt_mplane(file, fh, f);
	if (ret)
		return ret;

	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);

	if (vb2_is_streaming(vq)) {
		pr_err("queue (%d) busy", f->type);
		return -EBUSY;
	}

	if (V4L2_TYPE_IS_OUTPUT(f->type))
		frame = &ctx->s_frame;
	else
		frame = &ctx->d_frame;

	pix = &f->fmt.pix_mp;
	frame->fmt = find_fmt(&pix->pixelformat, NULL, 0);
	frame->colorspace = pix->colorspace;
	if (!frame->fmt)
		return -EINVAL;

	for (i = 0; i < frame->fmt->num_planes; i++)
		frame->payload[i] = pix->plane_fmt[i].sizeimage;

	gsc_set_frame_size(frame, pix->width, pix->height);

	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		gsc_ctx_state_lock_set(GSC_PARAMS | GSC_DST_FMT, ctx);
	else
		gsc_ctx_state_lock_set(GSC_PARAMS | GSC_SRC_FMT, ctx);

	pr_debug("f_w: %d, f_h: %d", frame->f_width, frame->f_height);

	return 0;
}

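/* Limit the requested buffer count to what the hardware variant supports. */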
static int gsc_m2m_reqbufs(struct file *file, void *fh,
			   struct v4l2_requestbuffers *reqbufs)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	struct gsc_dev *gsc = ctx->gsc_dev;
	u32 max_cnt;

	max_cnt = (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ?
		gsc->variant->in_buf_cnt : gsc->variant->out_buf_cnt;
	if (reqbufs->count > max_cnt)
		return -EINVAL;

	return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
}

static int gsc_m2m_expbuf(struct file *file, void *fh,
			  struct v4l2_exportbuffer *eb)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb);
}

static int gsc_m2m_querybuf(struct file *file, void *fh,
			    struct v4l2_buffer *buf)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
}

static int gsc_m2m_qbuf(struct file *file, void *fh,
			struct v4l2_buffer *buf)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
}

static int gsc_m2m_dqbuf(struct file *file, void *fh,
			 struct v4l2_buffer *buf)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
}

static int gsc_m2m_streamon(struct file *file, void *fh,
			    enum v4l2_buf_type type)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);

	/* The source and target color format need to be set */
	if (V4L2_TYPE_IS_OUTPUT(type)) {
		if (!gsc_ctx_state_is_set(GSC_SRC_FMT, ctx))
			return -EINVAL;
	} else if (!gsc_ctx_state_is_set(GSC_DST_FMT, ctx)) {
		return -EINVAL;
	}

	return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
}

static int gsc_m2m_streamoff(struct file *file, void *fh,
			     enum v4l2_buf_type type)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
}

/* Return 1 if rectangle a is enclosed in rectangle b, or 0 otherwise. */
static int is_rectangle_enclosed(struct v4l2_rect *a, struct v4l2_rect *b)
{
	if (a->left < b->left || a->top < b->top)
		return 0;

	if (a->left + a->width > b->left + b->width)
		return 0;

	if (a->top + a->height > b->top + b->height)
		return 0;

	return 1;
}

static int gsc_m2m_g_selection(struct file *file, void *fh,
			       struct v4l2_selection *s)
{
	struct gsc_frame *frame;
	struct gsc_ctx *ctx = fh_to_ctx(fh);

	if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
	    (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
		return -EINVAL;

	frame = ctx_get_frame(ctx, s->type);
	if (IS_ERR(frame))
		return PTR_ERR(frame);

	switch (s->target) {
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
	case V4L2_SEL_TGT_CROP_BOUNDS:
	case V4L2_SEL_TGT_CROP_DEFAULT:
		s->r.left = 0;
		s->r.top = 0;
		s->r.width = frame->f_width;
		s->r.height = frame->f_height;
		return 0;

	case V4L2_SEL_TGT_COMPOSE:
	case V4L2_SEL_TGT_CROP:
		s->r.left = frame->crop.left;
		s->r.top = frame->crop.top;
		s->r.width = frame->crop.width;
		s->r.height = frame->crop.height;
		return 0;
	}

	return -EINVAL;
}

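/*
 * Set the crop/compose rectangle, honouring the LE/GE constraint flags and
 * checking that the resulting scaling ratio stays within the supported range.
 */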
static int gsc_m2m_s_selection(struct file *file, void *fh,
			       struct v4l2_selection *s)
{
	struct gsc_frame *frame;
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	struct gsc_variant *variant = ctx->gsc_dev->variant;
	struct v4l2_selection sel = *s;
	int ret;

	if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
	    (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
		return -EINVAL;

	ret = gsc_try_selection(ctx, &sel);
	if (ret)
		return ret;

	if (s->flags & V4L2_SEL_FLAG_LE &&
	    !is_rectangle_enclosed(&sel.r, &s->r))
		return -ERANGE;

	if (s->flags & V4L2_SEL_FLAG_GE &&
	    !is_rectangle_enclosed(&s->r, &sel.r))
		return -ERANGE;

	s->r = sel.r;

	switch (s->target) {
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE:
		frame = &ctx->s_frame;
		break;

	case V4L2_SEL_TGT_CROP_BOUNDS:
	case V4L2_SEL_TGT_CROP:
	case V4L2_SEL_TGT_CROP_DEFAULT:
		frame = &ctx->d_frame;
		break;

	default:
		return -EINVAL;
	}

	/* Check to see if scaling ratio is within supported range */
	if (gsc_ctx_state_is_set(GSC_DST_FMT | GSC_SRC_FMT, ctx)) {
		if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
			ret = gsc_check_scaler_ratio(variant, sel.r.width,
				sel.r.height, ctx->d_frame.crop.width,
				ctx->d_frame.crop.height,
				ctx->gsc_ctrls.rotate->val, ctx->out_path);
		} else {
			ret = gsc_check_scaler_ratio(variant,
				ctx->s_frame.crop.width,
				ctx->s_frame.crop.height, sel.r.width,
				sel.r.height, ctx->gsc_ctrls.rotate->val,
				ctx->out_path);
		}

		if (ret) {
			pr_err("Out of scaler range");
			return -EINVAL;
		}
	}

	frame->crop = sel.r;

	gsc_ctx_state_lock_set(GSC_PARAMS, ctx);
	return 0;
}

static const struct v4l2_ioctl_ops gsc_m2m_ioctl_ops = {
	.vidioc_querycap = gsc_m2m_querycap,
	.vidioc_enum_fmt_vid_cap = gsc_m2m_enum_fmt,
	.vidioc_enum_fmt_vid_out = gsc_m2m_enum_fmt,
	.vidioc_g_fmt_vid_cap_mplane = gsc_m2m_g_fmt_mplane,
	.vidioc_g_fmt_vid_out_mplane = gsc_m2m_g_fmt_mplane,
	.vidioc_try_fmt_vid_cap_mplane = gsc_m2m_try_fmt_mplane,
	.vidioc_try_fmt_vid_out_mplane = gsc_m2m_try_fmt_mplane,
	.vidioc_s_fmt_vid_cap_mplane = gsc_m2m_s_fmt_mplane,
	.vidioc_s_fmt_vid_out_mplane = gsc_m2m_s_fmt_mplane,
	.vidioc_reqbufs = gsc_m2m_reqbufs,
	.vidioc_expbuf = gsc_m2m_expbuf,
	.vidioc_querybuf = gsc_m2m_querybuf,
	.vidioc_qbuf = gsc_m2m_qbuf,
	.vidioc_dqbuf = gsc_m2m_dqbuf,
	.vidioc_streamon = gsc_m2m_streamon,
	.vidioc_streamoff = gsc_m2m_streamoff,
	.vidioc_g_selection = gsc_m2m_g_selection,
	.vidioc_s_selection = gsc_m2m_s_selection
};

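/* Initialize the source and destination vb2 queues for a new context. */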
static int queue_init(void *priv, struct vb2_queue *src_vq,
		      struct vb2_queue *dst_vq)
{
	struct gsc_ctx *ctx = priv;
	int ret;

	memset(src_vq, 0, sizeof(*src_vq));
	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	src_vq->drv_priv = ctx;
	src_vq->ops = &gsc_m2m_qops;
	src_vq->mem_ops = &vb2_dma_contig_memops;
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->lock = &ctx->gsc_dev->lock;
	src_vq->dev = &ctx->gsc_dev->pdev->dev;

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	memset(dst_vq, 0, sizeof(*dst_vq));
	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	dst_vq->drv_priv = ctx;
	dst_vq->ops = &gsc_m2m_qops;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->lock = &ctx->gsc_dev->lock;
	dst_vq->dev = &ctx->gsc_dev->pdev->dev;

	return vb2_queue_init(dst_vq);
}

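/*
 * Open a new mem-to-mem context: allocate it, create the per-file-handle
 * controls and set up the default source/destination formats and DMA paths.
 */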
static int gsc_m2m_open(struct file *file)
{
	struct gsc_dev *gsc = video_drvdata(file);
	struct gsc_ctx *ctx = NULL;
	int ret;

	pr_debug("pid: %d, state: 0x%lx", task_pid_nr(current), gsc->state);

	if (mutex_lock_interruptible(&gsc->lock))
		return -ERESTARTSYS;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		ret = -ENOMEM;
		goto unlock;
	}

	v4l2_fh_init(&ctx->fh, gsc->m2m.vfd);
	ret = gsc_ctrls_create(ctx);
	if (ret)
		goto error_fh;

	/* Use separate control handler per file handle */
	ctx->fh.ctrl_handler = &ctx->ctrl_handler;
	file->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);

	ctx->gsc_dev = gsc;
	/* Default color format */
	ctx->s_frame.fmt = get_format(0);
	ctx->d_frame.fmt = get_format(0);
	/* Setup the device context for mem2mem mode. */
	ctx->state = GSC_CTX_M2M;
	ctx->flags = 0;
	ctx->in_path = GSC_DMA;
	ctx->out_path = GSC_DMA;

	ctx->m2m_ctx = v4l2_m2m_ctx_init(gsc->m2m.m2m_dev, ctx, queue_init);
	if (IS_ERR(ctx->m2m_ctx)) {
		pr_err("Failed to initialize m2m context");
		ret = PTR_ERR(ctx->m2m_ctx);
		goto error_ctrls;
	}

	if (gsc->m2m.refcnt++ == 0)
		set_bit(ST_M2M_OPEN, &gsc->state);

	pr_debug("gsc m2m driver is opened, ctx(0x%p)", ctx);

	mutex_unlock(&gsc->lock);
	return 0;

error_ctrls:
	gsc_ctrls_delete(ctx);
	v4l2_fh_del(&ctx->fh);
error_fh:
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);
unlock:
	mutex_unlock(&gsc->lock);
	return ret;
}

static int gsc_m2m_release(struct file *file)
{
	struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
	struct gsc_dev *gsc = ctx->gsc_dev;

	pr_debug("pid: %d, state: 0x%lx, refcnt= %d",
		 task_pid_nr(current), gsc->state, gsc->m2m.refcnt);

	mutex_lock(&gsc->lock);

	v4l2_m2m_ctx_release(ctx->m2m_ctx);
	gsc_ctrls_delete(ctx);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);

	if (--gsc->m2m.refcnt <= 0)
		clear_bit(ST_M2M_OPEN, &gsc->state);
	kfree(ctx);

	mutex_unlock(&gsc->lock);
	return 0;
}

static __poll_t gsc_m2m_poll(struct file *file,
			     struct poll_table_struct *wait)
{
	struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
	struct gsc_dev *gsc = ctx->gsc_dev;
	__poll_t ret;

	if (mutex_lock_interruptible(&gsc->lock))
		return EPOLLERR;

	ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
	mutex_unlock(&gsc->lock);

	return ret;
}

static int gsc_m2m_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
	struct gsc_dev *gsc = ctx->gsc_dev;
	int ret;

	if (mutex_lock_interruptible(&gsc->lock))
		return -ERESTARTSYS;

	ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
	mutex_unlock(&gsc->lock);

	return ret;
}

static const struct v4l2_file_operations gsc_m2m_fops = {
	.owner = THIS_MODULE,
	.open = gsc_m2m_open,
	.release = gsc_m2m_release,
	.poll = gsc_m2m_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = gsc_m2m_mmap,
};

static const struct v4l2_m2m_ops gsc_m2m_ops = {
	.device_run = gsc_m2m_device_run,
	.job_abort = gsc_m2m_job_abort,
};

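/* Register the mem-to-mem video device and the v4l2-m2m framework instance. */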
int gsc_register_m2m_device(struct gsc_dev *gsc)
{
	struct platform_device *pdev;
	int ret;

	if (!gsc)
		return -ENODEV;

	pdev = gsc->pdev;

	gsc->vdev.fops = &gsc_m2m_fops;
	gsc->vdev.ioctl_ops = &gsc_m2m_ioctl_ops;
	gsc->vdev.release = video_device_release_empty;
	gsc->vdev.lock = &gsc->lock;
	gsc->vdev.vfl_dir = VFL_DIR_M2M;
	gsc->vdev.v4l2_dev = &gsc->v4l2_dev;
	gsc->vdev.device_caps = V4L2_CAP_STREAMING |
				V4L2_CAP_VIDEO_M2M_MPLANE;
	snprintf(gsc->vdev.name, sizeof(gsc->vdev.name), "%s.%d:m2m",
		 GSC_MODULE_NAME, gsc->id);

	video_set_drvdata(&gsc->vdev, gsc);

	gsc->m2m.vfd = &gsc->vdev;
	gsc->m2m.m2m_dev = v4l2_m2m_init(&gsc_m2m_ops);
	if (IS_ERR(gsc->m2m.m2m_dev)) {
		dev_err(&pdev->dev, "failed to initialize v4l2-m2m device\n");
		return PTR_ERR(gsc->m2m.m2m_dev);
	}

	ret = video_register_device(&gsc->vdev, VFL_TYPE_VIDEO, -1);
	if (ret) {
		dev_err(&pdev->dev,
			"%s(): failed to register video device\n", __func__);
		goto err_m2m_release;
	}

	pr_debug("gsc m2m driver registered as /dev/video%d", gsc->vdev.num);
	return 0;

err_m2m_release:
	v4l2_m2m_release(gsc->m2m.m2m_dev);

	return ret;
}

void gsc_unregister_m2m_device(struct gsc_dev *gsc)
{
	if (gsc) {
		v4l2_m2m_release(gsc->m2m.m2m_dev);
		video_unregister_device(&gsc->vdev);
	}
}