Lines matching refs:m2m_ctx (drivers/media/v4l2-core/v4l2-mem2mem.c)

110 static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,  in get_queue_ctx()  argument
114 return &m2m_ctx->out_q_ctx; in get_queue_ctx()
116 return &m2m_ctx->cap_q_ctx; in get_queue_ctx()
119 struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_get_vq() argument
124 q_ctx = get_queue_ctx(m2m_ctx, type); in v4l2_m2m_get_vq()
284 struct v4l2_m2m_ctx *m2m_ctx) in __v4l2_m2m_try_queue() argument
288 dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx); in __v4l2_m2m_try_queue()
290 if (!m2m_ctx->out_q_ctx.q.streaming in __v4l2_m2m_try_queue()
291 || !m2m_ctx->cap_q_ctx.q.streaming) { in __v4l2_m2m_try_queue()
299 if (m2m_ctx->job_flags & TRANS_ABORT) { in __v4l2_m2m_try_queue()
305 if (m2m_ctx->job_flags & TRANS_QUEUED) { in __v4l2_m2m_try_queue()
311 spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out); in __v4l2_m2m_try_queue()
312 if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue) in __v4l2_m2m_try_queue()
313 && !m2m_ctx->out_q_ctx.buffered) { in __v4l2_m2m_try_queue()
314 spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, in __v4l2_m2m_try_queue()
320 spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap); in __v4l2_m2m_try_queue()
321 if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue) in __v4l2_m2m_try_queue()
322 && !m2m_ctx->cap_q_ctx.buffered) { in __v4l2_m2m_try_queue()
323 spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, in __v4l2_m2m_try_queue()
325 spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, in __v4l2_m2m_try_queue()
331 spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap); in __v4l2_m2m_try_queue()
332 spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out); in __v4l2_m2m_try_queue()
335 && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) { in __v4l2_m2m_try_queue()
341 list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue); in __v4l2_m2m_try_queue()
342 m2m_ctx->job_flags |= TRANS_QUEUED; in __v4l2_m2m_try_queue()
359 void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx) in v4l2_m2m_try_schedule() argument
361 struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev; in v4l2_m2m_try_schedule()
363 __v4l2_m2m_try_queue(m2m_dev, m2m_ctx); in v4l2_m2m_try_schedule()
377 static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx) in v4l2_m2m_cancel_job() argument
382 m2m_dev = m2m_ctx->m2m_dev; in v4l2_m2m_cancel_job()
385 m2m_ctx->job_flags |= TRANS_ABORT; in v4l2_m2m_cancel_job()
386 if (m2m_ctx->job_flags & TRANS_RUNNING) { in v4l2_m2m_cancel_job()
389 m2m_dev->m2m_ops->job_abort(m2m_ctx->priv); in v4l2_m2m_cancel_job()
390 dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx); in v4l2_m2m_cancel_job()
391 wait_event(m2m_ctx->finished, in v4l2_m2m_cancel_job()
392 !(m2m_ctx->job_flags & TRANS_RUNNING)); in v4l2_m2m_cancel_job()
393 } else if (m2m_ctx->job_flags & TRANS_QUEUED) { in v4l2_m2m_cancel_job()
394 list_del(&m2m_ctx->queue); in v4l2_m2m_cancel_job()
395 m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING); in v4l2_m2m_cancel_job()
398 m2m_ctx); in v4l2_m2m_cancel_job()
406 struct v4l2_m2m_ctx *m2m_ctx) in v4l2_m2m_job_finish() argument
411 if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) { in v4l2_m2m_job_finish()
427 v4l2_m2m_try_schedule(m2m_ctx); in v4l2_m2m_job_finish()
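
The references above cover the core job lifecycle: v4l2_m2m_try_schedule() puts a context on the device job queue once both queues are streaming, each ready list has a buffer (or the queue is marked buffered) and the optional job_ready() callback agrees; the core then invokes m2m_ops->device_run() with the drv_priv pointer, and the job stays in TRANS_RUNNING until the driver calls v4l2_m2m_job_finish(). A minimal driver-side sketch; the my_* names and structures are hypothetical, not taken from the listing:

#include <linux/mutex.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-v4l2.h>

struct my_dev {
	struct v4l2_m2m_dev *m2m_dev;
	struct mutex mutex;			/* serializes queue operations */
};

struct my_ctx {
	struct v4l2_fh fh;			/* fh.m2m_ctx is what the helpers use */
	struct my_dev *dev;
};

static void my_job_abort(void *priv);		/* hypothetical: ask the hardware to stop */

static void my_device_run(void *priv)
{
	struct my_ctx *ctx = priv;		/* the drv_priv passed to v4l2_m2m_ctx_init() */
	struct vb2_v4l2_buffer *src, *dst;

	src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

	/* program the hardware with src/dst here; completion is signalled later */
}

/* typically called from the driver's completion interrupt or worker */
static void my_job_done(struct my_ctx *ctx)
{
	struct vb2_v4l2_buffer *src, *dst;

	src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
	vb2_buffer_done(&src->vb2_buf, VB2_BUF_STATE_DONE);
	vb2_buffer_done(&dst->vb2_buf, VB2_BUF_STATE_DONE);

	/* clears TRANS_RUNNING and lets the core schedule the next queued context */
	v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
}

/* registered with v4l2_m2m_init() to obtain my_dev->m2m_dev */
static const struct v4l2_m2m_ops my_m2m_ops = {
	.device_run	= my_device_run,
	.job_abort	= my_job_abort,
};
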
431 int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_reqbufs() argument
437 vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type); in v4l2_m2m_reqbufs()
448 int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_querybuf() argument
455 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); in v4l2_m2m_querybuf()
473 int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_qbuf() argument
479 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); in v4l2_m2m_qbuf()
482 v4l2_m2m_try_schedule(m2m_ctx); in v4l2_m2m_qbuf()
488 int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_dqbuf() argument
493 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); in v4l2_m2m_dqbuf()
498 int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_prepare_buf() argument
504 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); in v4l2_m2m_prepare_buf()
507 v4l2_m2m_try_schedule(m2m_ctx); in v4l2_m2m_prepare_buf()
513 int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_create_bufs() argument
518 vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type); in v4l2_m2m_create_bufs()
523 int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_expbuf() argument
528 vq = v4l2_m2m_get_vq(m2m_ctx, eb->type); in v4l2_m2m_expbuf()
533 int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_streamon() argument
539 vq = v4l2_m2m_get_vq(m2m_ctx, type); in v4l2_m2m_streamon()
542 v4l2_m2m_try_schedule(m2m_ctx); in v4l2_m2m_streamon()
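
v4l2_m2m_qbuf(), v4l2_m2m_streamon() and the neighbouring helpers take the m2m context explicitly, which is the form a driver uses when it wants to wrap extra work around the core call (the fh-based one-liner wrappers near the end of this listing handle the plain case). A hedged sketch of such a wrapper, assuming the driver keeps a struct v4l2_fh in file->private_data as in the open() sketch further down; my_vidioc_qbuf() is hypothetical:

#include <media/v4l2-fh.h>
#include <media/v4l2-mem2mem.h>

static int my_vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
	struct v4l2_fh *fh = file->private_data;

	/* driver-specific validation or fixups of 'buf' would go here */

	return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
}

Note that, per the references above, v4l2_m2m_qbuf() and v4l2_m2m_streamon() already call v4l2_m2m_try_schedule(), so the wrapper does not need to.
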
548 int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_streamoff() argument
557 v4l2_m2m_cancel_job(m2m_ctx); in v4l2_m2m_streamoff()
559 q_ctx = get_queue_ctx(m2m_ctx, type); in v4l2_m2m_streamoff()
564 m2m_dev = m2m_ctx->m2m_dev; in v4l2_m2m_streamoff()
567 if (m2m_ctx->job_flags & TRANS_QUEUED) in v4l2_m2m_streamoff()
568 list_del(&m2m_ctx->queue); in v4l2_m2m_streamoff()
569 m2m_ctx->job_flags = 0; in v4l2_m2m_streamoff()
578 if (m2m_dev->curr_ctx == m2m_ctx) { in v4l2_m2m_streamoff()
580 wake_up(&m2m_ctx->finished); in v4l2_m2m_streamoff()
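
v4l2_m2m_streamoff() first cancels any queued or running job (via v4l2_m2m_cancel_job() above) and drops the context from the device job queue, but vb2 still expects the driver's stop_streaming callback to give back every buffer it holds. A common pattern, sketched with the same hypothetical my_ctx; the helpers used are the real v4l2_m2m_src_buf_remove()/v4l2_m2m_dst_buf_remove() and vb2_buffer_done():

#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-v4l2.h>

static void my_stop_streaming(struct vb2_queue *q)
{
	struct my_ctx *ctx = vb2_get_drv_priv(q);
	struct vb2_v4l2_buffer *vbuf;

	/* return every buffer still on this queue's ready list to vb2 */
	for (;;) {
		if (V4L2_TYPE_IS_OUTPUT(q->type))
			vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
		else
			vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
		if (!vbuf)
			break;
		vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_ERROR);
	}
}
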
588 __poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_poll() argument
609 src_q = v4l2_m2m_get_src_vq(m2m_ctx); in v4l2_m2m_poll()
610 dst_q = v4l2_m2m_get_dst_vq(m2m_ctx); in v4l2_m2m_poll()
666 int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_mmap() argument
673 vq = v4l2_m2m_get_src_vq(m2m_ctx); in v4l2_m2m_mmap()
675 vq = v4l2_m2m_get_dst_vq(m2m_ctx); in v4l2_m2m_mmap()
881 struct v4l2_m2m_ctx *m2m_ctx; in v4l2_m2m_ctx_init() local
885 m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL); in v4l2_m2m_ctx_init()
886 if (!m2m_ctx) in v4l2_m2m_ctx_init()
889 m2m_ctx->priv = drv_priv; in v4l2_m2m_ctx_init()
890 m2m_ctx->m2m_dev = m2m_dev; in v4l2_m2m_ctx_init()
891 init_waitqueue_head(&m2m_ctx->finished); in v4l2_m2m_ctx_init()
893 out_q_ctx = &m2m_ctx->out_q_ctx; in v4l2_m2m_ctx_init()
894 cap_q_ctx = &m2m_ctx->cap_q_ctx; in v4l2_m2m_ctx_init()
901 INIT_LIST_HEAD(&m2m_ctx->queue); in v4l2_m2m_ctx_init()
913 m2m_ctx->q_lock = out_q_ctx->q.lock; in v4l2_m2m_ctx_init()
915 return m2m_ctx; in v4l2_m2m_ctx_init()
917 kfree(m2m_ctx); in v4l2_m2m_ctx_init()
922 void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx) in v4l2_m2m_ctx_release() argument
925 v4l2_m2m_cancel_job(m2m_ctx); in v4l2_m2m_ctx_release()
927 vb2_queue_release(&m2m_ctx->cap_q_ctx.q); in v4l2_m2m_ctx_release()
928 vb2_queue_release(&m2m_ctx->out_q_ctx.q); in v4l2_m2m_ctx_release()
930 kfree(m2m_ctx); in v4l2_m2m_ctx_release()
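
v4l2_m2m_ctx_init() and v4l2_m2m_ctx_release() are normally driven from the driver's open() and release() file operations, with the returned context stored in fh.m2m_ctx so that the fh-based helpers can find it later. A sketch reusing the hypothetical my_dev/my_ctx from the scheduling example above; the queue settings (io_modes, the vmalloc mem_ops, the buffer types) are illustrative choices, not requirements of the listed code:

#include <linux/err.h>
#include <linux/slab.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-vmalloc.h>

static const struct vb2_ops my_qops;	/* hypothetical: queue_setup, buf_queue (sketched below), ... */

static int my_queue_init(void *priv, struct vb2_queue *src_vq,
			 struct vb2_queue *dst_vq)
{
	struct my_ctx *ctx = priv;
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	src_vq->drv_priv = ctx;
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);	/* required for v4l2_m2m_buf_queue() */
	src_vq->ops = &my_qops;
	src_vq->mem_ops = &vb2_vmalloc_memops;
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->lock = &ctx->dev->mutex;	/* also ends up as m2m_ctx->q_lock */

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	dst_vq->drv_priv = ctx;
	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	dst_vq->ops = &my_qops;
	dst_vq->mem_ops = &vb2_vmalloc_memops;
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->lock = &ctx->dev->mutex;

	return vb2_queue_init(dst_vq);
}

static int my_open(struct file *file)
{
	struct my_dev *dev = video_drvdata(file);
	struct my_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	v4l2_fh_init(&ctx->fh, video_devdata(file));
	file->private_data = &ctx->fh;
	ctx->dev = dev;

	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &my_queue_init);
	if (IS_ERR(ctx->fh.m2m_ctx)) {
		int ret = PTR_ERR(ctx->fh.m2m_ctx);

		v4l2_fh_exit(&ctx->fh);
		kfree(ctx);
		return ret;
	}

	v4l2_fh_add(&ctx->fh);
	return 0;
}

static int my_release(struct file *file)
{
	struct v4l2_fh *fh = file->private_data;
	struct my_ctx *ctx = container_of(fh, struct my_ctx, fh);

	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);	/* cancels a pending job before freeing */
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);
	return 0;
}
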
934 void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_buf_queue() argument
942 q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type); in v4l2_m2m_buf_queue()
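
v4l2_m2m_buf_queue() only moves the buffer onto the context's ready list; scheduling is triggered separately by the try_schedule calls above. It is meant to be called from the driver's vb2 .buf_queue callback, for example (same hypothetical my_ctx; this is also why the queue_init sketch sets buf_struct_size to sizeof(struct v4l2_m2m_buffer), which carries the list node the core uses):

#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-v4l2.h>

static void my_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
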
960 return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb); in v4l2_m2m_ioctl_reqbufs()
969 return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create); in v4l2_m2m_ioctl_create_bufs()
978 return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf); in v4l2_m2m_ioctl_querybuf()
987 return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf); in v4l2_m2m_ioctl_qbuf()
996 return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf); in v4l2_m2m_ioctl_dqbuf()
1005 return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf); in v4l2_m2m_ioctl_prepare_buf()
1014 return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb); in v4l2_m2m_ioctl_expbuf()
1023 return v4l2_m2m_streamon(file, fh->m2m_ctx, type); in v4l2_m2m_ioctl_streamon()
1032 return v4l2_m2m_streamoff(file, fh->m2m_ctx, type); in v4l2_m2m_ioctl_streamoff()
1045 return v4l2_m2m_mmap(file, fh->m2m_ctx, vma); in v4l2_m2m_fop_mmap()
1052 struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx; in v4l2_m2m_fop_poll() local
1055 if (m2m_ctx->q_lock) in v4l2_m2m_fop_poll()
1056 mutex_lock(m2m_ctx->q_lock); in v4l2_m2m_fop_poll()
1058 ret = v4l2_m2m_poll(file, m2m_ctx, wait); in v4l2_m2m_fop_poll()
1060 if (m2m_ctx->q_lock) in v4l2_m2m_fop_poll()
1061 mutex_unlock(m2m_ctx->q_lock); in v4l2_m2m_fop_poll()
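
The v4l2_m2m_ioctl_* and v4l2_m2m_fop_* wrappers above all rely on the driver storing its context in fh->m2m_ctx (as in the open() sketch earlier); with that in place they can be plugged straight into the ops tables. A sketch, with the querycap/format handlers and my_open()/my_release() left as driver-specific placeholders:

#include <linux/module.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>

static const struct v4l2_ioctl_ops my_ioctl_ops = {
	/* VIDIOC_QUERYCAP and the format ioctls remain driver-specific */
	.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
	.vidioc_create_bufs	= v4l2_m2m_ioctl_create_bufs,
	.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
	.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
	.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
	.vidioc_prepare_buf	= v4l2_m2m_ioctl_prepare_buf,
	.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
	.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
	.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
};

static const struct v4l2_file_operations my_fops = {
	.owner		= THIS_MODULE,
	.open		= my_open,		/* hypothetical, see the ctx_init sketch */
	.release	= my_release,
	.poll		= v4l2_m2m_fop_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= v4l2_m2m_fop_mmap,
};

Note the locking visible at the end of the listing: v4l2_m2m_fop_poll() takes m2m_ctx->q_lock around v4l2_m2m_poll() when it is set, and v4l2_m2m_ctx_init() copies that lock from the output queue (m2m_ctx->q_lock = out_q_ctx->q.lock), so the src_vq->lock assigned in queue_init() is what serializes poll() here.
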