/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Memory-to-memory device framework for Video for Linux 2.
 *
 * Helper functions for devices that use memory buffers for both source
 * and destination.
 *
 * Copyright (c) 2009 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 */

#ifndef _MEDIA_V4L2_MEM2MEM_H
#define _MEDIA_V4L2_MEM2MEM_H

#include <media/videobuf2-v4l2.h>

/**
 * struct v4l2_m2m_ops - mem-to-mem device driver callbacks
 * @device_run:	required. Begin the actual job (transaction) inside this
 *		callback.
 *		The job does NOT have to finish before this callback returns
 *		(and usually will not); when the job does finish,
 *		v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish()
 *		has to be called.
 * @job_ready:	optional. Should return 0 if the driver does not have a job
 *		fully prepared to run yet (i.e. it will not be able to finish a
 *		transaction without sleeping). If not provided, it will be
 *		assumed that one source and one destination buffer are all
 *		that is required for the driver to perform one full transaction.
 *		This method must not sleep.
 * @job_abort:	optional. Informs the driver that it has to abort the currently
 *		running transaction as soon as possible (i.e. as soon as it can
 *		stop the device safely; e.g. in the next interrupt handler),
 *		even if the transaction would not have finished by then.
 *		After the driver performs the necessary steps, it has to call
 *		v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish() as
 *		if the transaction ended normally.
 *		This function does not have to (and usually will not) wait
 *		until the device enters a state in which it can be stopped.
 */
struct v4l2_m2m_ops {
	void (*device_run)(void *priv);
	int (*job_ready)(void *priv);
	void (*job_abort)(void *priv);
};
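
/*
 * A minimal sketch of how a driver might fill in these callbacks. The
 * mydrv_* names, the mydrv_ctx type and the mydrv_program_hw() helper are
 * hypothetical; device_run would normally program the hardware and return
 * immediately, with the interrupt handler calling v4l2_m2m_job_finish()
 * once the transaction completes:
 *
 *	static void mydrv_device_run(void *priv)
 *	{
 *		struct mydrv_ctx *ctx = priv;
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
 *		v4l2_m2m_buf_copy_metadata(src, dst, true);
 *		mydrv_program_hw(ctx, src, dst);
 *	}
 *
 *	static const struct v4l2_m2m_ops mydrv_m2m_ops = {
 *		.device_run	= mydrv_device_run,
 *	};
 */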

struct video_device;
struct v4l2_m2m_dev;

/**
 * struct v4l2_m2m_queue_ctx - represents a queue for buffers ready to be
 *	processed
 *
 * @q:		embedded struct &vb2_queue
 * @rdy_queue:	list of buffers ready to be processed
 * @rdy_spinlock: spin lock to protect the struct usage
 * @num_rdy:	number of buffers ready to be processed
 * @buffered:	is the queue buffered?
 *
 * Queue for buffers ready to be processed as soon as this
 * instance receives access to the device.
 */
struct v4l2_m2m_queue_ctx {
	struct vb2_queue	q;

	struct list_head	rdy_queue;
	spinlock_t		rdy_spinlock;
	u8			num_rdy;
	bool			buffered;
};

/**
 * struct v4l2_m2m_ctx - Memory to memory context structure
 *
 * @q_lock: optional lock (struct &mutex) for the capture and output vb2 queues
 * @new_frame: valid in the device_run callback: if true, then this
 *		starts a new frame; if false, then this is a new slice
 *		for an existing frame. This is always true unless
 *		V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF is set, which
 *		indicates slicing support.
 * @is_draining: indicates the device is in the draining phase
 * @last_src_buf: indicates the last source buffer for draining
 * @next_buf_last: next queued capture buffer will be tagged as last
 * @has_stopped: indicates the device has been stopped
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @cap_q_ctx: Capture (output to memory) queue context
 * @out_q_ctx: Output (input from memory) queue context
 * @queue: List of memory to memory contexts
 * @job_flags: Job queue flags, used internally by v4l2-mem2mem.c:
 *		%TRANS_QUEUED, %TRANS_RUNNING and %TRANS_ABORT.
 * @finished: Wait queue used to signal when a job finishes.
 * @priv: Instance private data
 *
 * The memory to memory context is specific to a file handle, NOT to e.g.
 * a device.
 */
struct v4l2_m2m_ctx {
	/* optional cap/out vb2 queues lock */
	struct mutex			*q_lock;

	bool				new_frame;

	bool				is_draining;
	struct vb2_v4l2_buffer		*last_src_buf;
	bool				next_buf_last;
	bool				has_stopped;

	/* internal use only */
	struct v4l2_m2m_dev		*m2m_dev;

	struct v4l2_m2m_queue_ctx	cap_q_ctx;

	struct v4l2_m2m_queue_ctx	out_q_ctx;

	/* For device job queue */
	struct list_head		queue;
	unsigned long			job_flags;
	wait_queue_head_t		finished;

	void				*priv;
};

/**
 * struct v4l2_m2m_buffer - Memory to memory buffer
 *
 * @vb: embedded struct &vb2_v4l2_buffer
 * @list: list of m2m buffers
 */
struct v4l2_m2m_buffer {
	struct vb2_v4l2_buffer	vb;
	struct list_head	list;
};

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				       enum v4l2_buf_type type);

/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * If a queue is buffered (for example a decoder hardware ringbuffer that has
 * to be drained before doing streamoff), allow scheduling without v4l2 buffers
 * on that queue.
 *
 * There may also be additional, custom requirements. In such a case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
 * return 1 if the instance is ready.
 * An example of the above could be an instance that requires more than one
 * src/dst buffer per transaction.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx);

/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after the &v4l2_m2m_ops->device_run
 * callback has been called on the driver. To prevent recursion, it must not
 * be called directly from the &v4l2_m2m_ops->device_run callback itself.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx);
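
/*
 * A sketch of a typical interrupt-driven completion path, assuming
 * hypothetical mydrv_* types that store the pointers returned by
 * v4l2_m2m_init() and v4l2_m2m_ctx_init():
 *
 *	static irqreturn_t mydrv_irq(int irq, void *data)
 *	{
 *		struct mydrv_dev *dev = data;
 *		struct mydrv_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
 *		v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 */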

/**
 * v4l2_m2m_buf_done_and_job_finish() - return source/destination buffers with
 * state and inform the framework that a job has been finished and have it
 * clean up
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @state: vb2 buffer state passed to v4l2_m2m_buf_done().
 *
 * Drivers that set V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF must use this
 * function instead of job_finish() to take held buffers into account. It is
 * optional for other drivers.
 *
 * This function removes the source buffer from the ready list and returns
 * it with the given state. The same is done for the destination buffer, unless
 * it is marked 'held'. In that case the buffer is kept on the ready list.
 *
 * After that the job is finished (see job_finish()).
 *
 * This allows multiple output buffers to be used to fill in a single
 * capture buffer. This is typically used by stateless decoders where,
 * for example, multiple H.264 slices contribute to a single decoded frame.
 */
void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
				      struct v4l2_m2m_ctx *m2m_ctx,
				      enum vb2_buffer_state state);

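/**
 * v4l2_m2m_buf_done() - return a buffer to userspace with the given state
 *
 * @buf: pointer to struct &vb2_v4l2_buffer
 * @state: state of the buffer, as defined by enum &vb2_buffer_state
 *
 * This is a thin wrapper around vb2_buffer_done().
 */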
static inline void
v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
{
	vb2_buffer_done(&buf->vb2_buf, state);
}

/**
 * v4l2_m2m_clear_state() - clear encoding/decoding state
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline void
v4l2_m2m_clear_state(struct v4l2_m2m_ctx *m2m_ctx)
{
	m2m_ctx->next_buf_last = false;
	m2m_ctx->is_draining = false;
	m2m_ctx->has_stopped = false;
}

/**
 * v4l2_m2m_mark_stopped() - set current encoding/decoding state as stopped
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline void
v4l2_m2m_mark_stopped(struct v4l2_m2m_ctx *m2m_ctx)
{
	m2m_ctx->next_buf_last = false;
	m2m_ctx->is_draining = false;
	m2m_ctx->has_stopped = true;
}

/**
 * v4l2_m2m_dst_buf_is_last() - return whether the next queued capture buffer
 * should be tagged as last in the current encoding/decoding session
 *
 * This last capture buffer should be tagged with V4L2_BUF_FLAG_LAST to notify
 * the end of the capture session.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline bool
v4l2_m2m_dst_buf_is_last(struct v4l2_m2m_ctx *m2m_ctx)
{
	return m2m_ctx->is_draining && m2m_ctx->next_buf_last;
}

/**
 * v4l2_m2m_has_stopped() - return the current encoding/decoding session
 * stopped state
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline bool
v4l2_m2m_has_stopped(struct v4l2_m2m_ctx *m2m_ctx)
{
	return m2m_ctx->has_stopped;
}

/**
 * v4l2_m2m_is_last_draining_src_buf() - return whether the given output buffer
 * is the last one to be processed while draining the current
 * encoding/decoding session
 *
 * This identifies the last output buffer queued before a session stop
 * was requested; once it has been processed, the encoding/decoding
 * session enters the stopped state.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: pointer to struct &vb2_v4l2_buffer
 */
static inline bool
v4l2_m2m_is_last_draining_src_buf(struct v4l2_m2m_ctx *m2m_ctx,
				  struct vb2_v4l2_buffer *vbuf)
{
	return m2m_ctx->is_draining && vbuf == m2m_ctx->last_src_buf;
}

/**
 * v4l2_m2m_last_buffer_done() - marks the buffer with LAST flag and DONE
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: pointer to struct &vb2_v4l2_buffer
 */
void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx,
			       struct vb2_v4l2_buffer *vbuf);

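/*
 * A sketch of how the draining helpers above might fit together in a
 * decoder's completion path (hypothetical mydrv_* names; error handling
 * omitted):
 *
 *	src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
 *	dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
 *	v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *	if (v4l2_m2m_is_last_draining_src_buf(ctx->m2m_ctx, src))
 *		v4l2_m2m_last_buffer_done(ctx->m2m_ctx, dst);
 *	else
 *		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 *	v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
 */
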
/**
 * v4l2_m2m_suspend() - stop new jobs from being run and wait for the current
 * job to finish
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Called by a driver in the suspend hook. Stops new jobs from being run, and
 * waits for the currently running job to finish.
 */
void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_resume() - resume job running and try to run a queued job
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Called by a driver in the resume hook. This reverts the operation of
 * v4l2_m2m_suspend() and allows jobs to be run again. Also tries to run a
 * queued job if there is any.
 */
void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @reqbufs: pointer to struct &v4l2_requestbuffers
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs);

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf);

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf);

/**
 * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_buffer *buf);

/**
 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
 * on the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @create: pointer to struct &v4l2_create_buffers
 */
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create);

/**
 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @eb: pointer to struct &v4l2_exportbuffer
 */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_exportbuffer *eb);

/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type);

/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type);

/**
 * v4l2_m2m_update_start_streaming_state() - update the encoding/decoding
 * session state when a start of streaming of a video queue is requested
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @q: pointer to the affected struct &vb2_queue
 */
void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					   struct vb2_queue *q);

/**
 * v4l2_m2m_update_stop_streaming_state() - update the encoding/decoding
 * session state when a stop of streaming of a video queue is requested
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @q: pointer to the affected struct &vb2_queue
 */
void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
					  struct vb2_queue *q);

/**
 * v4l2_m2m_encoder_cmd() - execute an encoder command
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @ec: pointer to the encoder command
 */
int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_encoder_cmd *ec);

/**
 * v4l2_m2m_decoder_cmd() - execute a decoder command
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @dc: pointer to the decoder command
 */
int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_decoder_cmd *dc);

/**
 * v4l2_m2m_poll() - poll replacement, for destination buffers only
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @wait: pointer to struct &poll_table_struct
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * on the source queue is available to dequeue (with dqbuf), this indicates
 * that a non-blocking write can be performed; a buffer available on the
 * destination queue indicates a non-blocking read.
 */
__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait);

/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vma: pointer to struct &vm_area_struct
 *
 * Call from driver's mmap() function. Will handle mmap() for both queues
 * seamlessly for the video buffer, which will receive normal per-queue offsets
 * and proper vb2 queue pointers. The differentiation is made outside
 * vb2 by adding a predefined offset to buffers from one of the queues
 * and subtracting it before passing it back to vb2. Only drivers (and
 * thus applications) receive modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma);

#ifndef CONFIG_MMU
unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr,
					 unsigned long len, unsigned long pgoff,
					 unsigned long flags);
#endif
/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * @m2m_ops: pointer to struct v4l2_m2m_ops
 *
 * Usually called from driver's ``probe()`` function.
 *
 * Return: returns an opaque pointer to the internal data to handle M2M context
 */
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops);
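
/*
 * Typically used like this in ``probe()`` (a sketch; the mydrv_dev type and
 * mydrv_m2m_ops are hypothetical):
 *
 *	dev->m2m_dev = v4l2_m2m_init(&mydrv_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);
 */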

#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev);
int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
			struct video_device *vdev, int function);
#else
static inline void
v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
}

static inline int
v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
		struct video_device *vdev, int function)
{
	return 0;
}
#endif

/**
 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Usually called from driver's ``remove()`` function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @drv_priv: driver's instance private data
 * @queue_init: a callback used to initialize the source and destination
 *	vb2_queues
 *
 * Usually called from driver's ``open()`` function.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq));
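
/*
 * Usually driven from ``open()`` with a queue_init callback that sets up
 * both vb2 queues (a sketch; mydrv_* names are hypothetical):
 *
 *	static int mydrv_queue_init(void *priv, struct vb2_queue *src_vq,
 *				    struct vb2_queue *dst_vq)
 *	{
 *		... set type, ops, mem_ops, lock, etc. and call
 *		    vb2_queue_init() on both queues ...
 *	}
 *
 *	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, mydrv_queue_init);
 *	if (IS_ERR(ctx->m2m_ctx))
 *		return PTR_ERR(ctx->m2m_ctx);
 */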

static inline void v4l2_m2m_set_src_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->out_q_ctx.buffered = buffered;
}

static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->cap_q_ctx.buffered = buffered;
}

/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * Usually called from driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);

/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: pointer to struct &vb2_v4l2_buffer
 *
 * Call from the &vb2_ops->buf_queue callback.
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
			struct vb2_v4l2_buffer *vbuf);

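/*
 * For example (a sketch; assumes the driver stores its m2m context in a
 * hypothetical mydrv_ctx structure):
 *
 *	static void mydrv_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 *		struct mydrv_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
 *	}
 */
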
/**
 * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for
 * use
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned int num_buf_rdy;
	unsigned long flags;

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
	num_buf_rdy = m2m_ctx->out_q_ctx.num_rdy;
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);

	return num_buf_rdy;
}

/**
 * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers
 * ready for use
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned int num_buf_rdy;
	unsigned long flags;

	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
	num_buf_rdy = m2m_ctx->cap_q_ctx.num_rdy;
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);

	return num_buf_rdy;
}

/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready
 * buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_next_buf(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_next_buf(&m2m_ctx->cap_q_ctx);
}

/**
 * v4l2_m2m_last_buf() - return last buffer from the list of ready buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_last_src_buf() - return last source buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_last_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_last_buf(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_last_dst_buf() - return last destination buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_last_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_last_buf(&m2m_ctx->cap_q_ctx);
}

/**
 * v4l2_m2m_for_each_dst_buf() - iterate over a list of destination ready
 * buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 */
#define v4l2_m2m_for_each_dst_buf(m2m_ctx, b)	\
	list_for_each_entry(b, &m2m_ctx->cap_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_src_buf() - iterate over a list of source ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 */
#define v4l2_m2m_for_each_src_buf(m2m_ctx, b)	\
	list_for_each_entry(b, &m2m_ctx->out_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_dst_buf_safe() - iterate over a list of destination ready
 * buffers safely
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 * @n: used as temporary storage
 */
#define v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, b, n)	\
	list_for_each_entry_safe(b, n, &m2m_ctx->cap_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_src_buf_safe() - iterate over a list of source ready
 * buffers safely
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 * @n: used as temporary storage
 */
#define v4l2_m2m_for_each_src_buf_safe(m2m_ctx, b, n)	\
	list_for_each_entry_safe(b, n, &m2m_ctx->out_q_ctx.rdy_queue, list)

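/*
 * For example, iterating over the ready destination buffers while holding
 * the queue's ready-list spinlock (a sketch; do_something_with() is a
 * placeholder):
 *
 *	struct v4l2_m2m_buffer *b;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
 *	v4l2_m2m_for_each_dst_buf(m2m_ctx, b)
 *		do_something_with(&b->vb);
 *	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
 */
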
/**
 * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	return &m2m_ctx->out_q_ctx.q;
}

/**
 * v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
	return &m2m_ctx->cap_q_ctx.q;
}

/**
 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
 * return it
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready
 * buffers and return it
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_buf_remove(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of
 * ready buffers and return it
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
	return v4l2_m2m_buf_remove(&m2m_ctx->cap_q_ctx);
}

/**
 * v4l2_m2m_buf_remove_by_buf() - remove a specific buffer from the list of
 * ready buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 * @vbuf: the buffer to be removed
 */
void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
				struct vb2_v4l2_buffer *vbuf);

/**
 * v4l2_m2m_src_buf_remove_by_buf() - remove a specific source buffer from the
 * list of ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: the buffer to be removed
 */
static inline void v4l2_m2m_src_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
						  struct vb2_v4l2_buffer *vbuf)
{
	v4l2_m2m_buf_remove_by_buf(&m2m_ctx->out_q_ctx, vbuf);
}

/**
 * v4l2_m2m_dst_buf_remove_by_buf() - remove a specific destination buffer from
 * the list of ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: the buffer to be removed
 */
static inline void v4l2_m2m_dst_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
						  struct vb2_v4l2_buffer *vbuf)
{
	v4l2_m2m_buf_remove_by_buf(&m2m_ctx->cap_q_ctx, vbuf);
}

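/**
 * v4l2_m2m_buf_remove_by_idx() - remove the buffer with the given index from
 * the list of ready buffers and return it
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 * @idx: index of the buffer to be removed
 */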
struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx);

static inline struct vb2_v4l2_buffer *
v4l2_m2m_src_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
{
	return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->out_q_ctx, idx);
}

static inline struct vb2_v4l2_buffer *
v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
{
	return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->cap_q_ctx, idx);
}

/**
 * v4l2_m2m_buf_copy_metadata() - copy buffer metadata from
 * the output buffer to the capture buffer
 *
 * @out_vb: the output buffer that is the source of the metadata.
 * @cap_vb: the capture buffer that will receive the metadata.
 * @copy_frame_flags: copy the KEY/B/PFRAME flags as well.
 *
 * This helper function copies the timestamp, timecode (if the TIMECODE
 * buffer flag was set), field and the TIMECODE, KEYFRAME, BFRAME, PFRAME
 * and TSTAMP_SRC_MASK flags from @out_vb to @cap_vb.
 *
 * If @copy_frame_flags is false, then the KEYFRAME, BFRAME and PFRAME
 * flags are not copied. This is typically needed for encoders that
 * set these flags explicitly.
 */
void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb,
				struct vb2_v4l2_buffer *cap_vb,
				bool copy_frame_flags);

/* v4l2 request helper */

void v4l2_m2m_request_queue(struct media_request *req);

/* v4l2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
				struct v4l2_requestbuffers *rb);
int v4l2_m2m_ioctl_create_bufs(struct file *file, void *fh,
				struct v4l2_create_buffers *create);
int v4l2_m2m_ioctl_querybuf(struct file *file, void *fh,
				struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_expbuf(struct file *file, void *fh,
				struct v4l2_exportbuffer *eb);
int v4l2_m2m_ioctl_qbuf(struct file *file, void *fh,
				struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_dqbuf(struct file *file, void *fh,
				struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *fh,
			       struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_streamon(struct file *file, void *fh,
				enum v4l2_buf_type type);
int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh,
				enum v4l2_buf_type type);
int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *fh,
			       struct v4l2_encoder_cmd *ec);
int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *fh,
			       struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
				   struct v4l2_encoder_cmd *ec);
int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
				   struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
					     struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
					 struct v4l2_decoder_cmd *dc);
int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma);
__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait);
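
/*
 * These helpers let a mem-to-mem driver forward most ioctl and file
 * operations straight to the framework, e.g. (a sketch; mydrv_open and
 * mydrv_release are hypothetical):
 *
 *	static const struct v4l2_file_operations mydrv_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= mydrv_open,
 *		.release	= mydrv_release,
 *		.poll		= v4l2_m2m_fop_poll,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *	};
 */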

#endif /* _MEDIA_V4L2_MEM2MEM_H */