1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * camss-vfe.c
4  *
5  * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module
6  *
7  * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
8  * Copyright (C) 2015-2018 Linaro Ltd.
9  */
10 #include <linux/clk.h>
11 #include <linux/completion.h>
12 #include <linux/interrupt.h>
13 #include <linux/iommu.h>
14 #include <linux/mutex.h>
15 #include <linux/of.h>
16 #include <linux/platform_device.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/spinlock_types.h>
19 #include <linux/spinlock.h>
20 #include <media/media-entity.h>
21 #include <media/v4l2-device.h>
22 #include <media/v4l2-subdev.h>
23 
24 #include "camss-vfe.h"
25 #include "camss.h"
26 
27 #define MSM_VFE_NAME "msm_vfe"
28 
29 #define vfe_line_array(ptr_line)	\
30 	((const struct vfe_line (*)[]) &(ptr_line[-(ptr_line->id)]))
31 
32 #define to_vfe(ptr_line)	\
33 	container_of(vfe_line_array(ptr_line), struct vfe_device, line)
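
/*
 * A VFE line carries no back-pointer to its VFE device. to_vfe() recovers
 * it by stepping the line pointer back to element zero of the vfe_device
 * line[] array (vfe_line_array()) and applying container_of(). A minimal
 * sketch of the invariant this relies on, assuming a valid line pointer
 * taken from a vfe_device:
 *
 *	struct vfe_device *vfe = ...;
 *	struct vfe_line *line = &vfe->line[n];
 *	// to_vfe(line) == vfe for any valid index n
 */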
34 
35 /* VFE reset timeout */
36 #define VFE_RESET_TIMEOUT_MS 50
37 /* VFE halt timeout */
38 #define VFE_HALT_TIMEOUT_MS 100
39 /* Max number of frame drop updates per frame */
40 #define VFE_FRAME_DROP_UPDATES 2
41 /* Frame drop value. VAL + UPDATES - 1 should not exceed 31 */
42 #define VFE_FRAME_DROP_VAL 30
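/*
 * vfe_output_frame_drop() alternates the programmed period between
 * VFE_FRAME_DROP_VAL and VFE_FRAME_DROP_VAL + 1 so that every update is
 * latched by the hardware on the next frame. The limit of 31 noted above
 * is assumed to come from the width of the frame drop period register
 * field.
 */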
43 
44 #define VFE_NEXT_SOF_MS 500
45 
46 #define SCALER_RATIO_MAX 16
47 
48 struct vfe_format {
49 	u32 code;
50 	u8 bpp;
51 };
52 
53 static const struct vfe_format formats_rdi_8x16[] = {
54 	{ MEDIA_BUS_FMT_UYVY8_2X8, 8 },
55 	{ MEDIA_BUS_FMT_VYUY8_2X8, 8 },
56 	{ MEDIA_BUS_FMT_YUYV8_2X8, 8 },
57 	{ MEDIA_BUS_FMT_YVYU8_2X8, 8 },
58 	{ MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
59 	{ MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
60 	{ MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
61 	{ MEDIA_BUS_FMT_SRGGB8_1X8, 8 },
62 	{ MEDIA_BUS_FMT_SBGGR10_1X10, 10 },
63 	{ MEDIA_BUS_FMT_SGBRG10_1X10, 10 },
64 	{ MEDIA_BUS_FMT_SGRBG10_1X10, 10 },
65 	{ MEDIA_BUS_FMT_SRGGB10_1X10, 10 },
66 	{ MEDIA_BUS_FMT_SBGGR12_1X12, 12 },
67 	{ MEDIA_BUS_FMT_SGBRG12_1X12, 12 },
68 	{ MEDIA_BUS_FMT_SGRBG12_1X12, 12 },
69 	{ MEDIA_BUS_FMT_SRGGB12_1X12, 12 },
70 	{ MEDIA_BUS_FMT_Y10_1X10, 10 },
71 };
72 
73 static const struct vfe_format formats_pix_8x16[] = {
74 	{ MEDIA_BUS_FMT_UYVY8_2X8, 8 },
75 	{ MEDIA_BUS_FMT_VYUY8_2X8, 8 },
76 	{ MEDIA_BUS_FMT_YUYV8_2X8, 8 },
77 	{ MEDIA_BUS_FMT_YVYU8_2X8, 8 },
78 };
79 
80 static const struct vfe_format formats_rdi_8x96[] = {
81 	{ MEDIA_BUS_FMT_UYVY8_2X8, 8 },
82 	{ MEDIA_BUS_FMT_VYUY8_2X8, 8 },
83 	{ MEDIA_BUS_FMT_YUYV8_2X8, 8 },
84 	{ MEDIA_BUS_FMT_YVYU8_2X8, 8 },
85 	{ MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
86 	{ MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
87 	{ MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
88 	{ MEDIA_BUS_FMT_SRGGB8_1X8, 8 },
89 	{ MEDIA_BUS_FMT_SBGGR10_1X10, 10 },
90 	{ MEDIA_BUS_FMT_SGBRG10_1X10, 10 },
91 	{ MEDIA_BUS_FMT_SGRBG10_1X10, 10 },
92 	{ MEDIA_BUS_FMT_SRGGB10_1X10, 10 },
93 	{ MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, 16 },
94 	{ MEDIA_BUS_FMT_SBGGR12_1X12, 12 },
95 	{ MEDIA_BUS_FMT_SGBRG12_1X12, 12 },
96 	{ MEDIA_BUS_FMT_SGRBG12_1X12, 12 },
97 	{ MEDIA_BUS_FMT_SRGGB12_1X12, 12 },
98 	{ MEDIA_BUS_FMT_SBGGR14_1X14, 14 },
99 	{ MEDIA_BUS_FMT_SGBRG14_1X14, 14 },
100 	{ MEDIA_BUS_FMT_SGRBG14_1X14, 14 },
101 	{ MEDIA_BUS_FMT_SRGGB14_1X14, 14 },
102 	{ MEDIA_BUS_FMT_Y10_1X10, 10 },
103 	{ MEDIA_BUS_FMT_Y10_2X8_PADHI_LE, 16 },
104 };
105 
106 static const struct vfe_format formats_pix_8x96[] = {
107 	{ MEDIA_BUS_FMT_UYVY8_2X8, 8 },
108 	{ MEDIA_BUS_FMT_VYUY8_2X8, 8 },
109 	{ MEDIA_BUS_FMT_YUYV8_2X8, 8 },
110 	{ MEDIA_BUS_FMT_YVYU8_2X8, 8 },
111 };
112 
113 /*
114  * vfe_get_bpp - map media bus format to bits per pixel
115  * @formats: supported media bus formats array
116  * @nformats: size of @formats array
117  * @code: media bus format code
118  *
119  * Return number of bits per pixel
120  */
static u8 vfe_get_bpp(const struct vfe_format *formats,
122 		      unsigned int nformats, u32 code)
123 {
124 	unsigned int i;
125 
126 	for (i = 0; i < nformats; i++)
127 		if (code == formats[i].code)
128 			return formats[i].bpp;
129 
130 	WARN(1, "Unknown format\n");
131 
132 	return formats[0].bpp;
133 }
134 
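/*
 * vfe_find_code - Pick a media bus code from a list of source pad codes
 * @code: array of supported source pad codes
 * @n_code: number of elements in @code
 * @index: enumeration index, used when @req_code is zero
 * @req_code: requested code, or zero to enumerate by @index
 *
 * Return the requested code if it is in the list, the code at @index when
 * enumerating (zero once @index runs past the end), or code[0] as a
 * fallback otherwise.
 */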
static u32 vfe_find_code(u32 *code, unsigned int n_code,
136 			 unsigned int index, u32 req_code)
137 {
138 	int i;
139 
140 	if (!req_code && (index >= n_code))
141 		return 0;
142 
143 	for (i = 0; i < n_code; i++)
144 		if (req_code) {
145 			if (req_code == code[i])
146 				return req_code;
147 		} else {
148 			if (i == index)
149 				return code[i];
150 		}
151 
152 	return code[0];
153 }
154 
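/*
 * vfe_src_pad_code - Map a sink pad code to the supported source pad codes
 * @line: VFE line
 * @sink_code: media bus code configured on the sink pad
 * @index: enumeration index into the source pad code list
 * @src_req_code: requested source pad code, or zero to enumerate
 *
 * The list of valid source codes depends on the CAMSS generation: 8x16
 * keeps the same Y/C sample order (adding only the 1.5x packed variant),
 * while 8x96 can also convert between the four YUV 4:2:2 sample orders.
 * Non-YUV sink codes pass through unchanged.
 *
 * Return a media bus code, or zero for an invalid index or version.
 */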
static u32 vfe_src_pad_code(struct vfe_line *line, u32 sink_code,
156 			    unsigned int index, u32 src_req_code)
157 {
158 	struct vfe_device *vfe = to_vfe(line);
159 
160 	if (vfe->camss->version == CAMSS_8x16)
161 		switch (sink_code) {
162 		case MEDIA_BUS_FMT_YUYV8_2X8:
163 		{
164 			u32 src_code[] = {
165 				MEDIA_BUS_FMT_YUYV8_2X8,
166 				MEDIA_BUS_FMT_YUYV8_1_5X8,
167 			};
168 
169 			return vfe_find_code(src_code, ARRAY_SIZE(src_code),
170 					     index, src_req_code);
171 		}
172 		case MEDIA_BUS_FMT_YVYU8_2X8:
173 		{
174 			u32 src_code[] = {
175 				MEDIA_BUS_FMT_YVYU8_2X8,
176 				MEDIA_BUS_FMT_YVYU8_1_5X8,
177 			};
178 
179 			return vfe_find_code(src_code, ARRAY_SIZE(src_code),
180 					     index, src_req_code);
181 		}
182 		case MEDIA_BUS_FMT_UYVY8_2X8:
183 		{
184 			u32 src_code[] = {
185 				MEDIA_BUS_FMT_UYVY8_2X8,
186 				MEDIA_BUS_FMT_UYVY8_1_5X8,
187 			};
188 
189 			return vfe_find_code(src_code, ARRAY_SIZE(src_code),
190 					     index, src_req_code);
191 		}
192 		case MEDIA_BUS_FMT_VYUY8_2X8:
193 		{
194 			u32 src_code[] = {
195 				MEDIA_BUS_FMT_VYUY8_2X8,
196 				MEDIA_BUS_FMT_VYUY8_1_5X8,
197 			};
198 
199 			return vfe_find_code(src_code, ARRAY_SIZE(src_code),
200 					     index, src_req_code);
201 		}
202 		default:
203 			if (index > 0)
204 				return 0;
205 
206 			return sink_code;
207 		}
208 	else if (vfe->camss->version == CAMSS_8x96)
209 		switch (sink_code) {
210 		case MEDIA_BUS_FMT_YUYV8_2X8:
211 		{
212 			u32 src_code[] = {
213 				MEDIA_BUS_FMT_YUYV8_2X8,
214 				MEDIA_BUS_FMT_YVYU8_2X8,
215 				MEDIA_BUS_FMT_UYVY8_2X8,
216 				MEDIA_BUS_FMT_VYUY8_2X8,
217 				MEDIA_BUS_FMT_YUYV8_1_5X8,
218 			};
219 
220 			return vfe_find_code(src_code, ARRAY_SIZE(src_code),
221 					     index, src_req_code);
222 		}
223 		case MEDIA_BUS_FMT_YVYU8_2X8:
224 		{
225 			u32 src_code[] = {
226 				MEDIA_BUS_FMT_YVYU8_2X8,
227 				MEDIA_BUS_FMT_YUYV8_2X8,
228 				MEDIA_BUS_FMT_UYVY8_2X8,
229 				MEDIA_BUS_FMT_VYUY8_2X8,
230 				MEDIA_BUS_FMT_YVYU8_1_5X8,
231 			};
232 
233 			return vfe_find_code(src_code, ARRAY_SIZE(src_code),
234 					     index, src_req_code);
235 		}
236 		case MEDIA_BUS_FMT_UYVY8_2X8:
237 		{
238 			u32 src_code[] = {
239 				MEDIA_BUS_FMT_UYVY8_2X8,
240 				MEDIA_BUS_FMT_YUYV8_2X8,
241 				MEDIA_BUS_FMT_YVYU8_2X8,
242 				MEDIA_BUS_FMT_VYUY8_2X8,
243 				MEDIA_BUS_FMT_UYVY8_1_5X8,
244 			};
245 
246 			return vfe_find_code(src_code, ARRAY_SIZE(src_code),
247 					     index, src_req_code);
248 		}
249 		case MEDIA_BUS_FMT_VYUY8_2X8:
250 		{
251 			u32 src_code[] = {
252 				MEDIA_BUS_FMT_VYUY8_2X8,
253 				MEDIA_BUS_FMT_YUYV8_2X8,
254 				MEDIA_BUS_FMT_YVYU8_2X8,
255 				MEDIA_BUS_FMT_UYVY8_2X8,
256 				MEDIA_BUS_FMT_VYUY8_1_5X8,
257 			};
258 
259 			return vfe_find_code(src_code, ARRAY_SIZE(src_code),
260 					     index, src_req_code);
261 		}
262 		default:
263 			if (index > 0)
264 				return 0;
265 
266 			return sink_code;
267 		}
268 	else
269 		return 0;
270 }
271 
272 /*
273  * vfe_reset - Trigger reset on VFE module and wait to complete
274  * @vfe: VFE device
275  *
276  * Return 0 on success or a negative error code otherwise
277  */
static int vfe_reset(struct vfe_device *vfe)
279 {
280 	unsigned long time;
281 
282 	reinit_completion(&vfe->reset_complete);
283 
284 	vfe->ops->global_reset(vfe);
285 
286 	time = wait_for_completion_timeout(&vfe->reset_complete,
287 		msecs_to_jiffies(VFE_RESET_TIMEOUT_MS));
288 	if (!time) {
289 		dev_err(vfe->camss->dev, "VFE reset timeout\n");
290 		return -EIO;
291 	}
292 
293 	return 0;
294 }
295 
296 /*
297  * vfe_halt - Trigger halt on VFE module and wait to complete
298  * @vfe: VFE device
299  *
300  * Return 0 on success or a negative error code otherwise
301  */
static int vfe_halt(struct vfe_device *vfe)
303 {
304 	unsigned long time;
305 
306 	reinit_completion(&vfe->halt_complete);
307 
308 	vfe->ops->halt_request(vfe);
309 
310 	time = wait_for_completion_timeout(&vfe->halt_complete,
311 		msecs_to_jiffies(VFE_HALT_TIMEOUT_MS));
312 	if (!time) {
313 		dev_err(vfe->camss->dev, "VFE halt timeout\n");
314 		return -EIO;
315 	}
316 
317 	return 0;
318 }
319 
static void vfe_init_outputs(struct vfe_device *vfe)
321 {
322 	int i;
323 
324 	for (i = 0; i < ARRAY_SIZE(vfe->line); i++) {
325 		struct vfe_output *output = &vfe->line[i].output;
326 
327 		output->state = VFE_OUTPUT_OFF;
328 		output->buf[0] = NULL;
329 		output->buf[1] = NULL;
330 		INIT_LIST_HEAD(&output->pending_bufs);
331 	}
332 }
333 
static void vfe_reset_output_maps(struct vfe_device *vfe)
335 {
336 	int i;
337 
338 	for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++)
339 		vfe->wm_output_map[i] = VFE_LINE_NONE;
340 }
341 
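/*
 * vfe_output_init_addrs - Program initial ping/pong addresses for an output
 * @vfe: VFE device
 * @output: VFE output
 * @sync: if set, reload the write masters so the addresses take effect
 *
 * Each write master alternates between a ping and a pong buffer address.
 * buf[0] seeds the ping address and buf[1] the pong address; with a single
 * buffer the pong address mirrors ping so the hardware always has a valid
 * destination.
 */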
static void vfe_output_init_addrs(struct vfe_device *vfe,
343 				  struct vfe_output *output, u8 sync)
344 {
345 	u32 ping_addr;
346 	u32 pong_addr;
347 	unsigned int i;
348 
349 	output->active_buf = 0;
350 
351 	for (i = 0; i < output->wm_num; i++) {
352 		if (output->buf[0])
353 			ping_addr = output->buf[0]->addr[i];
354 		else
355 			ping_addr = 0;
356 
357 		if (output->buf[1])
358 			pong_addr = output->buf[1]->addr[i];
359 		else
360 			pong_addr = ping_addr;
361 
362 		vfe->ops->wm_set_ping_addr(vfe, output->wm_idx[i], ping_addr);
363 		vfe->ops->wm_set_pong_addr(vfe, output->wm_idx[i], pong_addr);
364 		if (sync)
365 			vfe->ops->bus_reload_wm(vfe, output->wm_idx[i]);
366 	}
367 }
368 
static void vfe_output_update_ping_addr(struct vfe_device *vfe,
370 					struct vfe_output *output, u8 sync)
371 {
372 	u32 addr;
373 	unsigned int i;
374 
375 	for (i = 0; i < output->wm_num; i++) {
376 		if (output->buf[0])
377 			addr = output->buf[0]->addr[i];
378 		else
379 			addr = 0;
380 
381 		vfe->ops->wm_set_ping_addr(vfe, output->wm_idx[i], addr);
382 		if (sync)
383 			vfe->ops->bus_reload_wm(vfe, output->wm_idx[i]);
384 	}
385 }
386 
static void vfe_output_update_pong_addr(struct vfe_device *vfe,
388 					struct vfe_output *output, u8 sync)
389 {
390 	u32 addr;
391 	unsigned int i;
392 
393 	for (i = 0; i < output->wm_num; i++) {
394 		if (output->buf[1])
395 			addr = output->buf[1]->addr[i];
396 		else
397 			addr = 0;
398 
399 		vfe->ops->wm_set_pong_addr(vfe, output->wm_idx[i], addr);
400 		if (sync)
401 			vfe->ops->bus_reload_wm(vfe, output->wm_idx[i]);
402 	}
404 }
405 
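/*
 * vfe_reserve_wm - Reserve a free write master for a VFE line
 * @vfe: VFE device
 * @line_id: VFE line id to map the write master to
 *
 * Return the reserved write master index or -EBUSY if none is free
 */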
static int vfe_reserve_wm(struct vfe_device *vfe, enum vfe_line_id line_id)
407 {
408 	int ret = -EBUSY;
409 	int i;
410 
411 	for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++) {
412 		if (vfe->wm_output_map[i] == VFE_LINE_NONE) {
413 			vfe->wm_output_map[i] = line_id;
414 			ret = i;
415 			break;
416 		}
417 	}
418 
419 	return ret;
420 }
421 
static int vfe_release_wm(struct vfe_device *vfe, u8 wm)
423 {
424 	if (wm >= ARRAY_SIZE(vfe->wm_output_map))
425 		return -EINVAL;
426 
427 	vfe->wm_output_map[wm] = VFE_LINE_NONE;
428 
429 	return 0;
430 }
431 
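/*
 * vfe_output_frame_drop - Program the frame drop pattern for an output
 * @vfe: VFE device
 * @output: VFE output
 * @drop_pattern: bitmask of frames to write within the drop period
 *
 * As used by the callers in this file, pattern 1 captures a single frame,
 * 3 captures two consecutive frames (continuous ping/pong operation) and
 * 0 drops everything. The period value is toggled on every call so the
 * update is latched on the next frame.
 */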
static void vfe_output_frame_drop(struct vfe_device *vfe,
433 				  struct vfe_output *output,
434 				  u32 drop_pattern)
435 {
436 	u8 drop_period;
437 	unsigned int i;
438 
439 	/* We need to toggle update period to be valid on next frame */
440 	output->drop_update_idx++;
441 	output->drop_update_idx %= VFE_FRAME_DROP_UPDATES;
442 	drop_period = VFE_FRAME_DROP_VAL + output->drop_update_idx;
443 
444 	for (i = 0; i < output->wm_num; i++) {
445 		vfe->ops->wm_set_framedrop_period(vfe, output->wm_idx[i],
446 						  drop_period);
447 		vfe->ops->wm_set_framedrop_pattern(vfe, output->wm_idx[i],
448 						   drop_pattern);
449 	}
450 	vfe->ops->reg_update(vfe,
451 			     container_of(output, struct vfe_line, output)->id);
452 }
453 
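/*
 * vfe_buf_get_pending - Pop the first buffer from the pending queue
 * @output: VFE output
 *
 * Return the buffer or NULL if the pending queue is empty
 */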
static struct camss_buffer *vfe_buf_get_pending(struct vfe_output *output)
455 {
456 	struct camss_buffer *buffer = NULL;
457 
458 	if (!list_empty(&output->pending_bufs)) {
459 		buffer = list_first_entry(&output->pending_bufs,
460 					  struct camss_buffer,
461 					  queue);
462 		list_del(&buffer->queue);
463 	}
464 
465 	return buffer;
466 }
467 
468 /*
469  * vfe_buf_add_pending - Add output buffer to list of pending
470  * @output: VFE output
471  * @buffer: Video buffer
472  */
static void vfe_buf_add_pending(struct vfe_output *output,
474 				struct camss_buffer *buffer)
475 {
476 	INIT_LIST_HEAD(&buffer->queue);
477 	list_add_tail(&buffer->queue, &output->pending_bufs);
478 }
479 
480 /*
481  * vfe_buf_flush_pending - Flush all pending buffers.
482  * @output: VFE output
483  * @state: vb2 buffer state
484  */
static void vfe_buf_flush_pending(struct vfe_output *output,
486 				  enum vb2_buffer_state state)
487 {
488 	struct camss_buffer *buf;
489 	struct camss_buffer *t;
490 
491 	list_for_each_entry_safe(buf, t, &output->pending_bufs, queue) {
492 		vb2_buffer_done(&buf->vb.vb2_buf, state);
493 		list_del(&buf->queue);
494 	}
495 }
496 
static void vfe_buf_update_wm_on_next(struct vfe_device *vfe,
498 				      struct vfe_output *output)
499 {
500 	switch (output->state) {
501 	case VFE_OUTPUT_CONTINUOUS:
502 		vfe_output_frame_drop(vfe, output, 3);
503 		break;
504 	case VFE_OUTPUT_SINGLE:
505 	default:
506 		dev_err_ratelimited(vfe->camss->dev,
507 				    "Next buf in wrong state! %d\n",
508 				    output->state);
509 		break;
510 	}
511 }
512 
static void vfe_buf_update_wm_on_last(struct vfe_device *vfe,
514 				      struct vfe_output *output)
515 {
516 	switch (output->state) {
517 	case VFE_OUTPUT_CONTINUOUS:
518 		output->state = VFE_OUTPUT_SINGLE;
519 		vfe_output_frame_drop(vfe, output, 1);
520 		break;
521 	case VFE_OUTPUT_SINGLE:
522 		output->state = VFE_OUTPUT_STOPPING;
523 		vfe_output_frame_drop(vfe, output, 0);
524 		break;
525 	default:
526 		dev_err_ratelimited(vfe->camss->dev,
				    "Last buffer in wrong state! %d\n",
528 				    output->state);
529 		break;
530 	}
531 }
532 
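/*
 * vfe_buf_update_wm_on_new - Feed a newly queued buffer to the output
 * @vfe: VFE device
 * @output: VFE output
 * @new_buf: buffer queued from userspace
 *
 * Output state machine: IDLE (no buffer) -> SINGLE (one buffer, ping and
 * pong identical) -> CONTINUOUS (both ping and pong populated). A new
 * buffer promotes the output to the next state when the inactive slot is
 * free; otherwise it is parked on the pending queue and picked up from
 * the write master done interrupt.
 */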
static void vfe_buf_update_wm_on_new(struct vfe_device *vfe,
534 				     struct vfe_output *output,
535 				     struct camss_buffer *new_buf)
536 {
537 	int inactive_idx;
538 
539 	switch (output->state) {
540 	case VFE_OUTPUT_SINGLE:
541 		inactive_idx = !output->active_buf;
542 
543 		if (!output->buf[inactive_idx]) {
544 			output->buf[inactive_idx] = new_buf;
545 
546 			if (inactive_idx)
547 				vfe_output_update_pong_addr(vfe, output, 0);
548 			else
549 				vfe_output_update_ping_addr(vfe, output, 0);
550 
551 			vfe_output_frame_drop(vfe, output, 3);
552 			output->state = VFE_OUTPUT_CONTINUOUS;
553 		} else {
554 			vfe_buf_add_pending(output, new_buf);
555 			dev_err_ratelimited(vfe->camss->dev,
556 					    "Inactive buffer is busy\n");
557 		}
558 		break;
559 
560 	case VFE_OUTPUT_IDLE:
561 		if (!output->buf[0]) {
562 			output->buf[0] = new_buf;
563 
564 			vfe_output_init_addrs(vfe, output, 1);
565 
566 			vfe_output_frame_drop(vfe, output, 1);
567 			output->state = VFE_OUTPUT_SINGLE;
568 		} else {
569 			vfe_buf_add_pending(output, new_buf);
570 			dev_err_ratelimited(vfe->camss->dev,
571 					    "Output idle with buffer set!\n");
572 		}
573 		break;
574 
575 	case VFE_OUTPUT_CONTINUOUS:
576 	default:
577 		vfe_buf_add_pending(output, new_buf);
578 		break;
579 	}
580 }
581 
static int vfe_get_output(struct vfe_line *line)
583 {
584 	struct vfe_device *vfe = to_vfe(line);
585 	struct vfe_output *output;
586 	struct v4l2_format *f = &line->video_out.active_fmt;
587 	unsigned long flags;
588 	int i;
589 	int wm_idx;
590 
591 	spin_lock_irqsave(&vfe->output_lock, flags);
592 
593 	output = &line->output;
594 	if (output->state != VFE_OUTPUT_OFF) {
595 		dev_err(vfe->camss->dev, "Output is running\n");
596 		goto error;
597 	}
598 	output->state = VFE_OUTPUT_RESERVED;
599 
600 	output->active_buf = 0;
601 
602 	switch (f->fmt.pix_mp.pixelformat) {
603 	case V4L2_PIX_FMT_NV12:
604 	case V4L2_PIX_FMT_NV21:
605 	case V4L2_PIX_FMT_NV16:
606 	case V4L2_PIX_FMT_NV61:
607 		output->wm_num = 2;
608 		break;
609 	default:
610 		output->wm_num = 1;
611 		break;
612 	}
613 
614 	for (i = 0; i < output->wm_num; i++) {
615 		wm_idx = vfe_reserve_wm(vfe, line->id);
616 		if (wm_idx < 0) {
617 			dev_err(vfe->camss->dev, "Can not reserve wm\n");
618 			goto error_get_wm;
619 		}
620 		output->wm_idx[i] = wm_idx;
621 	}
622 
623 	output->drop_update_idx = 0;
624 
625 	spin_unlock_irqrestore(&vfe->output_lock, flags);
626 
627 	return 0;
628 
629 error_get_wm:
630 	for (i--; i >= 0; i--)
631 		vfe_release_wm(vfe, output->wm_idx[i]);
632 	output->state = VFE_OUTPUT_OFF;
633 error:
634 	spin_unlock_irqrestore(&vfe->output_lock, flags);
635 
636 	return -EINVAL;
637 }
638 
static int vfe_put_output(struct vfe_line *line)
640 {
641 	struct vfe_device *vfe = to_vfe(line);
642 	struct vfe_output *output = &line->output;
643 	unsigned long flags;
644 	unsigned int i;
645 
646 	spin_lock_irqsave(&vfe->output_lock, flags);
647 
648 	for (i = 0; i < output->wm_num; i++)
649 		vfe_release_wm(vfe, output->wm_idx[i]);
650 
651 	output->state = VFE_OUTPUT_OFF;
652 
653 	spin_unlock_irqrestore(&vfe->output_lock, flags);
654 	return 0;
655 }
656 
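/*
 * vfe_enable_output - Configure and start the write masters for a line
 * @line: VFE line
 *
 * Pulls up to two pending buffers to seed the ping/pong addresses, sets
 * the initial frame drop pattern (honoring the sensor's reported frame
 * skip), and programs the write masters. RDI lines use a single
 * frame-based write master; the PIX line splits the available UB size
 * equally across its line-based write masters and also configures the
 * camif, demux, scaler, crop and clamp blocks.
 *
 * Return 0 on success or a negative error code otherwise
 */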
static int vfe_enable_output(struct vfe_line *line)
658 {
659 	struct vfe_device *vfe = to_vfe(line);
660 	struct vfe_output *output = &line->output;
661 	const struct vfe_hw_ops *ops = vfe->ops;
662 	struct media_entity *sensor;
663 	unsigned long flags;
664 	unsigned int frame_skip = 0;
665 	unsigned int i;
666 	u16 ub_size;
667 
668 	ub_size = ops->get_ub_size(vfe->id);
669 	if (!ub_size)
670 		return -EINVAL;
671 
672 	sensor = camss_find_sensor(&line->subdev.entity);
673 	if (sensor) {
674 		struct v4l2_subdev *subdev =
675 					media_entity_to_v4l2_subdev(sensor);
676 
677 		v4l2_subdev_call(subdev, sensor, g_skip_frames, &frame_skip);
678 		/* Max frame skip is 29 frames */
679 		if (frame_skip > VFE_FRAME_DROP_VAL - 1)
680 			frame_skip = VFE_FRAME_DROP_VAL - 1;
681 	}
682 
683 	spin_lock_irqsave(&vfe->output_lock, flags);
684 
685 	ops->reg_update_clear(vfe, line->id);
686 
687 	if (output->state != VFE_OUTPUT_RESERVED) {
688 		dev_err(vfe->camss->dev, "Output is not in reserved state %d\n",
689 			output->state);
690 		spin_unlock_irqrestore(&vfe->output_lock, flags);
691 		return -EINVAL;
692 	}
693 	output->state = VFE_OUTPUT_IDLE;
694 
695 	output->buf[0] = vfe_buf_get_pending(output);
696 	output->buf[1] = vfe_buf_get_pending(output);
697 
698 	if (!output->buf[0] && output->buf[1]) {
699 		output->buf[0] = output->buf[1];
700 		output->buf[1] = NULL;
701 	}
702 
703 	if (output->buf[0])
704 		output->state = VFE_OUTPUT_SINGLE;
705 
706 	if (output->buf[1])
707 		output->state = VFE_OUTPUT_CONTINUOUS;
708 
709 	switch (output->state) {
710 	case VFE_OUTPUT_SINGLE:
711 		vfe_output_frame_drop(vfe, output, 1 << frame_skip);
712 		break;
713 	case VFE_OUTPUT_CONTINUOUS:
714 		vfe_output_frame_drop(vfe, output, 3 << frame_skip);
715 		break;
716 	default:
717 		vfe_output_frame_drop(vfe, output, 0);
718 		break;
719 	}
720 
721 	output->sequence = 0;
722 	output->wait_sof = 0;
723 	output->wait_reg_update = 0;
724 	reinit_completion(&output->sof);
725 	reinit_completion(&output->reg_update);
726 
727 	vfe_output_init_addrs(vfe, output, 0);
728 
729 	if (line->id != VFE_LINE_PIX) {
730 		ops->set_cgc_override(vfe, output->wm_idx[0], 1);
731 		ops->enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 1);
732 		ops->bus_connect_wm_to_rdi(vfe, output->wm_idx[0], line->id);
733 		ops->wm_set_subsample(vfe, output->wm_idx[0]);
734 		ops->set_rdi_cid(vfe, line->id, 0);
735 		ops->wm_set_ub_cfg(vfe, output->wm_idx[0],
736 				   (ub_size + 1) * output->wm_idx[0], ub_size);
737 		ops->wm_frame_based(vfe, output->wm_idx[0], 1);
738 		ops->wm_enable(vfe, output->wm_idx[0], 1);
739 		ops->bus_reload_wm(vfe, output->wm_idx[0]);
740 	} else {
741 		ub_size /= output->wm_num;
742 		for (i = 0; i < output->wm_num; i++) {
743 			ops->set_cgc_override(vfe, output->wm_idx[i], 1);
744 			ops->wm_set_subsample(vfe, output->wm_idx[i]);
745 			ops->wm_set_ub_cfg(vfe, output->wm_idx[i],
746 					   (ub_size + 1) * output->wm_idx[i],
747 					   ub_size);
748 			ops->wm_line_based(vfe, output->wm_idx[i],
749 					&line->video_out.active_fmt.fmt.pix_mp,
750 					i, 1);
751 			ops->wm_enable(vfe, output->wm_idx[i], 1);
752 			ops->bus_reload_wm(vfe, output->wm_idx[i]);
753 		}
754 		ops->enable_irq_pix_line(vfe, 0, line->id, 1);
755 		ops->set_module_cfg(vfe, 1);
756 		ops->set_camif_cfg(vfe, line);
757 		ops->set_realign_cfg(vfe, line, 1);
758 		ops->set_xbar_cfg(vfe, output, 1);
759 		ops->set_demux_cfg(vfe, line);
760 		ops->set_scale_cfg(vfe, line);
761 		ops->set_crop_cfg(vfe, line);
762 		ops->set_clamp_cfg(vfe);
763 		ops->set_camif_cmd(vfe, 1);
764 	}
765 
766 	ops->reg_update(vfe, line->id);
767 
768 	spin_unlock_irqrestore(&vfe->output_lock, flags);
769 
770 	return 0;
771 }
772 
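/*
 * vfe_disable_output - Stop the write masters for a line
 * @line: VFE line
 *
 * Waits for the next start of frame, disables the write masters, then
 * waits for the register update to be latched before tearing down the
 * per-line configuration, so the hardware never writes to a buffer that
 * has already been returned.
 *
 * Return 0
 */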
static int vfe_disable_output(struct vfe_line *line)
774 {
775 	struct vfe_device *vfe = to_vfe(line);
776 	struct vfe_output *output = &line->output;
777 	const struct vfe_hw_ops *ops = vfe->ops;
778 	unsigned long flags;
779 	unsigned long time;
780 	unsigned int i;
781 
782 	spin_lock_irqsave(&vfe->output_lock, flags);
783 
784 	output->wait_sof = 1;
785 	spin_unlock_irqrestore(&vfe->output_lock, flags);
786 
787 	time = wait_for_completion_timeout(&output->sof,
788 					   msecs_to_jiffies(VFE_NEXT_SOF_MS));
789 	if (!time)
790 		dev_err(vfe->camss->dev, "VFE sof timeout\n");
791 
792 	spin_lock_irqsave(&vfe->output_lock, flags);
793 	for (i = 0; i < output->wm_num; i++)
794 		ops->wm_enable(vfe, output->wm_idx[i], 0);
795 
796 	ops->reg_update(vfe, line->id);
797 	output->wait_reg_update = 1;
798 	spin_unlock_irqrestore(&vfe->output_lock, flags);
799 
800 	time = wait_for_completion_timeout(&output->reg_update,
801 					   msecs_to_jiffies(VFE_NEXT_SOF_MS));
802 	if (!time)
803 		dev_err(vfe->camss->dev, "VFE reg update timeout\n");
804 
805 	spin_lock_irqsave(&vfe->output_lock, flags);
806 
807 	if (line->id != VFE_LINE_PIX) {
808 		ops->wm_frame_based(vfe, output->wm_idx[0], 0);
809 		ops->bus_disconnect_wm_from_rdi(vfe, output->wm_idx[0],
810 						line->id);
811 		ops->enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 0);
812 		ops->set_cgc_override(vfe, output->wm_idx[0], 0);
813 		spin_unlock_irqrestore(&vfe->output_lock, flags);
814 	} else {
815 		for (i = 0; i < output->wm_num; i++) {
816 			ops->wm_line_based(vfe, output->wm_idx[i], NULL, i, 0);
817 			ops->set_cgc_override(vfe, output->wm_idx[i], 0);
818 		}
819 
820 		ops->enable_irq_pix_line(vfe, 0, line->id, 0);
821 		ops->set_module_cfg(vfe, 0);
822 		ops->set_realign_cfg(vfe, line, 0);
823 		ops->set_xbar_cfg(vfe, output, 0);
824 
825 		ops->set_camif_cmd(vfe, 0);
826 		spin_unlock_irqrestore(&vfe->output_lock, flags);
827 
828 		ops->camif_wait_for_stop(vfe, vfe->camss->dev);
829 	}
830 
831 	return 0;
832 }
833 
834 /*
835  * vfe_enable - Enable streaming on VFE line
836  * @line: VFE line
837  *
838  * Return 0 on success or a negative error code otherwise
839  */
static int vfe_enable(struct vfe_line *line)
841 {
842 	struct vfe_device *vfe = to_vfe(line);
843 	int ret;
844 
845 	mutex_lock(&vfe->stream_lock);
846 
847 	if (!vfe->stream_count) {
848 		vfe->ops->enable_irq_common(vfe);
849 
850 		vfe->ops->bus_enable_wr_if(vfe, 1);
851 
852 		vfe->ops->set_qos(vfe);
853 
854 		vfe->ops->set_ds(vfe);
855 	}
856 
857 	vfe->stream_count++;
858 
859 	mutex_unlock(&vfe->stream_lock);
860 
861 	ret = vfe_get_output(line);
862 	if (ret < 0)
863 		goto error_get_output;
864 
865 	ret = vfe_enable_output(line);
866 	if (ret < 0)
867 		goto error_enable_output;
868 
869 	vfe->was_streaming = 1;
870 
871 	return 0;
872 
874 error_enable_output:
875 	vfe_put_output(line);
876 
877 error_get_output:
878 	mutex_lock(&vfe->stream_lock);
879 
880 	if (vfe->stream_count == 1)
881 		vfe->ops->bus_enable_wr_if(vfe, 0);
882 
883 	vfe->stream_count--;
884 
885 	mutex_unlock(&vfe->stream_lock);
886 
887 	return ret;
888 }
889 
890 /*
891  * vfe_disable - Disable streaming on VFE line
892  * @line: VFE line
893  *
894  * Return 0 on success or a negative error code otherwise
895  */
static int vfe_disable(struct vfe_line *line)
897 {
898 	struct vfe_device *vfe = to_vfe(line);
899 
900 	vfe_disable_output(line);
901 
902 	vfe_put_output(line);
903 
904 	mutex_lock(&vfe->stream_lock);
905 
906 	if (vfe->stream_count == 1)
907 		vfe->ops->bus_enable_wr_if(vfe, 0);
908 
909 	vfe->stream_count--;
910 
911 	mutex_unlock(&vfe->stream_lock);
912 
913 	return 0;
914 }
915 
916 /*
917  * vfe_isr_sof - Process start of frame interrupt
918  * @vfe: VFE Device
919  * @line_id: VFE line
920  */
static void vfe_isr_sof(struct vfe_device *vfe, enum vfe_line_id line_id)
922 {
923 	struct vfe_output *output;
924 	unsigned long flags;
925 
926 	spin_lock_irqsave(&vfe->output_lock, flags);
927 	output = &vfe->line[line_id].output;
928 	if (output->wait_sof) {
929 		output->wait_sof = 0;
930 		complete(&output->sof);
931 	}
932 	spin_unlock_irqrestore(&vfe->output_lock, flags);
933 }
934 
935 /*
936  * vfe_isr_reg_update - Process reg update interrupt
937  * @vfe: VFE Device
938  * @line_id: VFE line
939  */
static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
941 {
942 	struct vfe_output *output;
943 	unsigned long flags;
944 
945 	spin_lock_irqsave(&vfe->output_lock, flags);
946 	vfe->ops->reg_update_clear(vfe, line_id);
947 
948 	output = &vfe->line[line_id].output;
949 
950 	if (output->wait_reg_update) {
951 		output->wait_reg_update = 0;
952 		complete(&output->reg_update);
953 		spin_unlock_irqrestore(&vfe->output_lock, flags);
954 		return;
955 	}
956 
957 	if (output->state == VFE_OUTPUT_STOPPING) {
958 		/* Release last buffer when hw is idle */
959 		if (output->last_buffer) {
960 			vb2_buffer_done(&output->last_buffer->vb.vb2_buf,
961 					VB2_BUF_STATE_DONE);
962 			output->last_buffer = NULL;
963 		}
964 		output->state = VFE_OUTPUT_IDLE;
965 
		/*
		 * Buffers received in stopping state are queued in the
		 * dma pending queue; start the next capture here.
		 */
968 
969 		output->buf[0] = vfe_buf_get_pending(output);
970 		output->buf[1] = vfe_buf_get_pending(output);
971 
972 		if (!output->buf[0] && output->buf[1]) {
973 			output->buf[0] = output->buf[1];
974 			output->buf[1] = NULL;
975 		}
976 
977 		if (output->buf[0])
978 			output->state = VFE_OUTPUT_SINGLE;
979 
980 		if (output->buf[1])
981 			output->state = VFE_OUTPUT_CONTINUOUS;
982 
983 		switch (output->state) {
984 		case VFE_OUTPUT_SINGLE:
985 			vfe_output_frame_drop(vfe, output, 2);
986 			break;
987 		case VFE_OUTPUT_CONTINUOUS:
988 			vfe_output_frame_drop(vfe, output, 3);
989 			break;
990 		default:
991 			vfe_output_frame_drop(vfe, output, 0);
992 			break;
993 		}
994 
995 		vfe_output_init_addrs(vfe, output, 1);
996 	}
997 
998 	spin_unlock_irqrestore(&vfe->output_lock, flags);
999 }
1000 
1001 /*
1002  * vfe_isr_wm_done - Process write master done interrupt
1003  * @vfe: VFE Device
1004  * @wm: Write master id
1005  */
static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
1007 {
1008 	struct camss_buffer *ready_buf;
1009 	struct vfe_output *output;
1010 	dma_addr_t *new_addr;
1011 	unsigned long flags;
1012 	u32 active_index;
1013 	u64 ts = ktime_get_ns();
1014 	unsigned int i;
1015 
1016 	active_index = vfe->ops->wm_get_ping_pong_status(vfe, wm);
1017 
1018 	spin_lock_irqsave(&vfe->output_lock, flags);
1019 
1020 	if (vfe->wm_output_map[wm] == VFE_LINE_NONE) {
1021 		dev_err_ratelimited(vfe->camss->dev,
1022 				    "Received wm done for unmapped index\n");
1023 		goto out_unlock;
1024 	}
1025 	output = &vfe->line[vfe->wm_output_map[wm]].output;
1026 
1027 	if (output->active_buf == active_index) {
1028 		dev_err_ratelimited(vfe->camss->dev,
1029 				    "Active buffer mismatch!\n");
1030 		goto out_unlock;
1031 	}
1032 	output->active_buf = active_index;
1033 
1034 	ready_buf = output->buf[!active_index];
1035 	if (!ready_buf) {
1036 		dev_err_ratelimited(vfe->camss->dev,
1037 				    "Missing ready buf %d %d!\n",
1038 				    !active_index, output->state);
1039 		goto out_unlock;
1040 	}
1041 
1042 	ready_buf->vb.vb2_buf.timestamp = ts;
1043 	ready_buf->vb.sequence = output->sequence++;
1044 
1045 	/* Get next buffer */
1046 	output->buf[!active_index] = vfe_buf_get_pending(output);
1047 	if (!output->buf[!active_index]) {
1048 		/* No next buffer - set same address */
1049 		new_addr = ready_buf->addr;
1050 		vfe_buf_update_wm_on_last(vfe, output);
1051 	} else {
1052 		new_addr = output->buf[!active_index]->addr;
1053 		vfe_buf_update_wm_on_next(vfe, output);
1054 	}
1055 
1056 	if (active_index)
1057 		for (i = 0; i < output->wm_num; i++)
1058 			vfe->ops->wm_set_ping_addr(vfe, output->wm_idx[i],
1059 						   new_addr[i]);
1060 	else
1061 		for (i = 0; i < output->wm_num; i++)
1062 			vfe->ops->wm_set_pong_addr(vfe, output->wm_idx[i],
1063 						   new_addr[i]);
1064 
1065 	spin_unlock_irqrestore(&vfe->output_lock, flags);
1066 
1067 	if (output->state == VFE_OUTPUT_STOPPING)
1068 		output->last_buffer = ready_buf;
1069 	else
1070 		vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
1071 
1072 	return;
1073 
1074 out_unlock:
1075 	spin_unlock_irqrestore(&vfe->output_lock, flags);
1076 }
1077 
1078 /*
 * vfe_isr_comp_done - Process composite image done interrupt
1080  * @vfe: VFE Device
1081  * @comp: Composite image id
1082  */
static void vfe_isr_comp_done(struct vfe_device *vfe, u8 comp)
1084 {
1085 	unsigned int i;
1086 
1087 	for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++)
1088 		if (vfe->wm_output_map[i] == VFE_LINE_PIX) {
1089 			vfe_isr_wm_done(vfe, i);
1090 			break;
1091 		}
1092 }
1093 
static inline void vfe_isr_reset_ack(struct vfe_device *vfe)
1095 {
1096 	complete(&vfe->reset_complete);
1097 }
1098 
static inline void vfe_isr_halt_ack(struct vfe_device *vfe)
1100 {
1101 	complete(&vfe->halt_complete);
1102 	vfe->ops->halt_clear(vfe);
1103 }
1104 
1105 /*
1106  * vfe_set_clock_rates - Calculate and set clock rates on VFE module
1107  * @vfe: VFE device
1108  *
1109  * Return 0 on success or a negative error code otherwise
1110  */
static int vfe_set_clock_rates(struct vfe_device *vfe)
1112 {
1113 	struct device *dev = vfe->camss->dev;
1114 	u32 pixel_clock[MSM_VFE_LINE_NUM];
1115 	int i, j;
1116 	int ret;
1117 
1118 	for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) {
1119 		ret = camss_get_pixel_clock(&vfe->line[i].subdev.entity,
1120 					    &pixel_clock[i]);
1121 		if (ret)
1122 			pixel_clock[i] = 0;
1123 	}
1124 
1125 	for (i = 0; i < vfe->nclocks; i++) {
1126 		struct camss_clock *clock = &vfe->clock[i];
1127 
1128 		if (!strcmp(clock->name, "vfe0") ||
1129 		    !strcmp(clock->name, "vfe1")) {
1130 			u64 min_rate = 0;
1131 			long rate;
1132 
1133 			for (j = VFE_LINE_RDI0; j <= VFE_LINE_PIX; j++) {
1134 				u32 tmp;
1135 				u8 bpp;
1136 
1137 				if (j == VFE_LINE_PIX) {
1138 					tmp = pixel_clock[j];
1139 				} else {
1140 					struct vfe_line *l = &vfe->line[j];
1141 
1142 					bpp = vfe_get_bpp(l->formats,
1143 						l->nformats,
1144 						l->fmt[MSM_VFE_PAD_SINK].code);
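					/*
					 * Scale the interface pixel clock by
					 * bits per pixel; the divisor of 64 is
					 * assumed to reflect the 64-bit width
					 * of the RDI data path.
					 */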
1145 					tmp = pixel_clock[j] * bpp / 64;
1146 				}
1147 
1148 				if (min_rate < tmp)
1149 					min_rate = tmp;
1150 			}
1151 
1152 			camss_add_clock_margin(&min_rate);
1153 
1154 			for (j = 0; j < clock->nfreqs; j++)
1155 				if (min_rate < clock->freq[j])
1156 					break;
1157 
1158 			if (j == clock->nfreqs) {
1159 				dev_err(dev,
					"Pixel clock is too high for VFE\n");
1161 				return -EINVAL;
1162 			}
1163 
			/*
			 * If the sensor pixel clock is not available,
			 * set the highest possible VFE clock rate.
			 */
1166 			if (min_rate == 0)
1167 				j = clock->nfreqs - 1;
1168 
1169 			rate = clk_round_rate(clock->clk, clock->freq[j]);
1170 			if (rate < 0) {
1171 				dev_err(dev, "clk round rate failed: %ld\n",
1172 					rate);
1173 				return -EINVAL;
1174 			}
1175 
1176 			ret = clk_set_rate(clock->clk, rate);
1177 			if (ret < 0) {
1178 				dev_err(dev, "clk set rate failed: %d\n", ret);
1179 				return ret;
1180 			}
1181 		}
1182 	}
1183 
1184 	return 0;
1185 }
1186 
1187 /*
1188  * vfe_check_clock_rates - Check current clock rates on VFE module
1189  * @vfe: VFE device
1190  *
1191  * Return 0 if current clock rates are suitable for a new pipeline
1192  * or a negative error code otherwise
1193  */
static int vfe_check_clock_rates(struct vfe_device *vfe)
1195 {
1196 	u32 pixel_clock[MSM_VFE_LINE_NUM];
1197 	int i, j;
1198 	int ret;
1199 
1200 	for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) {
1201 		ret = camss_get_pixel_clock(&vfe->line[i].subdev.entity,
1202 					    &pixel_clock[i]);
1203 		if (ret)
1204 			pixel_clock[i] = 0;
1205 	}
1206 
1207 	for (i = 0; i < vfe->nclocks; i++) {
1208 		struct camss_clock *clock = &vfe->clock[i];
1209 
1210 		if (!strcmp(clock->name, "vfe0") ||
1211 		    !strcmp(clock->name, "vfe1")) {
1212 			u64 min_rate = 0;
1213 			unsigned long rate;
1214 
1215 			for (j = VFE_LINE_RDI0; j <= VFE_LINE_PIX; j++) {
1216 				u32 tmp;
1217 				u8 bpp;
1218 
1219 				if (j == VFE_LINE_PIX) {
1220 					tmp = pixel_clock[j];
1221 				} else {
1222 					struct vfe_line *l = &vfe->line[j];
1223 
1224 					bpp = vfe_get_bpp(l->formats,
1225 						l->nformats,
1226 						l->fmt[MSM_VFE_PAD_SINK].code);
1227 					tmp = pixel_clock[j] * bpp / 64;
1228 				}
1229 
1230 				if (min_rate < tmp)
1231 					min_rate = tmp;
1232 			}
1233 
1234 			camss_add_clock_margin(&min_rate);
1235 
1236 			rate = clk_get_rate(clock->clk);
1237 			if (rate < min_rate)
1238 				return -EBUSY;
1239 		}
1240 	}
1241 
1242 	return 0;
1243 }
1244 
1245 /*
1246  * vfe_get - Power up and reset VFE module
1247  * @vfe: VFE Device
1248  *
1249  * Return 0 on success or a negative error code otherwise
1250  */
static int vfe_get(struct vfe_device *vfe)
1252 {
1253 	int ret;
1254 
1255 	mutex_lock(&vfe->power_lock);
1256 
1257 	if (vfe->power_count == 0) {
1258 		ret = camss_pm_domain_on(vfe->camss, vfe->id);
1259 		if (ret < 0)
1260 			goto error_pm_domain;
1261 
1262 		ret = pm_runtime_get_sync(vfe->camss->dev);
1263 		if (ret < 0)
1264 			goto error_pm_runtime_get;
1265 
1266 		ret = vfe_set_clock_rates(vfe);
1267 		if (ret < 0)
1268 			goto error_pm_runtime_get;
1269 
1270 		ret = camss_enable_clocks(vfe->nclocks, vfe->clock,
1271 					  vfe->camss->dev);
1272 		if (ret < 0)
1273 			goto error_pm_runtime_get;
1274 
1275 		ret = vfe_reset(vfe);
1276 		if (ret < 0)
1277 			goto error_reset;
1278 
1279 		vfe_reset_output_maps(vfe);
1280 
1281 		vfe_init_outputs(vfe);
1282 	} else {
1283 		ret = vfe_check_clock_rates(vfe);
1284 		if (ret < 0)
1285 			goto error_pm_runtime_get;
1286 	}
1287 	vfe->power_count++;
1288 
1289 	mutex_unlock(&vfe->power_lock);
1290 
1291 	return 0;
1292 
1293 error_reset:
1294 	camss_disable_clocks(vfe->nclocks, vfe->clock);
1295 
1296 error_pm_runtime_get:
1297 	pm_runtime_put_sync(vfe->camss->dev);
1298 	camss_pm_domain_off(vfe->camss, vfe->id);
1299 
1300 error_pm_domain:
1301 	mutex_unlock(&vfe->power_lock);
1302 
1303 	return ret;
1304 }
1305 
1306 /*
1307  * vfe_put - Power down VFE module
1308  * @vfe: VFE Device
1309  */
static void vfe_put(struct vfe_device *vfe)
1311 {
1312 	mutex_lock(&vfe->power_lock);
1313 
1314 	if (vfe->power_count == 0) {
1315 		dev_err(vfe->camss->dev, "vfe power off on power_count == 0\n");
1316 		goto exit;
1317 	} else if (vfe->power_count == 1) {
1318 		if (vfe->was_streaming) {
1319 			vfe->was_streaming = 0;
1320 			vfe_halt(vfe);
1321 		}
1322 		camss_disable_clocks(vfe->nclocks, vfe->clock);
1323 		pm_runtime_put_sync(vfe->camss->dev);
1324 		camss_pm_domain_off(vfe->camss, vfe->id);
1325 	}
1326 
1327 	vfe->power_count--;
1328 
1329 exit:
1330 	mutex_unlock(&vfe->power_lock);
1331 }
1332 
1333 /*
1334  * vfe_queue_buffer - Add empty buffer
1335  * @vid: Video device structure
1336  * @buf: Buffer to be enqueued
1337  *
1338  * Add an empty buffer - depending on the current number of buffers it will be
1339  * put in pending buffer queue or directly given to the hardware to be filled.
1340  *
1341  * Return 0 on success or a negative error code otherwise
1342  */
static int vfe_queue_buffer(struct camss_video *vid,
1344 			    struct camss_buffer *buf)
1345 {
1346 	struct vfe_line *line = container_of(vid, struct vfe_line, video_out);
1347 	struct vfe_device *vfe = to_vfe(line);
1348 	struct vfe_output *output;
1349 	unsigned long flags;
1350 
1351 	output = &line->output;
1352 
1353 	spin_lock_irqsave(&vfe->output_lock, flags);
1354 
1355 	vfe_buf_update_wm_on_new(vfe, output, buf);
1356 
1357 	spin_unlock_irqrestore(&vfe->output_lock, flags);
1358 
1359 	return 0;
1360 }
1361 
1362 /*
1363  * vfe_flush_buffers - Return all vb2 buffers
1364  * @vid: Video device structure
1365  * @state: vb2 buffer state of the returned buffers
1366  *
1367  * Return all buffers to vb2. This includes queued pending buffers (still
1368  * unused) and any buffers given to the hardware but again still not used.
1369  *
1370  * Return 0 on success or a negative error code otherwise
1371  */
static int vfe_flush_buffers(struct camss_video *vid,
1373 			     enum vb2_buffer_state state)
1374 {
1375 	struct vfe_line *line = container_of(vid, struct vfe_line, video_out);
1376 	struct vfe_device *vfe = to_vfe(line);
1377 	struct vfe_output *output;
1378 	unsigned long flags;
1379 
1380 	output = &line->output;
1381 
1382 	spin_lock_irqsave(&vfe->output_lock, flags);
1383 
1384 	vfe_buf_flush_pending(output, state);
1385 
1386 	if (output->buf[0])
1387 		vb2_buffer_done(&output->buf[0]->vb.vb2_buf, state);
1388 
1389 	if (output->buf[1])
1390 		vb2_buffer_done(&output->buf[1]->vb.vb2_buf, state);
1391 
1392 	if (output->last_buffer) {
1393 		vb2_buffer_done(&output->last_buffer->vb.vb2_buf, state);
1394 		output->last_buffer = NULL;
1395 	}
1396 
1397 	spin_unlock_irqrestore(&vfe->output_lock, flags);
1398 
1399 	return 0;
1400 }
1401 
1402 /*
1403  * vfe_set_power - Power on/off VFE module
1404  * @sd: VFE V4L2 subdevice
1405  * @on: Requested power state
1406  *
1407  * Return 0 on success or a negative error code otherwise
1408  */
static int vfe_set_power(struct v4l2_subdev *sd, int on)
1410 {
1411 	struct vfe_line *line = v4l2_get_subdevdata(sd);
1412 	struct vfe_device *vfe = to_vfe(line);
1413 	int ret;
1414 
1415 	if (on) {
1416 		ret = vfe_get(vfe);
1417 		if (ret < 0)
1418 			return ret;
1419 
1420 		vfe->ops->hw_version_read(vfe, vfe->camss->dev);
1421 	} else {
1422 		vfe_put(vfe);
1423 	}
1424 
1425 	return 0;
1426 }
1427 
1428 /*
1429  * vfe_set_stream - Enable/disable streaming on VFE module
1430  * @sd: VFE V4L2 subdevice
1431  * @enable: Requested streaming state
1432  *
1433  * Main configuration of VFE module is triggered here.
1434  *
1435  * Return 0 on success or a negative error code otherwise
1436  */
static int vfe_set_stream(struct v4l2_subdev *sd, int enable)
1438 {
1439 	struct vfe_line *line = v4l2_get_subdevdata(sd);
1440 	struct vfe_device *vfe = to_vfe(line);
1441 	int ret;
1442 
1443 	if (enable) {
1444 		ret = vfe_enable(line);
1445 		if (ret < 0)
1446 			dev_err(vfe->camss->dev,
1447 				"Failed to enable vfe outputs\n");
1448 	} else {
1449 		ret = vfe_disable(line);
1450 		if (ret < 0)
1451 			dev_err(vfe->camss->dev,
1452 				"Failed to disable vfe outputs\n");
1453 	}
1454 
1455 	return ret;
1456 }
1457 
1458 /*
1459  * __vfe_get_format - Get pointer to format structure
1460  * @line: VFE line
1461  * @cfg: V4L2 subdev pad configuration
1462  * @pad: pad from which format is requested
1463  * @which: TRY or ACTIVE format
1464  *
1465  * Return pointer to TRY or ACTIVE format structure
1466  */
1467 static struct v4l2_mbus_framefmt *
__vfe_get_format(struct vfe_line *line,
1469 		 struct v4l2_subdev_pad_config *cfg,
1470 		 unsigned int pad,
1471 		 enum v4l2_subdev_format_whence which)
1472 {
1473 	if (which == V4L2_SUBDEV_FORMAT_TRY)
1474 		return v4l2_subdev_get_try_format(&line->subdev, cfg, pad);
1475 
1476 	return &line->fmt[pad];
1477 }
1478 
1479 /*
1480  * __vfe_get_compose - Get pointer to compose selection structure
1481  * @line: VFE line
1482  * @cfg: V4L2 subdev pad configuration
1483  * @which: TRY or ACTIVE format
1484  *
1485  * Return pointer to TRY or ACTIVE compose rectangle structure
1486  */
1487 static struct v4l2_rect *
__vfe_get_compose(struct vfe_line *line,
1489 		  struct v4l2_subdev_pad_config *cfg,
1490 		  enum v4l2_subdev_format_whence which)
1491 {
1492 	if (which == V4L2_SUBDEV_FORMAT_TRY)
1493 		return v4l2_subdev_get_try_compose(&line->subdev, cfg,
1494 						   MSM_VFE_PAD_SINK);
1495 
1496 	return &line->compose;
1497 }
1498 
1499 /*
1500  * __vfe_get_crop - Get pointer to crop selection structure
1501  * @line: VFE line
1502  * @cfg: V4L2 subdev pad configuration
1503  * @which: TRY or ACTIVE format
1504  *
1505  * Return pointer to TRY or ACTIVE crop rectangle structure
1506  */
1507 static struct v4l2_rect *
__vfe_get_crop(struct vfe_line *line,
1509 	       struct v4l2_subdev_pad_config *cfg,
1510 	       enum v4l2_subdev_format_whence which)
1511 {
1512 	if (which == V4L2_SUBDEV_FORMAT_TRY)
1513 		return v4l2_subdev_get_try_crop(&line->subdev, cfg,
1514 						MSM_VFE_PAD_SRC);
1515 
1516 	return &line->crop;
1517 }
1518 
1519 /*
1520  * vfe_try_format - Handle try format by pad subdev method
1521  * @line: VFE line
1522  * @cfg: V4L2 subdev pad configuration
1523  * @pad: pad on which format is requested
1524  * @fmt: pointer to v4l2 format structure
1525  * @which: wanted subdev format
1526  */
static void vfe_try_format(struct vfe_line *line,
1528 			   struct v4l2_subdev_pad_config *cfg,
1529 			   unsigned int pad,
1530 			   struct v4l2_mbus_framefmt *fmt,
1531 			   enum v4l2_subdev_format_whence which)
1532 {
1533 	unsigned int i;
1534 	u32 code;
1535 
1536 	switch (pad) {
1537 	case MSM_VFE_PAD_SINK:
1538 		/* Set format on sink pad */
1539 
1540 		for (i = 0; i < line->nformats; i++)
1541 			if (fmt->code == line->formats[i].code)
1542 				break;
1543 
1544 		/* If not found, use UYVY as default */
1545 		if (i >= line->nformats)
1546 			fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
1547 
1548 		fmt->width = clamp_t(u32, fmt->width, 1, 8191);
1549 		fmt->height = clamp_t(u32, fmt->height, 1, 8191);
1550 
1551 		fmt->field = V4L2_FIELD_NONE;
1552 		fmt->colorspace = V4L2_COLORSPACE_SRGB;
1553 
1554 		break;
1555 
1556 	case MSM_VFE_PAD_SRC:
1557 		/* Set and return a format same as sink pad */
1558 		code = fmt->code;
1559 
1560 		*fmt = *__vfe_get_format(line, cfg, MSM_VFE_PAD_SINK, which);
1561 
1562 		fmt->code = vfe_src_pad_code(line, fmt->code, 0, code);
1563 
1564 		if (line->id == VFE_LINE_PIX) {
1565 			struct v4l2_rect *rect;
1566 
1567 			rect = __vfe_get_crop(line, cfg, which);
1568 
1569 			fmt->width = rect->width;
1570 			fmt->height = rect->height;
1571 		}
1572 
1573 		break;
1574 	}
1575 
1576 	fmt->colorspace = V4L2_COLORSPACE_SRGB;
1577 }
1578 
1579 /*
1580  * vfe_try_compose - Handle try compose selection by pad subdev method
1581  * @line: VFE line
1582  * @cfg: V4L2 subdev pad configuration
1583  * @rect: pointer to v4l2 rect structure
1584  * @which: wanted subdev format
1585  */
static void vfe_try_compose(struct vfe_line *line,
1587 			    struct v4l2_subdev_pad_config *cfg,
1588 			    struct v4l2_rect *rect,
1589 			    enum v4l2_subdev_format_whence which)
1590 {
1591 	struct v4l2_mbus_framefmt *fmt;
1592 
1593 	fmt = __vfe_get_format(line, cfg, MSM_VFE_PAD_SINK, which);
1594 
1595 	if (rect->width > fmt->width)
1596 		rect->width = fmt->width;
1597 
1598 	if (rect->height > fmt->height)
1599 		rect->height = fmt->height;
1600 
1601 	if (fmt->width > rect->width * SCALER_RATIO_MAX)
1602 		rect->width = (fmt->width + SCALER_RATIO_MAX - 1) /
1603 							SCALER_RATIO_MAX;
1604 
1605 	rect->width &= ~0x1;
1606 
1607 	if (fmt->height > rect->height * SCALER_RATIO_MAX)
1608 		rect->height = (fmt->height + SCALER_RATIO_MAX - 1) /
1609 							SCALER_RATIO_MAX;
1610 
1611 	if (rect->width < 16)
1612 		rect->width = 16;
1613 
1614 	if (rect->height < 4)
1615 		rect->height = 4;
1616 }
1617 
1618 /*
1619  * vfe_try_crop - Handle try crop selection by pad subdev method
1620  * @line: VFE line
1621  * @cfg: V4L2 subdev pad configuration
1622  * @rect: pointer to v4l2 rect structure
1623  * @which: wanted subdev format
1624  */
static void vfe_try_crop(struct vfe_line *line,
1626 			 struct v4l2_subdev_pad_config *cfg,
1627 			 struct v4l2_rect *rect,
1628 			 enum v4l2_subdev_format_whence which)
1629 {
1630 	struct v4l2_rect *compose;
1631 
1632 	compose = __vfe_get_compose(line, cfg, which);
1633 
1634 	if (rect->width > compose->width)
1635 		rect->width = compose->width;
1636 
1637 	if (rect->width + rect->left > compose->width)
1638 		rect->left = compose->width - rect->width;
1639 
1640 	if (rect->height > compose->height)
1641 		rect->height = compose->height;
1642 
1643 	if (rect->height + rect->top > compose->height)
1644 		rect->top = compose->height - rect->height;
1645 
1646 	/* wm in line based mode writes multiple of 16 horizontally */
1647 	rect->left += (rect->width & 0xf) >> 1;
1648 	rect->width &= ~0xf;
1649 
1650 	if (rect->width < 16) {
1651 		rect->left = 0;
1652 		rect->width = 16;
1653 	}
1654 
1655 	if (rect->height < 4) {
1656 		rect->top = 0;
1657 		rect->height = 4;
1658 	}
1659 }
1660 
1661 /*
1662  * vfe_enum_mbus_code - Handle pixel format enumeration
1663  * @sd: VFE V4L2 subdevice
1664  * @cfg: V4L2 subdev pad configuration
1665  * @code: pointer to v4l2_subdev_mbus_code_enum structure
1666  *
1667  * return -EINVAL or zero on success
1668  */
static int vfe_enum_mbus_code(struct v4l2_subdev *sd,
1670 			      struct v4l2_subdev_pad_config *cfg,
1671 			      struct v4l2_subdev_mbus_code_enum *code)
1672 {
1673 	struct vfe_line *line = v4l2_get_subdevdata(sd);
1674 
1675 	if (code->pad == MSM_VFE_PAD_SINK) {
1676 		if (code->index >= line->nformats)
1677 			return -EINVAL;
1678 
1679 		code->code = line->formats[code->index].code;
1680 	} else {
1681 		struct v4l2_mbus_framefmt *sink_fmt;
1682 
1683 		sink_fmt = __vfe_get_format(line, cfg, MSM_VFE_PAD_SINK,
1684 					    code->which);
1685 
1686 		code->code = vfe_src_pad_code(line, sink_fmt->code,
1687 					      code->index, 0);
1688 		if (!code->code)
1689 			return -EINVAL;
1690 	}
1691 
1692 	return 0;
1693 }
1694 
1695 /*
1696  * vfe_enum_frame_size - Handle frame size enumeration
1697  * @sd: VFE V4L2 subdevice
1698  * @cfg: V4L2 subdev pad configuration
1699  * @fse: pointer to v4l2_subdev_frame_size_enum structure
1700  *
1701  * Return -EINVAL or zero on success
1702  */
static int vfe_enum_frame_size(struct v4l2_subdev *sd,
1704 			       struct v4l2_subdev_pad_config *cfg,
1705 			       struct v4l2_subdev_frame_size_enum *fse)
1706 {
1707 	struct vfe_line *line = v4l2_get_subdevdata(sd);
1708 	struct v4l2_mbus_framefmt format;
1709 
1710 	if (fse->index != 0)
1711 		return -EINVAL;
1712 
1713 	format.code = fse->code;
1714 	format.width = 1;
1715 	format.height = 1;
1716 	vfe_try_format(line, cfg, fse->pad, &format, fse->which);
1717 	fse->min_width = format.width;
1718 	fse->min_height = format.height;
1719 
1720 	if (format.code != fse->code)
1721 		return -EINVAL;
1722 
1723 	format.code = fse->code;
1724 	format.width = -1;
1725 	format.height = -1;
1726 	vfe_try_format(line, cfg, fse->pad, &format, fse->which);
1727 	fse->max_width = format.width;
1728 	fse->max_height = format.height;
1729 
1730 	return 0;
1731 }
1732 
1733 /*
1734  * vfe_get_format - Handle get format by pads subdev method
1735  * @sd: VFE V4L2 subdevice
1736  * @cfg: V4L2 subdev pad configuration
1737  * @fmt: pointer to v4l2 subdev format structure
1738  *
1739  * Return -EINVAL or zero on success
1740  */
static int vfe_get_format(struct v4l2_subdev *sd,
1742 			  struct v4l2_subdev_pad_config *cfg,
1743 			  struct v4l2_subdev_format *fmt)
1744 {
1745 	struct vfe_line *line = v4l2_get_subdevdata(sd);
1746 	struct v4l2_mbus_framefmt *format;
1747 
1748 	format = __vfe_get_format(line, cfg, fmt->pad, fmt->which);
1749 	if (format == NULL)
1750 		return -EINVAL;
1751 
1752 	fmt->format = *format;
1753 
1754 	return 0;
1755 }
1756 
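/*
 * vfe_set_format() and vfe_set_selection() call each other to propagate a
 * new sink format down the PIX line: sink format -> compose rectangle ->
 * crop rectangle -> source format. The forward declaration below breaks
 * the resulting circular dependency.
 */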
1757 static int vfe_set_selection(struct v4l2_subdev *sd,
1758 			     struct v4l2_subdev_pad_config *cfg,
1759 			     struct v4l2_subdev_selection *sel);
1760 
1761 /*
1762  * vfe_set_format - Handle set format by pads subdev method
1763  * @sd: VFE V4L2 subdevice
1764  * @cfg: V4L2 subdev pad configuration
1765  * @fmt: pointer to v4l2 subdev format structure
1766  *
1767  * Return -EINVAL or zero on success
1768  */
static int vfe_set_format(struct v4l2_subdev *sd,
1770 			  struct v4l2_subdev_pad_config *cfg,
1771 			  struct v4l2_subdev_format *fmt)
1772 {
1773 	struct vfe_line *line = v4l2_get_subdevdata(sd);
1774 	struct v4l2_mbus_framefmt *format;
1775 
1776 	format = __vfe_get_format(line, cfg, fmt->pad, fmt->which);
1777 	if (format == NULL)
1778 		return -EINVAL;
1779 
1780 	vfe_try_format(line, cfg, fmt->pad, &fmt->format, fmt->which);
1781 	*format = fmt->format;
1782 
1783 	if (fmt->pad == MSM_VFE_PAD_SINK) {
1784 		struct v4l2_subdev_selection sel = { 0 };
1785 		int ret;
1786 
1787 		/* Propagate the format from sink to source */
1788 		format = __vfe_get_format(line, cfg, MSM_VFE_PAD_SRC,
1789 					  fmt->which);
1790 
1791 		*format = fmt->format;
1792 		vfe_try_format(line, cfg, MSM_VFE_PAD_SRC, format,
1793 			       fmt->which);
1794 
1795 		if (line->id != VFE_LINE_PIX)
1796 			return 0;
1797 
1798 		/* Reset sink pad compose selection */
1799 		sel.which = fmt->which;
1800 		sel.pad = MSM_VFE_PAD_SINK;
1801 		sel.target = V4L2_SEL_TGT_COMPOSE;
1802 		sel.r.width = fmt->format.width;
1803 		sel.r.height = fmt->format.height;
1804 		ret = vfe_set_selection(sd, cfg, &sel);
1805 		if (ret < 0)
1806 			return ret;
1807 	}
1808 
1809 	return 0;
1810 }
1811 
1812 /*
1813  * vfe_get_selection - Handle get selection by pads subdev method
1814  * @sd: VFE V4L2 subdevice
1815  * @cfg: V4L2 subdev pad configuration
1816  * @sel: pointer to v4l2 subdev selection structure
1817  *
1818  * Return -EINVAL or zero on success
1819  */
static int vfe_get_selection(struct v4l2_subdev *sd,
1821 			     struct v4l2_subdev_pad_config *cfg,
1822 			     struct v4l2_subdev_selection *sel)
1823 {
1824 	struct vfe_line *line = v4l2_get_subdevdata(sd);
1825 	struct v4l2_subdev_format fmt = { 0 };
1826 	struct v4l2_rect *rect;
1827 	int ret;
1828 
1829 	if (line->id != VFE_LINE_PIX)
1830 		return -EINVAL;
1831 
1832 	if (sel->pad == MSM_VFE_PAD_SINK)
1833 		switch (sel->target) {
1834 		case V4L2_SEL_TGT_COMPOSE_BOUNDS:
1835 			fmt.pad = sel->pad;
1836 			fmt.which = sel->which;
1837 			ret = vfe_get_format(sd, cfg, &fmt);
1838 			if (ret < 0)
1839 				return ret;
1840 
1841 			sel->r.left = 0;
1842 			sel->r.top = 0;
1843 			sel->r.width = fmt.format.width;
1844 			sel->r.height = fmt.format.height;
1845 			break;
1846 		case V4L2_SEL_TGT_COMPOSE:
1847 			rect = __vfe_get_compose(line, cfg, sel->which);
1848 			if (rect == NULL)
1849 				return -EINVAL;
1850 
1851 			sel->r = *rect;
1852 			break;
1853 		default:
1854 			return -EINVAL;
1855 		}
1856 	else if (sel->pad == MSM_VFE_PAD_SRC)
1857 		switch (sel->target) {
1858 		case V4L2_SEL_TGT_CROP_BOUNDS:
1859 			rect = __vfe_get_compose(line, cfg, sel->which);
1860 			if (rect == NULL)
1861 				return -EINVAL;
1862 
1863 			sel->r.left = rect->left;
1864 			sel->r.top = rect->top;
1865 			sel->r.width = rect->width;
1866 			sel->r.height = rect->height;
1867 			break;
1868 		case V4L2_SEL_TGT_CROP:
1869 			rect = __vfe_get_crop(line, cfg, sel->which);
1870 			if (rect == NULL)
1871 				return -EINVAL;
1872 
1873 			sel->r = *rect;
1874 			break;
1875 		default:
1876 			return -EINVAL;
1877 		}
1878 
1879 	return 0;
1880 }
1881 
1882 /*
1883  * vfe_set_selection - Handle set selection by pads subdev method
1884  * @sd: VFE V4L2 subdevice
1885  * @cfg: V4L2 subdev pad configuration
1886  * @sel: pointer to v4l2 subdev selection structure
1887  *
1888  * Return -EINVAL or zero on success
1889  */
static int vfe_set_selection(struct v4l2_subdev *sd,
1891 			     struct v4l2_subdev_pad_config *cfg,
1892 			     struct v4l2_subdev_selection *sel)
1893 {
1894 	struct vfe_line *line = v4l2_get_subdevdata(sd);
1895 	struct v4l2_rect *rect;
1896 	int ret;
1897 
1898 	if (line->id != VFE_LINE_PIX)
1899 		return -EINVAL;
1900 
1901 	if (sel->target == V4L2_SEL_TGT_COMPOSE &&
1902 		sel->pad == MSM_VFE_PAD_SINK) {
1903 		struct v4l2_subdev_selection crop = { 0 };
1904 
1905 		rect = __vfe_get_compose(line, cfg, sel->which);
1906 		if (rect == NULL)
1907 			return -EINVAL;
1908 
1909 		vfe_try_compose(line, cfg, &sel->r, sel->which);
1910 		*rect = sel->r;
1911 
1912 		/* Reset source crop selection */
1913 		crop.which = sel->which;
1914 		crop.pad = MSM_VFE_PAD_SRC;
1915 		crop.target = V4L2_SEL_TGT_CROP;
1916 		crop.r = *rect;
1917 		ret = vfe_set_selection(sd, cfg, &crop);
1918 	} else if (sel->target == V4L2_SEL_TGT_CROP &&
1919 		sel->pad == MSM_VFE_PAD_SRC) {
1920 		struct v4l2_subdev_format fmt = { 0 };
1921 
1922 		rect = __vfe_get_crop(line, cfg, sel->which);
1923 		if (rect == NULL)
1924 			return -EINVAL;
1925 
1926 		vfe_try_crop(line, cfg, &sel->r, sel->which);
1927 		*rect = sel->r;
1928 
1929 		/* Reset source pad format width and height */
1930 		fmt.which = sel->which;
1931 		fmt.pad = MSM_VFE_PAD_SRC;
1932 		ret = vfe_get_format(sd, cfg, &fmt);
1933 		if (ret < 0)
1934 			return ret;
1935 
1936 		fmt.format.width = rect->width;
1937 		fmt.format.height = rect->height;
1938 		ret = vfe_set_format(sd, cfg, &fmt);
1939 	} else {
1940 		ret = -EINVAL;
1941 	}
1942 
1943 	return ret;
1944 }
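/*
 * Example (userspace sketch, not part of this driver): driving the PIX line
 * scaler through the selection API handled above. The subdev node path and
 * the 1296x972 target are illustrative assumptions; setting COMPOSE on the
 * sink pad makes vfe_set_selection() clamp the rectangle (vfe_try_compose)
 * and cascade it to the source crop and source pad format.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/v4l2-subdev.h>
 *
 *	static int vfe_pix_downscale(const char *subdev_path)
 *	{
 *		struct v4l2_subdev_selection sel = {
 *			.which = V4L2_SUBDEV_FORMAT_ACTIVE,
 *			.pad = 0,	// sink pad (MSM_VFE_PAD_SINK)
 *			.target = V4L2_SEL_TGT_COMPOSE,
 *			.r = { .left = 0, .top = 0,
 *			       .width = 1296, .height = 972 },
 *		};
 *		int fd = open(subdev_path, O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *
 *		// The driver propagates this rectangle to TGT_CROP on the
 *		// source pad and then to the source pad format, as above.
 *		return ioctl(fd, VIDIOC_SUBDEV_S_SELECTION, &sel);
 *	}
 */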
1945 
1946 /*
1947  * vfe_init_formats - Initialize formats on all pads
1948  * @sd: VFE V4L2 subdevice
1949  * @fh: V4L2 subdev file handle
1950  *
1951  * Initialize all pad formats with default values.
1952  *
1953  * Return 0 on success or a negative error code otherwise
1954  */
1955 static int vfe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
1956 {
1957 	struct v4l2_subdev_format format = {
1958 		.pad = MSM_VFE_PAD_SINK,
1959 		.which = fh ? V4L2_SUBDEV_FORMAT_TRY :
1960 			      V4L2_SUBDEV_FORMAT_ACTIVE,
1961 		.format = {
1962 			.code = MEDIA_BUS_FMT_UYVY8_2X8,
1963 			.width = 1920,
1964 			.height = 1080
1965 		}
1966 	};
1967 
1968 	return vfe_set_format(sd, fh ? fh->pad : NULL, &format);
1969 }
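/*
 * Example (userspace sketch, illustrative only): since vfe_init_formats() is
 * also wired up as the subdev .open internal op, a newly opened file handle
 * starts with UYVY8_2X8 1920x1080 TRY formats on the pads. The node path
 * below is an assumption.
 *
 *	struct v4l2_subdev_format fmt = {
 *		.which = V4L2_SUBDEV_FORMAT_TRY,
 *		.pad = 0,	// sink pad
 *	};
 *	int fd = open("/dev/v4l-subdevN", O_RDWR);
 *
 *	ioctl(fd, VIDIOC_SUBDEV_G_FMT, &fmt);
 *	// fmt.format.code == MEDIA_BUS_FMT_UYVY8_2X8
 *	// fmt.format.width == 1920, fmt.format.height == 1080
 */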
1970 
1971 /*
1972  * msm_vfe_subdev_init - Initialize VFE device structure and resources
 * @camss: Camera sub-system structure
1973  * @vfe: VFE device
1974  * @res: VFE module resources table
 * @id: VFE HW module id
1975  *
1976  * Return 0 on success or a negative error code otherwise
1977  */
1978 int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
1979 			const struct resources *res, u8 id)
1980 {
1981 	struct device *dev = camss->dev;
1982 	struct platform_device *pdev = to_platform_device(dev);
1983 	struct resource *r;
1984 	int i, j;
1985 	int ret;
1986 
1987 	vfe->isr_ops.reset_ack = vfe_isr_reset_ack;
1988 	vfe->isr_ops.halt_ack = vfe_isr_halt_ack;
1989 	vfe->isr_ops.reg_update = vfe_isr_reg_update;
1990 	vfe->isr_ops.sof = vfe_isr_sof;
1991 	vfe->isr_ops.comp_done = vfe_isr_comp_done;
1992 	vfe->isr_ops.wm_done = vfe_isr_wm_done;
1993 
1994 	if (camss->version == CAMSS_8x16)
1995 		vfe->ops = &vfe_ops_4_1;
1996 	else if (camss->version == CAMSS_8x96)
1997 		vfe->ops = &vfe_ops_4_7;
1998 	else
1999 		return -EINVAL;
2000 
2001 	/* Memory */
2002 
2003 	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]);
2004 	vfe->base = devm_ioremap_resource(dev, r);
2005 	if (IS_ERR(vfe->base)) {
2006 		dev_err(dev, "could not map memory\n");
2007 		return PTR_ERR(vfe->base);
2008 	}
2009 
2010 	/* Interrupt */
2011 
2012 	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
2013 					 res->interrupt[0]);
2014 	if (!r) {
2015 		dev_err(dev, "missing IRQ\n");
2016 		return -EINVAL;
2017 	}
2018 
2019 	vfe->irq = r->start;
2020 	snprintf(vfe->irq_name, sizeof(vfe->irq_name), "%s_%s%d",
2021 		 dev_name(dev), MSM_VFE_NAME, vfe->id);
2022 	ret = devm_request_irq(dev, vfe->irq, vfe->ops->isr,
2023 			       IRQF_TRIGGER_RISING, vfe->irq_name, vfe);
2024 	if (ret < 0) {
2025 		dev_err(dev, "request_irq failed: %d\n", ret);
2026 		return ret;
2027 	}
2028 
2029 	/* Clocks */
2030 
2031 	vfe->nclocks = 0;
2032 	while (res->clock[vfe->nclocks])
2033 		vfe->nclocks++;
2034 
2035 	vfe->clock = devm_kcalloc(dev, vfe->nclocks, sizeof(*vfe->clock),
2036 				  GFP_KERNEL);
2037 	if (!vfe->clock)
2038 		return -ENOMEM;
2039 
2040 	for (i = 0; i < vfe->nclocks; i++) {
2041 		struct camss_clock *clock = &vfe->clock[i];
2042 
2043 		clock->clk = devm_clk_get(dev, res->clock[i]);
2044 		if (IS_ERR(clock->clk))
2045 			return PTR_ERR(clock->clk);
2046 
2047 		clock->name = res->clock[i];
2048 
2049 		clock->nfreqs = 0;
2050 		while (res->clock_rate[i][clock->nfreqs])
2051 			clock->nfreqs++;
2052 
2053 		if (!clock->nfreqs) {
2054 			clock->freq = NULL;
2055 			continue;
2056 		}
2057 
2058 		clock->freq = devm_kcalloc(dev,
2059 					   clock->nfreqs,
2060 					   sizeof(*clock->freq),
2061 					   GFP_KERNEL);
2062 		if (!clock->freq)
2063 			return -ENOMEM;
2064 
2065 		for (j = 0; j < clock->nfreqs; j++)
2066 			clock->freq[j] = res->clock_rate[i][j];
2067 	}
2068 
2069 	mutex_init(&vfe->power_lock);
2070 	vfe->power_count = 0;
2071 
2072 	mutex_init(&vfe->stream_lock);
2073 	vfe->stream_count = 0;
2074 
2075 	spin_lock_init(&vfe->output_lock);
2076 
2077 	vfe->camss = camss;
2078 	vfe->id = id;
2079 	vfe->reg_update = 0;
2080 
2081 	for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) {
2082 		struct vfe_line *l = &vfe->line[i];
2083 
2084 		l->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
2085 		l->video_out.camss = camss;
2086 		l->id = i;
2087 		init_completion(&l->output.sof);
2088 		init_completion(&l->output.reg_update);
2089 
2090 		if (camss->version == CAMSS_8x16) {
2091 			if (i == VFE_LINE_PIX) {
2092 				l->formats = formats_pix_8x16;
2093 				l->nformats = ARRAY_SIZE(formats_pix_8x16);
2094 			} else {
2095 				l->formats = formats_rdi_8x16;
2096 				l->nformats = ARRAY_SIZE(formats_rdi_8x16);
2097 			}
2098 		} else if (camss->version == CAMSS_8x96) {
2099 			if (i == VFE_LINE_PIX) {
2100 				l->formats = formats_pix_8x96;
2101 				l->nformats = ARRAY_SIZE(formats_pix_8x96);
2102 			} else {
2103 				l->formats = formats_rdi_8x96;
2104 				l->nformats = ARRAY_SIZE(formats_rdi_8x96);
2105 			}
2106 		} else {
2107 			return -EINVAL;
2108 		}
2109 	}
2110 
2111 	init_completion(&vfe->reset_complete);
2112 	init_completion(&vfe->halt_complete);
2113 
2114 	return 0;
2115 }
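/*
 * Example (illustrative only): shape of the resources table consumed above.
 * The field names mirror how msm_vfe_subdev_init() indexes @res; the clock
 * names, rates and resource names are placeholders, not the real tables from
 * camss.c. The clock and clock_rate arrays are NULL/zero terminated, which
 * is what the counting loops above depend on.
 *
 *	static const struct resources vfe_res_example = {
 *		.clock = { "vfe0", "vfe_ahb", "vfe_axi", NULL },
 *		.clock_rate = { { 50000000, 80000000, 0 },
 *				{ 0 },
 *				{ 0 } },
 *		.reg = { "vfe0" },
 *		.interrupt = { "vfe0" },
 *	};
 */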
2116 
2117 /*
2118  * msm_vfe_get_vfe_id - Get VFE HW module id
2119  * @entity: Pointer to VFE media entity structure
2120  * @id: Return VFE HW module id here
2121  */
2122 void msm_vfe_get_vfe_id(struct media_entity *entity, u8 *id)
2123 {
2124 	struct v4l2_subdev *sd;
2125 	struct vfe_line *line;
2126 	struct vfe_device *vfe;
2127 
2128 	sd = media_entity_to_v4l2_subdev(entity);
2129 	line = v4l2_get_subdevdata(sd);
2130 	vfe = to_vfe(line);
2131 
2132 	*id = vfe->id;
2133 }
2134 
2135 /*
2136  * msm_vfe_get_vfe_line_id - Get VFE line id by media entity
2137  * @entity: Pointer to VFE media entity structure
2138  * @id: Return VFE line id here
2139  */
2140 void msm_vfe_get_vfe_line_id(struct media_entity *entity, enum vfe_line_id *id)
2141 {
2142 	struct v4l2_subdev *sd;
2143 	struct vfe_line *line;
2144 
2145 	sd = media_entity_to_v4l2_subdev(entity);
2146 	line = v4l2_get_subdevdata(sd);
2147 
2148 	*id = line->id;
2149 }
2150 
2151 /*
2152  * vfe_link_setup - Setup VFE connections
2153  * @entity: Pointer to media entity structure
2154  * @local: Pointer to local pad
2155  * @remote: Pointer to remote pad
2156  * @flags: Link flags
2157  *
2158  * Return 0 on success or -EBUSY if the local pad already has an enabled link
2159  */
2160 static int vfe_link_setup(struct media_entity *entity,
2161 			  const struct media_pad *local,
2162 			  const struct media_pad *remote, u32 flags)
2163 {
2164 	if (flags & MEDIA_LNK_FL_ENABLED)
2165 		if (media_entity_remote_pad(local))
2166 			return -EBUSY;
2167 
2168 	return 0;
2169 }
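/*
 * Example (userspace sketch, illustrative only): vfe_link_setup() returns
 * -EBUSY when userspace tries to enable a link towards a VFE pad that is
 * already connected by an enabled link. The entity ids, pad indexes and
 * media_fd below are assumptions.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/media.h>
 *
 *	struct media_link_desc link = {
 *		.source = { .entity = csid_entity_id, .index = 1 },
 *		.sink   = { .entity = vfe_entity_id,  .index = 0 },
 *		.flags  = MEDIA_LNK_FL_ENABLED,
 *	};
 *
 *	// Fails with EBUSY if another enabled link already feeds the sink pad.
 *	ioctl(media_fd, MEDIA_IOC_SETUP_LINK, &link);
 */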
2170 
2171 static const struct v4l2_subdev_core_ops vfe_core_ops = {
2172 	.s_power = vfe_set_power,
2173 };
2174 
2175 static const struct v4l2_subdev_video_ops vfe_video_ops = {
2176 	.s_stream = vfe_set_stream,
2177 };
2178 
2179 static const struct v4l2_subdev_pad_ops vfe_pad_ops = {
2180 	.enum_mbus_code = vfe_enum_mbus_code,
2181 	.enum_frame_size = vfe_enum_frame_size,
2182 	.get_fmt = vfe_get_format,
2183 	.set_fmt = vfe_set_format,
2184 	.get_selection = vfe_get_selection,
2185 	.set_selection = vfe_set_selection,
2186 };
2187 
2188 static const struct v4l2_subdev_ops vfe_v4l2_ops = {
2189 	.core = &vfe_core_ops,
2190 	.video = &vfe_video_ops,
2191 	.pad = &vfe_pad_ops,
2192 };
2193 
2194 static const struct v4l2_subdev_internal_ops vfe_v4l2_internal_ops = {
2195 	.open = vfe_init_formats,
2196 };
2197 
2198 static const struct media_entity_operations vfe_media_ops = {
2199 	.link_setup = vfe_link_setup,
2200 	.link_validate = v4l2_subdev_link_validate,
2201 };
2202 
2203 static const struct camss_video_ops camss_vfe_video_ops = {
2204 	.queue_buffer = vfe_queue_buffer,
2205 	.flush_buffers = vfe_flush_buffers,
2206 };
2207 
2208 /*
2209  * msm_vfe_register_entities - Register subdev node for VFE module
2210  * @vfe: VFE device
2211  * @v4l2_dev: V4L2 device
2212  *
2213  * Initialize and register a subdev node for the VFE module. Then
2214  * call msm_video_register() to register the video device node which
2215  * will be connected to this subdev node. Then actually create the
2216  * media link between them.
2217  *
2218  * Return 0 on success or a negative error code otherwise
2219  */
2220 int msm_vfe_register_entities(struct vfe_device *vfe,
2221 			      struct v4l2_device *v4l2_dev)
2222 {
2223 	struct device *dev = vfe->camss->dev;
2224 	struct v4l2_subdev *sd;
2225 	struct media_pad *pads;
2226 	struct camss_video *video_out;
2227 	int ret;
2228 	int i;
2229 
2230 	for (i = 0; i < ARRAY_SIZE(vfe->line); i++) {
2231 		char name[32];
2232 
2233 		sd = &vfe->line[i].subdev;
2234 		pads = vfe->line[i].pads;
2235 		video_out = &vfe->line[i].video_out;
2236 
2237 		v4l2_subdev_init(sd, &vfe_v4l2_ops);
2238 		sd->internal_ops = &vfe_v4l2_internal_ops;
2239 		sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
2240 		if (i == VFE_LINE_PIX)
2241 			snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d_%s",
2242 				 MSM_VFE_NAME, vfe->id, "pix");
2243 		else
2244 			snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d_%s%d",
2245 				 MSM_VFE_NAME, vfe->id, "rdi", i);
2246 
2247 		v4l2_set_subdevdata(sd, &vfe->line[i]);
2248 
2249 		ret = vfe_init_formats(sd, NULL);
2250 		if (ret < 0) {
2251 			dev_err(dev, "Failed to init format: %d\n", ret);
2252 			goto error_init;
2253 		}
2254 
2255 		pads[MSM_VFE_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
2256 		pads[MSM_VFE_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
2257 
2258 		sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
2259 		sd->entity.ops = &vfe_media_ops;
2260 		ret = media_entity_pads_init(&sd->entity, MSM_VFE_PADS_NUM,
2261 					     pads);
2262 		if (ret < 0) {
2263 			dev_err(dev, "Failed to init media entity: %d\n", ret);
2264 			goto error_init;
2265 		}
2266 
2267 		ret = v4l2_device_register_subdev(v4l2_dev, sd);
2268 		if (ret < 0) {
2269 			dev_err(dev, "Failed to register subdev: %d\n", ret);
2270 			goto error_reg_subdev;
2271 		}
2272 
2273 		video_out->ops = &camss_vfe_video_ops;
2274 		video_out->bpl_alignment = 8;
2275 		video_out->line_based = 0;
2276 		if (i == VFE_LINE_PIX) {
2277 			video_out->bpl_alignment = 16;
2278 			video_out->line_based = 1;
2279 		}
2280 		snprintf(name, ARRAY_SIZE(name), "%s%d_%s%d",
2281 			 MSM_VFE_NAME, vfe->id, "video", i);
2282 		ret = msm_video_register(video_out, v4l2_dev, name,
2283 					 i == VFE_LINE_PIX ? 1 : 0);
2284 		if (ret < 0) {
2285 			dev_err(dev, "Failed to register video node: %d\n",
2286 				ret);
2287 			goto error_reg_video;
2288 		}
2289 
2290 		ret = media_create_pad_link(
2291 				&sd->entity, MSM_VFE_PAD_SRC,
2292 				&video_out->vdev.entity, 0,
2293 				MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
2294 		if (ret < 0) {
2295 			dev_err(dev, "Failed to link %s->%s entities: %d\n",
2296 				sd->entity.name, video_out->vdev.entity.name,
2297 				ret);
2298 			goto error_link;
2299 		}
2300 	}
2301 
2302 	return 0;
2303 
2304 error_link:
2305 	msm_video_unregister(video_out);
2306 
2307 error_reg_video:
2308 	v4l2_device_unregister_subdev(sd);
2309 
2310 error_reg_subdev:
2311 	media_entity_cleanup(&sd->entity);
2312 
2313 error_init:
2314 	for (i--; i >= 0; i--) {
2315 		sd = &vfe->line[i].subdev;
2316 		video_out = &vfe->line[i].video_out;
2317 
2318 		msm_video_unregister(video_out);
2319 		v4l2_device_unregister_subdev(sd);
2320 		media_entity_cleanup(&sd->entity);
2321 	}
2322 
2323 	return ret;
2324 }
2325 
2326 /*
2327  * msm_vfe_unregister_entities - Unregister VFE module subdev node
2328  * @vfe: VFE device
2329  */
2330 void msm_vfe_unregister_entities(struct vfe_device *vfe)
2331 {
2332 	int i;
2333 
2334 	mutex_destroy(&vfe->power_lock);
2335 	mutex_destroy(&vfe->stream_lock);
2336 
2337 	for (i = 0; i < ARRAY_SIZE(vfe->line); i++) {
2338 		struct v4l2_subdev *sd = &vfe->line[i].subdev;
2339 		struct camss_video *video_out = &vfe->line[i].video_out;
2340 
2341 		msm_video_unregister(video_out);
2342 		v4l2_device_unregister_subdev(sd);
2343 		media_entity_cleanup(&sd->entity);
2344 	}
2345 }
2346