1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * TI CAL camera interface driver
4  *
5  * Copyright (c) 2015 Texas Instruments Inc.
6  * Benoit Parrot, <bparrot@ti.com>
7  */
8 
9 #include <linux/interrupt.h>
10 #include <linux/io.h>
11 #include <linux/ioctl.h>
12 #include <linux/module.h>
13 #include <linux/platform_device.h>
14 #include <linux/delay.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/slab.h>
17 #include <linux/videodev2.h>
18 #include <linux/of_device.h>
19 #include <linux/of_graph.h>
20 
21 #include <media/v4l2-fwnode.h>
22 #include <media/v4l2-async.h>
23 #include <media/v4l2-common.h>
24 #include <media/v4l2-ctrls.h>
25 #include <media/v4l2-device.h>
26 #include <media/v4l2-event.h>
27 #include <media/v4l2-ioctl.h>
28 #include <media/v4l2-fh.h>
29 #include <media/videobuf2-core.h>
30 #include <media/videobuf2-dma-contig.h>
31 #include "cal_regs.h"
32 
33 #define CAL_MODULE_NAME "cal"
34 
35 #define MAX_WIDTH 1920
36 #define MAX_HEIGHT 1200
37 
38 #define CAL_VERSION "0.1.0"
39 
40 MODULE_DESCRIPTION("TI CAL driver");
41 MODULE_AUTHOR("Benoit Parrot, <bparrot@ti.com>");
42 MODULE_LICENSE("GPL v2");
43 MODULE_VERSION(CAL_VERSION);
44 
45 static unsigned video_nr = -1;
46 module_param(video_nr, uint, 0644);
47 MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");
48 
49 static unsigned debug;
50 module_param(debug, uint, 0644);
51 MODULE_PARM_DESC(debug, "activates debug info");
52 
53 /* timeperframe: default value */
54 static const struct v4l2_fract
55 	tpf_default = {.numerator = 1001,	.denominator = 30000};
56 
57 #define cal_dbg(level, caldev, fmt, arg...)	\
58 		v4l2_dbg(level, debug, &caldev->v4l2_dev, fmt, ##arg)
59 #define cal_info(caldev, fmt, arg...)	\
60 		v4l2_info(&caldev->v4l2_dev, fmt, ##arg)
61 #define cal_err(caldev, fmt, arg...)	\
62 		v4l2_err(&caldev->v4l2_dev, fmt, ##arg)
63 
64 #define ctx_dbg(level, ctx, fmt, arg...)	\
65 		v4l2_dbg(level, debug, &ctx->v4l2_dev, fmt, ##arg)
66 #define ctx_info(ctx, fmt, arg...)	\
67 		v4l2_info(&ctx->v4l2_dev, fmt, ##arg)
68 #define ctx_err(ctx, fmt, arg...)	\
69 		v4l2_err(&ctx->v4l2_dev, fmt, ##arg)
70 
71 #define CAL_NUM_INPUT 1
72 #define CAL_NUM_CONTEXT 2
73 
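/*
 * Line stride helper: "bpp" is bytes per pixel (callers pass depth >> 3).
 * The result is rounded up to a 16-byte boundary, presumably to satisfy
 * the CAL write-DMA alignment requirement.
 */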
74 #define bytes_per_line(pixel, bpp) (ALIGN((pixel) * (bpp), 16))
75 
76 #define reg_read(dev, offset) ioread32(dev->base + offset)
77 #define reg_write(dev, offset, val) iowrite32(val, dev->base + offset)
78 
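/*
 * Register field helpers: get_field()/set_field() (defined below) shift by
 * the lowest set bit of the mask, so these assume contiguous bit masks.
 */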
79 #define reg_read_field(dev, offset, mask) get_field(reg_read(dev, offset), \
80 						    mask)
81 #define reg_write_field(dev, offset, field, mask) { \
82 	u32 val = reg_read(dev, offset); \
83 	set_field(&val, field, mask); \
84 	reg_write(dev, offset, val); }
85 
86 /* ------------------------------------------------------------------
87  *	Basic structures
88  * ------------------------------------------------------------------
89  */
90 
91 struct cal_fmt {
92 	u32	fourcc;
93 	u32	code;
94 	u8	depth;
95 };
96 
97 static struct cal_fmt cal_formats[] = {
98 	{
99 		.fourcc		= V4L2_PIX_FMT_YUYV,
100 		.code		= MEDIA_BUS_FMT_YUYV8_2X8,
101 		.depth		= 16,
102 	}, {
103 		.fourcc		= V4L2_PIX_FMT_UYVY,
104 		.code		= MEDIA_BUS_FMT_UYVY8_2X8,
105 		.depth		= 16,
106 	}, {
107 		.fourcc		= V4L2_PIX_FMT_YVYU,
108 		.code		= MEDIA_BUS_FMT_YVYU8_2X8,
109 		.depth		= 16,
110 	}, {
111 		.fourcc		= V4L2_PIX_FMT_VYUY,
112 		.code		= MEDIA_BUS_FMT_VYUY8_2X8,
113 		.depth		= 16,
114 	}, {
115 		.fourcc		= V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
116 		.code		= MEDIA_BUS_FMT_RGB565_2X8_LE,
117 		.depth		= 16,
118 	}, {
119 		.fourcc		= V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
120 		.code		= MEDIA_BUS_FMT_RGB565_2X8_BE,
121 		.depth		= 16,
122 	}, {
123 		.fourcc		= V4L2_PIX_FMT_RGB555, /* gggbbbbb arrrrrgg */
124 		.code		= MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
125 		.depth		= 16,
126 	}, {
127 		.fourcc		= V4L2_PIX_FMT_RGB555X, /* arrrrrgg gggbbbbb */
128 		.code		= MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
129 		.depth		= 16,
130 	}, {
131 		.fourcc		= V4L2_PIX_FMT_RGB24, /* rgb */
132 		.code		= MEDIA_BUS_FMT_RGB888_2X12_LE,
133 		.depth		= 24,
134 	}, {
135 		.fourcc		= V4L2_PIX_FMT_BGR24, /* bgr */
136 		.code		= MEDIA_BUS_FMT_RGB888_2X12_BE,
137 		.depth		= 24,
138 	}, {
139 		.fourcc		= V4L2_PIX_FMT_RGB32, /* argb */
140 		.code		= MEDIA_BUS_FMT_ARGB8888_1X32,
141 		.depth		= 32,
142 	}, {
143 		.fourcc		= V4L2_PIX_FMT_SBGGR8,
144 		.code		= MEDIA_BUS_FMT_SBGGR8_1X8,
145 		.depth		= 8,
146 	}, {
147 		.fourcc		= V4L2_PIX_FMT_SGBRG8,
148 		.code		= MEDIA_BUS_FMT_SGBRG8_1X8,
149 		.depth		= 8,
150 	}, {
151 		.fourcc		= V4L2_PIX_FMT_SGRBG8,
152 		.code		= MEDIA_BUS_FMT_SGRBG8_1X8,
153 		.depth		= 8,
154 	}, {
155 		.fourcc		= V4L2_PIX_FMT_SRGGB8,
156 		.code		= MEDIA_BUS_FMT_SRGGB8_1X8,
157 		.depth		= 8,
158 	}, {
159 		.fourcc		= V4L2_PIX_FMT_SBGGR10,
160 		.code		= MEDIA_BUS_FMT_SBGGR10_1X10,
161 		.depth		= 16,
162 	}, {
163 		.fourcc		= V4L2_PIX_FMT_SGBRG10,
164 		.code		= MEDIA_BUS_FMT_SGBRG10_1X10,
165 		.depth		= 16,
166 	}, {
167 		.fourcc		= V4L2_PIX_FMT_SGRBG10,
168 		.code		= MEDIA_BUS_FMT_SGRBG10_1X10,
169 		.depth		= 16,
170 	}, {
171 		.fourcc		= V4L2_PIX_FMT_SRGGB10,
172 		.code		= MEDIA_BUS_FMT_SRGGB10_1X10,
173 		.depth		= 16,
174 	}, {
175 		.fourcc		= V4L2_PIX_FMT_SBGGR12,
176 		.code		= MEDIA_BUS_FMT_SBGGR12_1X12,
177 		.depth		= 16,
178 	}, {
179 		.fourcc		= V4L2_PIX_FMT_SGBRG12,
180 		.code		= MEDIA_BUS_FMT_SGBRG12_1X12,
181 		.depth		= 16,
182 	}, {
183 		.fourcc		= V4L2_PIX_FMT_SGRBG12,
184 		.code		= MEDIA_BUS_FMT_SGRBG12_1X12,
185 		.depth		= 16,
186 	}, {
187 		.fourcc		= V4L2_PIX_FMT_SRGGB12,
188 		.code		= MEDIA_BUS_FMT_SRGGB12_1X12,
189 		.depth		= 16,
190 	},
191 };
192 
193 /* Print Four-character-code (FOURCC) */
194 static char *fourcc_to_str(u32 fmt)
195 {
196 	static char code[5];
197 
198 	code[0] = (unsigned char)(fmt & 0xff);
199 	code[1] = (unsigned char)((fmt >> 8) & 0xff);
200 	code[2] = (unsigned char)((fmt >> 16) & 0xff);
201 	code[3] = (unsigned char)((fmt >> 24) & 0xff);
202 	code[4] = '\0';
203 
204 	return code;
205 }
206 
207 /* buffer for one video frame */
208 struct cal_buffer {
209 	/* common v4l buffer stuff -- must be first */
210 	struct vb2_v4l2_buffer	vb;
211 	struct list_head	list;
212 	const struct cal_fmt	*fmt;
213 };
214 
215 struct cal_dmaqueue {
216 	struct list_head	active;
217 
218 	/* Counters to control fps rate */
219 	int			frame;
220 	int			ini_jiffies;
221 };
222 
223 struct cm_data {
224 	void __iomem		*base;
225 	struct resource		*res;
226 
227 	unsigned int		camerrx_control;
228 
229 	struct platform_device *pdev;
230 };
231 
232 struct cc_data {
233 	void __iomem		*base;
234 	struct resource		*res;
235 
236 	struct platform_device *pdev;
237 };
238 
239 /*
240  * There is one cal_dev structure in the driver; it is shared by
241  * all instances.
242  */
243 struct cal_dev {
244 	int			irq;
245 	void __iomem		*base;
246 	struct resource		*res;
247 	struct platform_device	*pdev;
248 	struct v4l2_device	v4l2_dev;
249 
250 	/* Control Module handle */
251 	struct cm_data		*cm;
252 	/* Camera Core Module handle */
253 	struct cc_data		*cc[CAL_NUM_CSI2_PORTS];
254 
255 	struct cal_ctx		*ctx[CAL_NUM_CONTEXT];
256 };
257 
258 /*
259  * There is one cal_ctx structure for each camera core context.
260  */
261 struct cal_ctx {
262 	struct v4l2_device	v4l2_dev;
263 	struct v4l2_ctrl_handler ctrl_handler;
264 	struct video_device	vdev;
265 	struct v4l2_async_notifier notifier;
266 	struct v4l2_subdev	*sensor;
267 	struct v4l2_fwnode_endpoint	endpoint;
268 
269 	struct v4l2_async_subdev asd;
270 
271 	struct v4l2_fh		fh;
272 	struct cal_dev		*dev;
273 	struct cc_data		*cc;
274 
275 	/* v4l2_ioctl mutex */
276 	struct mutex		mutex;
277 	/* v4l2 buffers lock */
278 	spinlock_t		slock;
279 
280 	/* Several counters */
281 	unsigned long		jiffies;
282 
283 	struct cal_dmaqueue	vidq;
284 
285 	/* Input Number */
286 	int			input;
287 
288 	/* video capture */
289 	const struct cal_fmt	*fmt;
290 	/* Used to store current pixel format */
291 	struct v4l2_format		v_fmt;
292 	/* Used to store current mbus frame format */
293 	struct v4l2_mbus_framefmt	m_fmt;
294 
295 	/* Current subdev enumerated format */
296 	struct cal_fmt		*active_fmt[ARRAY_SIZE(cal_formats)];
297 	int			num_active_fmt;
298 
299 	struct v4l2_fract	timeperframe;
300 	unsigned int		sequence;
301 	unsigned int		external_rate;
302 	struct vb2_queue	vb_vidq;
303 	unsigned int		seq_count;
304 	unsigned int		csi2_port;
305 	unsigned int		virtual_channel;
306 
307 	/* Pointer pointing to current v4l2_buffer */
308 	struct cal_buffer	*cur_frm;
309 	/* Pointer pointing to next v4l2_buffer */
310 	struct cal_buffer	*next_frm;
311 };
312 
313 static const struct cal_fmt *find_format_by_pix(struct cal_ctx *ctx,
314 						u32 pixelformat)
315 {
316 	const struct cal_fmt *fmt;
317 	unsigned int k;
318 
319 	for (k = 0; k < ctx->num_active_fmt; k++) {
320 		fmt = ctx->active_fmt[k];
321 		if (fmt->fourcc == pixelformat)
322 			return fmt;
323 	}
324 
325 	return NULL;
326 }
327 
328 static const struct cal_fmt *find_format_by_code(struct cal_ctx *ctx,
329 						 u32 code)
330 {
331 	const struct cal_fmt *fmt;
332 	unsigned int k;
333 
334 	for (k = 0; k < ctx->num_active_fmt; k++) {
335 		fmt = ctx->active_fmt[k];
336 		if (fmt->code == code)
337 			return fmt;
338 	}
339 
340 	return NULL;
341 }
342 
343 static inline struct cal_ctx *notifier_to_ctx(struct v4l2_async_notifier *n)
344 {
345 	return container_of(n, struct cal_ctx, notifier);
346 }
347 
348 static inline int get_field(u32 value, u32 mask)
349 {
350 	return (value & mask) >> __ffs(mask);
351 }
352 
353 static inline void set_field(u32 *valp, u32 field, u32 mask)
354 {
355 	u32 val = *valp;
356 
357 	val &= ~mask;
358 	val |= (field << __ffs(mask)) & mask;
359 	*valp = val;
360 }
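
/* Example: set_field(&val, 0x3, GENMASK(5, 4)) places 0x3 into bits 5:4 of val. */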
361 
362 /*
363  * Control Module block access
364  */
365 static struct cm_data *cm_create(struct cal_dev *dev)
366 {
367 	struct platform_device *pdev = dev->pdev;
368 	struct cm_data *cm;
369 
370 	cm = devm_kzalloc(&pdev->dev, sizeof(*cm), GFP_KERNEL);
371 	if (!cm)
372 		return ERR_PTR(-ENOMEM);
373 
374 	cm->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
375 						"camerrx_control");
376 	cm->base = devm_ioremap_resource(&pdev->dev, cm->res);
377 	if (IS_ERR(cm->base)) {
378 		cal_err(dev, "failed to ioremap\n");
379 		return ERR_CAST(cm->base);
380 	}
381 
382 	cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
383 		cm->res->name, &cm->res->start, &cm->res->end);
384 
385 	return cm;
386 }
387 
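/*
 * Enable the CAMERARX PHY for this context's CSI-2 port via the control
 * module: turn on the control clock, select CSI mode and enable the data
 * lanes. The differing lane-enable masks (0xf vs 0x3) suggest CSI0 has four
 * data lanes and CSI1 has two, but that is an assumption based on the masks.
 */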
388 static void camerarx_phy_enable(struct cal_ctx *ctx)
389 {
390 	u32 val;
391 
392 	if (!ctx->dev->cm->base) {
393 		ctx_err(ctx, "cm not mapped\n");
394 		return;
395 	}
396 
397 	val = reg_read(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL);
398 	if (ctx->csi2_port == 1) {
399 		set_field(&val, 1, CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK);
400 		set_field(&val, 0, CM_CAMERRX_CTRL_CSI0_CAMMODE_MASK);
401 		/* enable all lanes by default */
402 		set_field(&val, 0xf, CM_CAMERRX_CTRL_CSI0_LANEENABLE_MASK);
403 		set_field(&val, 1, CM_CAMERRX_CTRL_CSI0_MODE_MASK);
404 	} else if (ctx->csi2_port == 2) {
405 		set_field(&val, 1, CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK);
406 		set_field(&val, 0, CM_CAMERRX_CTRL_CSI1_CAMMODE_MASK);
407 		/* enable all lanes by default */
408 		set_field(&val, 0x3, CM_CAMERRX_CTRL_CSI1_LANEENABLE_MASK);
409 		set_field(&val, 1, CM_CAMERRX_CTRL_CSI1_MODE_MASK);
410 	}
411 	reg_write(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL, val);
412 }
413 
414 static void camerarx_phy_disable(struct cal_ctx *ctx)
415 {
416 	u32 val;
417 
418 	if (!ctx->dev->cm->base) {
419 		ctx_err(ctx, "cm not mapped\n");
420 		return;
421 	}
422 
423 	val = reg_read(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL);
424 	if (ctx->csi2_port == 1)
425 		set_field(&val, 0x0, CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK);
426 	else if (ctx->csi2_port == 2)
427 		set_field(&val, 0x0, CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK);
428 	reg_write(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL, val);
429 }
430 
431 /*
432  * Camera Instance access block
433  */
434 static struct cc_data *cc_create(struct cal_dev *dev, unsigned int core)
435 {
436 	struct platform_device *pdev = dev->pdev;
437 	struct cc_data *cc;
438 
439 	cc = devm_kzalloc(&pdev->dev, sizeof(*cc), GFP_KERNEL);
440 	if (!cc)
441 		return ERR_PTR(-ENOMEM);
442 
443 	cc->res = platform_get_resource_byname(pdev,
444 					       IORESOURCE_MEM,
445 					       (core == 0) ?
446 						"cal_rx_core0" :
447 						"cal_rx_core1");
448 	cc->base = devm_ioremap_resource(&pdev->dev, cc->res);
449 	if (IS_ERR(cc->base)) {
450 		cal_err(dev, "failed to ioremap\n");
451 		return ERR_CAST(cc->base);
452 	}
453 
454 	cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
455 		cc->res->name, &cc->res->start, &cc->res->end);
456 
457 	return cc;
458 }
459 
460 /*
461  * Get Revision and HW info
462  */
463 static void cal_get_hwinfo(struct cal_dev *dev)
464 {
465 	u32 revision = 0;
466 	u32 hwinfo = 0;
467 
468 	revision = reg_read(dev, CAL_HL_REVISION);
469 	cal_dbg(3, dev, "CAL_HL_REVISION = 0x%08x (expecting 0x40000200)\n",
470 		revision);
471 
472 	hwinfo = reg_read(dev, CAL_HL_HWINFO);
473 	cal_dbg(3, dev, "CAL_HL_HWINFO = 0x%08x (expecting 0xA3C90469)\n",
474 		hwinfo);
475 }
476 
477 static inline int cal_runtime_get(struct cal_dev *dev)
478 {
479 	return pm_runtime_get_sync(&dev->pdev->dev);
480 }
481 
482 static inline void cal_runtime_put(struct cal_dev *dev)
483 {
484 	pm_runtime_put_sync(&dev->pdev->dev);
485 }
486 
487 static void cal_quickdump_regs(struct cal_dev *dev)
488 {
489 	cal_info(dev, "CAL Registers @ 0x%pa:\n", &dev->res->start);
490 	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
491 		       (__force const void *)dev->base,
492 		       resource_size(dev->res), false);
493 
494 	if (dev->ctx[0]) {
495 		cal_info(dev, "CSI2 Core 0 Registers @ %pa:\n",
496 			 &dev->ctx[0]->cc->res->start);
497 		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
498 			       (__force const void *)dev->ctx[0]->cc->base,
499 			       resource_size(dev->ctx[0]->cc->res),
500 			       false);
501 	}
502 
503 	if (dev->ctx[1]) {
504 		cal_info(dev, "CSI2 Core 1 Registers @ %pa:\n",
505 			 &dev->ctx[1]->cc->res->start);
506 		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
507 			       (__force const void *)dev->ctx[1]->cc->base,
508 			       resource_size(dev->ctx[1]->cc->res),
509 			       false);
510 	}
511 
512 	cal_info(dev, "CAMERRX_Control Registers @ %pa:\n",
513 		 &dev->cm->res->start);
514 	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
515 		       (__force const void *)dev->cm->base,
516 		       resource_size(dev->cm->res), false);
517 }
518 
519 /*
520  * Enable the expected IRQ sources
521  */
522 static void enable_irqs(struct cal_ctx *ctx)
523 {
524 	/* Enable IRQ_WDMA_END 0/1 */
525 	reg_write_field(ctx->dev,
526 			CAL_HL_IRQENABLE_SET(2),
527 			CAL_HL_IRQ_ENABLE,
528 			CAL_HL_IRQ_MASK(ctx->csi2_port));
529 	/* Enable IRQ_WDMA_START 0/1 */
530 	reg_write_field(ctx->dev,
531 			CAL_HL_IRQENABLE_SET(3),
532 			CAL_HL_IRQ_ENABLE,
533 			CAL_HL_IRQ_MASK(ctx->csi2_port));
534 	/* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */
535 	reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0xFF000000);
536 }
537 
538 static void disable_irqs(struct cal_ctx *ctx)
539 {
540 	/* Disable IRQ_WDMA_END 0/1 */
541 	reg_write_field(ctx->dev,
542 			CAL_HL_IRQENABLE_CLR(2),
543 			CAL_HL_IRQ_CLEAR,
544 			CAL_HL_IRQ_MASK(ctx->csi2_port));
545 	/* Disable IRQ_WDMA_START 0/1 */
546 	reg_write_field(ctx->dev,
547 			CAL_HL_IRQENABLE_CLR(3),
548 			CAL_HL_IRQ_CLEAR,
549 			CAL_HL_IRQ_MASK(ctx->csi2_port));
550 	/* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */
551 	reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0);
552 }
553 
554 static void csi2_init(struct cal_ctx *ctx)
555 {
556 	int i;
557 	u32 val;
558 
559 	val = reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port));
560 	set_field(&val, CAL_GEN_ENABLE,
561 		  CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK);
562 	set_field(&val, CAL_GEN_ENABLE,
563 		  CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK);
564 	set_field(&val, CAL_GEN_DISABLE,
565 		  CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK);
566 	set_field(&val, 407, CAL_CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK);
567 	reg_write(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port), val);
568 	ctx_dbg(3, ctx, "CAL_CSI2_TIMING(%d) = 0x%08x\n", ctx->csi2_port,
569 		reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port)));
570 
571 	val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
572 	set_field(&val, CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL,
573 		  CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK);
574 	set_field(&val, CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ON,
575 		  CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK);
576 	reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
577 	for (i = 0; i < 10; i++) {
578 		if (reg_read_field(ctx->dev,
579 				   CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port),
580 				   CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK) ==
581 		    CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_ON)
582 			break;
583 		usleep_range(1000, 1100);
584 	}
585 	ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x\n", ctx->csi2_port,
586 		reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port)));
587 
588 	val = reg_read(ctx->dev, CAL_CTRL);
589 	set_field(&val, CAL_CTRL_BURSTSIZE_BURST128, CAL_CTRL_BURSTSIZE_MASK);
590 	set_field(&val, 0xF, CAL_CTRL_TAGCNT_MASK);
591 	set_field(&val, CAL_CTRL_POSTED_WRITES_NONPOSTED,
592 		  CAL_CTRL_POSTED_WRITES_MASK);
593 	set_field(&val, 0xFF, CAL_CTRL_MFLAGL_MASK);
594 	set_field(&val, 0xFF, CAL_CTRL_MFLAGH_MASK);
595 	reg_write(ctx->dev, CAL_CTRL, val);
596 	ctx_dbg(3, ctx, "CAL_CTRL = 0x%08x\n", reg_read(ctx->dev, CAL_CTRL));
597 }
598 
599 static void csi2_lane_config(struct cal_ctx *ctx)
600 {
601 	u32 val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
602 	u32 lane_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK;
603 	u32 polarity_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POL_MASK;
604 	struct v4l2_fwnode_bus_mipi_csi2 *mipi_csi2 =
605 		&ctx->endpoint.bus.mipi_csi2;
606 	int lane;
607 
608 	set_field(&val, mipi_csi2->clock_lane + 1, lane_mask);
609 	set_field(&val, mipi_csi2->lane_polarities[0], polarity_mask);
610 	for (lane = 0; lane < mipi_csi2->num_data_lanes; lane++) {
611 		/*
612 		 * Every lane is one nibble apart, starting with the
613 		 * clock lane and followed by the data lanes, so shift the masks by 4.
614 		 */
615 		lane_mask <<= 4;
616 		polarity_mask <<= 4;
617 		set_field(&val, mipi_csi2->data_lanes[lane] + 1, lane_mask);
618 		set_field(&val, mipi_csi2->lane_polarities[lane + 1],
619 			  polarity_mask);
620 	}
621 
622 	reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
623 	ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x\n",
624 		ctx->csi2_port, val);
625 }
626 
627 static void csi2_ppi_enable(struct cal_ctx *ctx)
628 {
629 	reg_write_field(ctx->dev, CAL_CSI2_PPI_CTRL(ctx->csi2_port),
630 			CAL_GEN_ENABLE, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
631 }
632 
633 static void csi2_ppi_disable(struct cal_ctx *ctx)
634 {
635 	reg_write_field(ctx->dev, CAL_CSI2_PPI_CTRL(ctx->csi2_port),
636 			CAL_GEN_DISABLE, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
637 }
638 
639 static void csi2_ctx_config(struct cal_ctx *ctx)
640 {
641 	u32 val;
642 
643 	val = reg_read(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port));
644 	set_field(&val, ctx->csi2_port, CAL_CSI2_CTX_CPORT_MASK);
645 	/*
646 	 * DT type: MIPI CSI-2 Specs
647 	 *   0x1: All - DT filter is disabled
648 	 *  0x24: RGB888 1 pixel  = 3 bytes
649 	 *  0x2B: RAW10  4 pixels = 5 bytes
650 	 *  0x2A: RAW8   1 pixel  = 1 byte
651 	 *  0x1E: YUV422 2 pixels = 4 bytes
652 	 */
653 	set_field(&val, 0x1, CAL_CSI2_CTX_DT_MASK);
654 	/* Virtual Channel from the CSI2 sensor usually 0! */
655 	set_field(&val, ctx->virtual_channel, CAL_CSI2_CTX_VC_MASK);
656 	/* NUM_LINES_PER_FRAME => 0 means auto detect */
657 	set_field(&val, 0, CAL_CSI2_CTX_LINES_MASK);
658 	set_field(&val, CAL_CSI2_CTX_ATT_PIX, CAL_CSI2_CTX_ATT_MASK);
659 	set_field(&val, CAL_CSI2_CTX_PACK_MODE_LINE,
660 		  CAL_CSI2_CTX_PACK_MODE_MASK);
661 	reg_write(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port), val);
662 	ctx_dbg(3, ctx, "CAL_CSI2_CTX0(%d) = 0x%08x\n", ctx->csi2_port,
663 		reg_read(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port)));
664 }
665 
666 static void pix_proc_config(struct cal_ctx *ctx)
667 {
668 	u32 val;
669 
670 	val = reg_read(ctx->dev, CAL_PIX_PROC(ctx->csi2_port));
671 	set_field(&val, CAL_PIX_PROC_EXTRACT_B8, CAL_PIX_PROC_EXTRACT_MASK);
672 	set_field(&val, CAL_PIX_PROC_DPCMD_BYPASS, CAL_PIX_PROC_DPCMD_MASK);
673 	set_field(&val, CAL_PIX_PROC_DPCME_BYPASS, CAL_PIX_PROC_DPCME_MASK);
674 	set_field(&val, CAL_PIX_PROC_PACK_B8, CAL_PIX_PROC_PACK_MASK);
675 	set_field(&val, ctx->csi2_port, CAL_PIX_PROC_CPORT_MASK);
676 	set_field(&val, CAL_GEN_ENABLE, CAL_PIX_PROC_EN_MASK);
677 	reg_write(ctx->dev, CAL_PIX_PROC(ctx->csi2_port), val);
678 	ctx_dbg(3, ctx, "CAL_PIX_PROC(%d) = 0x%08x\n", ctx->csi2_port,
679 		reg_read(ctx->dev, CAL_PIX_PROC(ctx->csi2_port)));
680 }
681 
682 static void cal_wr_dma_config(struct cal_ctx *ctx,
683 			      unsigned int width)
684 {
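	/* "width" is the line length in bytes (bytesperline from the caller). */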
685 	u32 val;
686 
687 	val = reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port));
688 	set_field(&val, ctx->csi2_port, CAL_WR_DMA_CTRL_CPORT_MASK);
689 	set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT,
690 		  CAL_WR_DMA_CTRL_DTAG_MASK);
691 	set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST,
692 		  CAL_WR_DMA_CTRL_MODE_MASK);
693 	set_field(&val, CAL_WR_DMA_CTRL_PATTERN_LINEAR,
694 		  CAL_WR_DMA_CTRL_PATTERN_MASK);
695 	set_field(&val, CAL_GEN_ENABLE, CAL_WR_DMA_CTRL_STALL_RD_MASK);
696 	reg_write(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port), val);
697 	ctx_dbg(3, ctx, "CAL_WR_DMA_CTRL(%d) = 0x%08x\n", ctx->csi2_port,
698 		reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port)));
699 
700 	/*
701 	 * The offset appears to be in units of 16 bytes (width / 16); this is
702 	 * not fully verified, but zero does not work correctly.
703 	 */
704 	reg_write_field(ctx->dev,
705 			CAL_WR_DMA_OFST(ctx->csi2_port),
706 			(width / 16),
707 			CAL_WR_DMA_OFST_MASK);
708 	ctx_dbg(3, ctx, "CAL_WR_DMA_OFST(%d) = 0x%08x\n", ctx->csi2_port,
709 		reg_read(ctx->dev, CAL_WR_DMA_OFST(ctx->csi2_port)));
710 
711 	val = reg_read(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port));
712 	/* 64 bit word means no skipping */
713 	set_field(&val, 0, CAL_WR_DMA_XSIZE_XSKIP_MASK);
714 	/*
715 	 * (width * 8) / 64 should be the size of an entire line in 64-bit
716 	 * words, but 0 means "all data until the end of the line is
717 	 * detected automatically".
718 	 */
719 	set_field(&val, (width / 8), CAL_WR_DMA_XSIZE_MASK);
720 	reg_write(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port), val);
721 	ctx_dbg(3, ctx, "CAL_WR_DMA_XSIZE(%d) = 0x%08x\n", ctx->csi2_port,
722 		reg_read(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port)));
723 }
724 
725 static void cal_wr_dma_addr(struct cal_ctx *ctx, unsigned int dmaaddr)
726 {
727 	reg_write(ctx->dev, CAL_WR_DMA_ADDR(ctx->csi2_port), dmaaddr);
728 }
729 
730 /*
731  * TCLK values are OK at their reset values
732  */
733 #define TCLK_TERM	0
734 #define TCLK_MISS	1
735 #define TCLK_SETTLE	14
736 #define THS_SETTLE	15
737 
738 static void csi2_phy_config(struct cal_ctx *ctx)
739 {
740 	unsigned int reg0, reg1;
741 	unsigned int ths_term, ths_settle;
742 	unsigned int ddrclkperiod_us;
743 
744 	/*
745 	 * THS_TERM: Programmed value = floor(20 ns/DDRClk period) - 2.
746 	 */
747 	ddrclkperiod_us = ctx->external_rate / 2000000;
748 	ddrclkperiod_us = 1000000 / ddrclkperiod_us;
749 	ctx_dbg(1, ctx, "ddrclkperiod_us: %d\n", ddrclkperiod_us);
750 
751 	ths_term = 20000 / ddrclkperiod_us;
752 	ths_term = (ths_term >= 2) ? ths_term - 2 : ths_term;
753 	ctx_dbg(1, ctx, "ths_term: %d (0x%02x)\n", ths_term, ths_term);
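
	/*
	 * Worked example (assuming a 400 MHz pixel rate): the DDR clock is
	 * 200 MHz, so despite the "_us" suffix ddrclkperiod_us ends up as the
	 * period in picoseconds (5000), and ths_term = 20000 / 5000 - 2 = 2.
	 */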
754 
755 	/*
756 	 * THS_SETTLE: Programmed value = floor(176.3 ns/CtrlClk period) - 1.
757 	 *	Since CtrlClk is fixed at 96Mhz then we get
758 	 *	ths_settle = floor(176.3 / 10.416) - 1 = 15
759 	 * If we ever switch to a dynamic clock then this code might be useful
760 	 *
761 	 * unsigned int ctrlclkperiod_us;
762 	 * ctrlclkperiod_us = 96000000 / 1000000;
763 	 * ctrlclkperiod_us = 1000000 / ctrlclkperiod_us;
764 	 * ctx_dbg(1, ctx, "ctrlclkperiod_us: %d\n", ctrlclkperiod_us);
765 
766 	 * ths_settle = 176300  / ctrlclkperiod_us;
767 	 * ths_settle = (ths_settle > 1) ? ths_settle - 1 : ths_settle;
768 	 */
769 
770 	ths_settle = THS_SETTLE;
771 	ctx_dbg(1, ctx, "ths_settle: %d (0x%02x)\n", ths_settle, ths_settle);
772 
773 	reg0 = reg_read(ctx->cc, CAL_CSI2_PHY_REG0);
774 	set_field(&reg0, CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_DISABLE,
775 		  CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_MASK);
776 	set_field(&reg0, ths_term, CAL_CSI2_PHY_REG0_THS_TERM_MASK);
777 	set_field(&reg0, ths_settle, CAL_CSI2_PHY_REG0_THS_SETTLE_MASK);
778 
779 	ctx_dbg(1, ctx, "CSI2_%d_REG0 = 0x%08x\n", (ctx->csi2_port - 1), reg0);
780 	reg_write(ctx->cc, CAL_CSI2_PHY_REG0, reg0);
781 
782 	reg1 = reg_read(ctx->cc, CAL_CSI2_PHY_REG1);
783 	set_field(&reg1, TCLK_TERM, CAL_CSI2_PHY_REG1_TCLK_TERM_MASK);
784 	set_field(&reg1, 0xb8, CAL_CSI2_PHY_REG1_DPHY_HS_SYNC_PATTERN_MASK);
785 	set_field(&reg1, TCLK_MISS, CAL_CSI2_PHY_REG1_CTRLCLK_DIV_FACTOR_MASK);
786 	set_field(&reg1, TCLK_SETTLE, CAL_CSI2_PHY_REG1_TCLK_SETTLE_MASK);
787 
788 	ctx_dbg(1, ctx, "CSI2_%d_REG1 = 0x%08x\n", (ctx->csi2_port - 1), reg1);
789 	reg_write(ctx->cc, CAL_CSI2_PHY_REG1, reg1);
790 }
791 
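/*
 * Read the sensor's V4L2_CID_PIXEL_RATE control; the value is later used by
 * csi2_phy_config() to derive the CSI-2 DDR clock rate and D-PHY timings.
 */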
792 static int cal_get_external_info(struct cal_ctx *ctx)
793 {
794 	struct v4l2_ctrl *ctrl;
795 
796 	if (!ctx->sensor)
797 		return -ENODEV;
798 
799 	ctrl = v4l2_ctrl_find(ctx->sensor->ctrl_handler, V4L2_CID_PIXEL_RATE);
800 	if (!ctrl) {
801 		ctx_err(ctx, "no pixel rate control in subdev: %s\n",
802 			ctx->sensor->name);
803 		return -EPIPE;
804 	}
805 
806 	ctx->external_rate = v4l2_ctrl_g_ctrl_int64(ctrl);
807 	ctx_dbg(3, ctx, "sensor Pixel Rate: %d\n", ctx->external_rate);
808 
809 	return 0;
810 }
811 
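/*
 * Called from the IRQ handler with ctx->slock held: take the next queued
 * buffer off the active list and program its DMA address into the hardware.
 */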
812 static inline void cal_schedule_next_buffer(struct cal_ctx *ctx)
813 {
814 	struct cal_dmaqueue *dma_q = &ctx->vidq;
815 	struct cal_buffer *buf;
816 	unsigned long addr;
817 
818 	buf = list_entry(dma_q->active.next, struct cal_buffer, list);
819 	ctx->next_frm = buf;
820 	list_del(&buf->list);
821 
822 	addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
823 	cal_wr_dma_addr(ctx, addr);
824 }
825 
826 static inline void cal_process_buffer_complete(struct cal_ctx *ctx)
827 {
828 	ctx->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns();
829 	ctx->cur_frm->vb.field = ctx->m_fmt.field;
830 	ctx->cur_frm->vb.sequence = ctx->sequence++;
831 
832 	vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
833 	ctx->cur_frm = ctx->next_frm;
834 }
835 
836 #define isvcirqset(irq, vc, ff) (irq & \
837 	(CAL_CSI2_VC_IRQENABLE_ ##ff ##_IRQ_##vc ##_MASK))
838 
839 #define isportirqset(irq, port) (irq & CAL_HL_IRQ_MASK(port))
840 
841 static irqreturn_t cal_irq(int irq_cal, void *data)
842 {
843 	struct cal_dev *dev = (struct cal_dev *)data;
844 	struct cal_ctx *ctx;
845 	struct cal_dmaqueue *dma_q;
846 	u32 irqst2, irqst3;
847 
848 	/* Check which DMA just finished */
849 	irqst2 = reg_read(dev, CAL_HL_IRQSTATUS(2));
850 	if (irqst2) {
851 		/* Clear Interrupt status */
852 		reg_write(dev, CAL_HL_IRQSTATUS(2), irqst2);
853 
854 		/* Need to check both ports */
855 		if (isportirqset(irqst2, 1)) {
856 			ctx = dev->ctx[0];
857 
858 			if (ctx->cur_frm != ctx->next_frm)
859 				cal_process_buffer_complete(ctx);
860 		}
861 
862 		if (isportirqset(irqst2, 2)) {
863 			ctx = dev->ctx[1];
864 
865 			if (ctx->cur_frm != ctx->next_frm)
866 				cal_process_buffer_complete(ctx);
867 		}
868 	}
869 
870 	/* Check which DMA just started */
871 	irqst3 = reg_read(dev, CAL_HL_IRQSTATUS(3));
872 	if (irqst3) {
873 		/* Clear Interrupt status */
874 		reg_write(dev, CAL_HL_IRQSTATUS(3), irqst3);
875 
876 		/* Need to check both ports */
877 		if (isportirqset(irqst3, 1)) {
878 			ctx = dev->ctx[0];
879 			dma_q = &ctx->vidq;
880 
881 			spin_lock(&ctx->slock);
882 			if (!list_empty(&dma_q->active) &&
883 			    ctx->cur_frm == ctx->next_frm)
884 				cal_schedule_next_buffer(ctx);
885 			spin_unlock(&ctx->slock);
886 		}
887 
888 		if (isportirqset(irqst3, 2)) {
889 			ctx = dev->ctx[1];
890 			dma_q = &ctx->vidq;
891 
892 			spin_lock(&ctx->slock);
893 			if (!list_empty(&dma_q->active) &&
894 			    ctx->cur_frm == ctx->next_frm)
895 				cal_schedule_next_buffer(ctx);
896 			spin_unlock(&ctx->slock);
897 		}
898 	}
899 
900 	return IRQ_HANDLED;
901 }
902 
903 /*
904  * video ioctls
905  */
906 static int cal_querycap(struct file *file, void *priv,
907 			struct v4l2_capability *cap)
908 {
909 	struct cal_ctx *ctx = video_drvdata(file);
910 
911 	strscpy(cap->driver, CAL_MODULE_NAME, sizeof(cap->driver));
912 	strscpy(cap->card, CAL_MODULE_NAME, sizeof(cap->card));
913 
914 	snprintf(cap->bus_info, sizeof(cap->bus_info),
915 		 "platform:%s", ctx->v4l2_dev.name);
916 	return 0;
917 }
918 
919 static int cal_enum_fmt_vid_cap(struct file *file, void *priv,
920 				struct v4l2_fmtdesc *f)
921 {
922 	struct cal_ctx *ctx = video_drvdata(file);
923 	const struct cal_fmt *fmt = NULL;
924 
925 	if (f->index >= ctx->num_active_fmt)
926 		return -EINVAL;
927 
928 	fmt = ctx->active_fmt[f->index];
929 
930 	f->pixelformat = fmt->fourcc;
931 	f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
932 	return 0;
933 }
934 
935 static int __subdev_get_format(struct cal_ctx *ctx,
936 			       struct v4l2_mbus_framefmt *fmt)
937 {
938 	struct v4l2_subdev_format sd_fmt;
939 	struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
940 	int ret;
941 
942 	sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
943 	sd_fmt.pad = 0;
944 
945 	ret = v4l2_subdev_call(ctx->sensor, pad, get_fmt, NULL, &sd_fmt);
946 	if (ret)
947 		return ret;
948 
949 	*fmt = *mbus_fmt;
950 
951 	ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__,
952 		fmt->width, fmt->height, fmt->code);
953 
954 	return 0;
955 }
956 
957 static int __subdev_set_format(struct cal_ctx *ctx,
958 			       struct v4l2_mbus_framefmt *fmt)
959 {
960 	struct v4l2_subdev_format sd_fmt;
961 	struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
962 	int ret;
963 
964 	sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
965 	sd_fmt.pad = 0;
966 	*mbus_fmt = *fmt;
967 
968 	ret = v4l2_subdev_call(ctx->sensor, pad, set_fmt, NULL, &sd_fmt);
969 	if (ret)
970 		return ret;
971 
972 	ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__,
973 		fmt->width, fmt->height, fmt->code);
974 
975 	return 0;
976 }
977 
978 static int cal_calc_format_size(struct cal_ctx *ctx,
979 				const struct cal_fmt *fmt,
980 				struct v4l2_format *f)
981 {
982 	if (!fmt) {
983 		ctx_dbg(3, ctx, "No cal_fmt provided!\n");
984 		return -EINVAL;
985 	}
986 
987 	v4l_bound_align_image(&f->fmt.pix.width, 48, MAX_WIDTH, 2,
988 			      &f->fmt.pix.height, 32, MAX_HEIGHT, 0, 0);
989 	f->fmt.pix.bytesperline = bytes_per_line(f->fmt.pix.width,
990 						 fmt->depth >> 3);
991 	f->fmt.pix.sizeimage = f->fmt.pix.height *
992 			       f->fmt.pix.bytesperline;
993 
994 	ctx_dbg(3, ctx, "%s: fourcc: %s size: %dx%d bpl:%d img_size:%d\n",
995 		__func__, fourcc_to_str(f->fmt.pix.pixelformat),
996 		f->fmt.pix.width, f->fmt.pix.height,
997 		f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
998 
999 	return 0;
1000 }
1001 
1002 static int cal_g_fmt_vid_cap(struct file *file, void *priv,
1003 			     struct v4l2_format *f)
1004 {
1005 	struct cal_ctx *ctx = video_drvdata(file);
1006 
1007 	*f = ctx->v_fmt;
1008 
1009 	return 0;
1010 }
1011 
1012 static int cal_try_fmt_vid_cap(struct file *file, void *priv,
1013 			       struct v4l2_format *f)
1014 {
1015 	struct cal_ctx *ctx = video_drvdata(file);
1016 	const struct cal_fmt *fmt;
1017 	struct v4l2_subdev_frame_size_enum fse;
1018 	int ret, found;
1019 
1020 	fmt = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
1021 	if (!fmt) {
1022 		ctx_dbg(3, ctx, "Fourcc format (0x%08x) not found.\n",
1023 			f->fmt.pix.pixelformat);
1024 
1025 		/* Just get the first one enumerated */
1026 		fmt = ctx->active_fmt[0];
1027 		f->fmt.pix.pixelformat = fmt->fourcc;
1028 	}
1029 
1030 	f->fmt.pix.field = ctx->v_fmt.fmt.pix.field;
1031 
1032 	/* check for/find a valid width/height */
1033 	ret = 0;
1034 	found = false;
1035 	fse.pad = 0;
1036 	fse.code = fmt->code;
1037 	fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
1038 	for (fse.index = 0; ; fse.index++) {
1039 		ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_size,
1040 				       NULL, &fse);
1041 		if (ret)
1042 			break;
1043 
1044 		if ((f->fmt.pix.width == fse.max_width) &&
1045 		    (f->fmt.pix.height == fse.max_height)) {
1046 			found = true;
1047 			break;
1048 		} else if ((f->fmt.pix.width >= fse.min_width) &&
1049 			 (f->fmt.pix.width <= fse.max_width) &&
1050 			 (f->fmt.pix.height >= fse.min_height) &&
1051 			 (f->fmt.pix.height <= fse.max_height)) {
1052 			found = true;
1053 			break;
1054 		}
1055 	}
1056 
1057 	if (!found) {
1058 		/* use existing values as default */
1059 		f->fmt.pix.width = ctx->v_fmt.fmt.pix.width;
1060 		f->fmt.pix.height =  ctx->v_fmt.fmt.pix.height;
1061 	}
1062 
1063 	/*
1064 	 * Use the current colorspace for now; it will get
1065 	 * updated properly during s_fmt
1066 	 */
1067 	f->fmt.pix.colorspace = ctx->v_fmt.fmt.pix.colorspace;
1068 	return cal_calc_format_size(ctx, fmt, f);
1069 }
1070 
1071 static int cal_s_fmt_vid_cap(struct file *file, void *priv,
1072 			     struct v4l2_format *f)
1073 {
1074 	struct cal_ctx *ctx = video_drvdata(file);
1075 	struct vb2_queue *q = &ctx->vb_vidq;
1076 	const struct cal_fmt *fmt;
1077 	struct v4l2_mbus_framefmt mbus_fmt;
1078 	int ret;
1079 
1080 	if (vb2_is_busy(q)) {
1081 		ctx_dbg(3, ctx, "%s device busy\n", __func__);
1082 		return -EBUSY;
1083 	}
1084 
1085 	ret = cal_try_fmt_vid_cap(file, priv, f);
1086 	if (ret < 0)
1087 		return ret;
1088 
1089 	fmt = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
1090 
1091 	v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, fmt->code);
1092 
1093 	ret = __subdev_set_format(ctx, &mbus_fmt);
1094 	if (ret)
1095 		return ret;
1096 
1097 	/* Just double check nothing has gone wrong */
1098 	if (mbus_fmt.code != fmt->code) {
1099 		ctx_dbg(3, ctx,
1100 			"%s subdev changed format on us, this should not happen\n",
1101 			__func__);
1102 		return -EINVAL;
1103 	}
1104 
1105 	v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt);
1106 	ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1107 	ctx->v_fmt.fmt.pix.pixelformat  = fmt->fourcc;
1108 	cal_calc_format_size(ctx, fmt, &ctx->v_fmt);
1109 	ctx->fmt = fmt;
1110 	ctx->m_fmt = mbus_fmt;
1111 	*f = ctx->v_fmt;
1112 
1113 	return 0;
1114 }
1115 
1116 static int cal_enum_framesizes(struct file *file, void *fh,
1117 			       struct v4l2_frmsizeenum *fsize)
1118 {
1119 	struct cal_ctx *ctx = video_drvdata(file);
1120 	const struct cal_fmt *fmt;
1121 	struct v4l2_subdev_frame_size_enum fse;
1122 	int ret;
1123 
1124 	/* check for valid format */
1125 	fmt = find_format_by_pix(ctx, fsize->pixel_format);
1126 	if (!fmt) {
1127 		ctx_dbg(3, ctx, "Invalid pixel code: %x\n",
1128 			fsize->pixel_format);
1129 		return -EINVAL;
1130 	}
1131 
1132 	fse.index = fsize->index;
1133 	fse.pad = 0;
1134 	fse.code = fmt->code;
1135 
1136 	ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_size, NULL, &fse);
1137 	if (ret)
1138 		return ret;
1139 
1140 	ctx_dbg(1, ctx, "%s: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
1141 		__func__, fse.index, fse.code, fse.min_width, fse.max_width,
1142 		fse.min_height, fse.max_height);
1143 
1144 	fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
1145 	fsize->discrete.width = fse.max_width;
1146 	fsize->discrete.height = fse.max_height;
1147 
1148 	return 0;
1149 }
1150 
1151 static int cal_enum_input(struct file *file, void *priv,
1152 			  struct v4l2_input *inp)
1153 {
1154 	if (inp->index >= CAL_NUM_INPUT)
1155 		return -EINVAL;
1156 
1157 	inp->type = V4L2_INPUT_TYPE_CAMERA;
1158 	sprintf(inp->name, "Camera %u", inp->index);
1159 	return 0;
1160 }
1161 
1162 static int cal_g_input(struct file *file, void *priv, unsigned int *i)
1163 {
1164 	struct cal_ctx *ctx = video_drvdata(file);
1165 
1166 	*i = ctx->input;
1167 	return 0;
1168 }
1169 
1170 static int cal_s_input(struct file *file, void *priv, unsigned int i)
1171 {
1172 	struct cal_ctx *ctx = video_drvdata(file);
1173 
1174 	if (i >= CAL_NUM_INPUT)
1175 		return -EINVAL;
1176 
1177 	ctx->input = i;
1178 	return 0;
1179 }
1180 
1181 /* timeperframe is arbitrary and continuous */
1182 static int cal_enum_frameintervals(struct file *file, void *priv,
1183 				   struct v4l2_frmivalenum *fival)
1184 {
1185 	struct cal_ctx *ctx = video_drvdata(file);
1186 	const struct cal_fmt *fmt;
1187 	struct v4l2_subdev_frame_interval_enum fie = {
1188 		.index = fival->index,
1189 		.width = fival->width,
1190 		.height = fival->height,
1191 		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
1192 	};
1193 	int ret;
1194 
1195 	fmt = find_format_by_pix(ctx, fival->pixel_format);
1196 	if (!fmt)
1197 		return -EINVAL;
1198 
1199 	fie.code = fmt->code;
1200 	ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_interval,
1201 			       NULL, &fie);
1202 	if (ret)
1203 		return ret;
1204 	fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
1205 	fival->discrete = fie.interval;
1206 
1207 	return 0;
1208 }
1209 
1210 /*
1211  * Videobuf operations
1212  */
1213 static int cal_queue_setup(struct vb2_queue *vq,
1214 			   unsigned int *nbuffers, unsigned int *nplanes,
1215 			   unsigned int sizes[], struct device *alloc_devs[])
1216 {
1217 	struct cal_ctx *ctx = vb2_get_drv_priv(vq);
1218 	unsigned size = ctx->v_fmt.fmt.pix.sizeimage;
1219 
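	/* Ensure at least 3 buffers in total, matching q->min_buffers_needed. */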
1220 	if (vq->num_buffers + *nbuffers < 3)
1221 		*nbuffers = 3 - vq->num_buffers;
1222 
1223 	if (*nplanes) {
1224 		if (sizes[0] < size)
1225 			return -EINVAL;
1226 		size = sizes[0];
1227 	}
1228 
1229 	*nplanes = 1;
1230 	sizes[0] = size;
1231 
1232 	ctx_dbg(3, ctx, "nbuffers=%d, size=%d\n", *nbuffers, sizes[0]);
1233 
1234 	return 0;
1235 }
1236 
1237 static int cal_buffer_prepare(struct vb2_buffer *vb)
1238 {
1239 	struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
1240 	struct cal_buffer *buf = container_of(vb, struct cal_buffer,
1241 					      vb.vb2_buf);
1242 	unsigned long size;
1243 
1244 	if (WARN_ON(!ctx->fmt))
1245 		return -EINVAL;
1246 
1247 	size = ctx->v_fmt.fmt.pix.sizeimage;
1248 	if (vb2_plane_size(vb, 0) < size) {
1249 		ctx_err(ctx,
1250 			"data will not fit into plane (%lu < %lu)\n",
1251 			vb2_plane_size(vb, 0), size);
1252 		return -EINVAL;
1253 	}
1254 
1255 	vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
1256 	return 0;
1257 }
1258 
1259 static void cal_buffer_queue(struct vb2_buffer *vb)
1260 {
1261 	struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
1262 	struct cal_buffer *buf = container_of(vb, struct cal_buffer,
1263 					      vb.vb2_buf);
1264 	struct cal_dmaqueue *vidq = &ctx->vidq;
1265 	unsigned long flags = 0;
1266 
1267 	/* recheck locking */
1268 	spin_lock_irqsave(&ctx->slock, flags);
1269 	list_add_tail(&buf->list, &vidq->active);
1270 	spin_unlock_irqrestore(&ctx->slock, flags);
1271 }
1272 
1273 static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
1274 {
1275 	struct cal_ctx *ctx = vb2_get_drv_priv(vq);
1276 	struct cal_dmaqueue *dma_q = &ctx->vidq;
1277 	struct cal_buffer *buf, *tmp;
1278 	unsigned long addr = 0;
1279 	unsigned long flags;
1280 	int ret;
1281 
1282 	spin_lock_irqsave(&ctx->slock, flags);
1283 	if (list_empty(&dma_q->active)) {
1284 		spin_unlock_irqrestore(&ctx->slock, flags);
1285 		ctx_dbg(3, ctx, "buffer queue is empty\n");
1286 		return -EIO;
1287 	}
1288 
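	/*
	 * Point both cur_frm and next_frm at the first buffer; the WDMA_START
	 * IRQ schedules a new next_frm only while cur_frm == next_frm, and the
	 * WDMA_END IRQ completes cur_frm once the two differ.
	 */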
1289 	buf = list_entry(dma_q->active.next, struct cal_buffer, list);
1290 	ctx->cur_frm = buf;
1291 	ctx->next_frm = buf;
1292 	list_del(&buf->list);
1293 	spin_unlock_irqrestore(&ctx->slock, flags);
1294 
1295 	addr = vb2_dma_contig_plane_dma_addr(&ctx->cur_frm->vb.vb2_buf, 0);
1296 	ctx->sequence = 0;
1297 
1298 	ret = cal_get_external_info(ctx);
1299 	if (ret < 0)
1300 		goto err;
1301 
1302 	cal_runtime_get(ctx->dev);
1303 
1304 	enable_irqs(ctx);
1305 	camerarx_phy_enable(ctx);
1306 	csi2_init(ctx);
1307 	csi2_phy_config(ctx);
1308 	csi2_lane_config(ctx);
1309 	csi2_ctx_config(ctx);
1310 	pix_proc_config(ctx);
1311 	cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline);
1312 	cal_wr_dma_addr(ctx, addr);
1313 	csi2_ppi_enable(ctx);
1314 
1315 	ret = v4l2_subdev_call(ctx->sensor, video, s_stream, 1);
1316 	if (ret) {
1317 		ctx_err(ctx, "stream on failed in subdev\n");
1318 		cal_runtime_put(ctx->dev);
1319 		goto err;
1320 	}
1321 
1322 	if (debug >= 4)
1323 		cal_quickdump_regs(ctx->dev);
1324 
1325 	return 0;
1326 
1327 err:
1328 	list_for_each_entry_safe(buf, tmp, &dma_q->active, list) {
1329 		list_del(&buf->list);
1330 		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
1331 	}
1332 	return ret;
1333 }
1334 
1335 static void cal_stop_streaming(struct vb2_queue *vq)
1336 {
1337 	struct cal_ctx *ctx = vb2_get_drv_priv(vq);
1338 	struct cal_dmaqueue *dma_q = &ctx->vidq;
1339 	struct cal_buffer *buf, *tmp;
1340 	unsigned long flags;
1341 
1342 	if (v4l2_subdev_call(ctx->sensor, video, s_stream, 0))
1343 		ctx_err(ctx, "stream off failed in subdev\n");
1344 
1345 	csi2_ppi_disable(ctx);
1346 	disable_irqs(ctx);
1347 
1348 	/* Release all active buffers */
1349 	spin_lock_irqsave(&ctx->slock, flags);
1350 	list_for_each_entry_safe(buf, tmp, &dma_q->active, list) {
1351 		list_del(&buf->list);
1352 		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1353 	}
1354 
1355 	if (ctx->cur_frm == ctx->next_frm) {
1356 		vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1357 	} else {
1358 		vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1359 		vb2_buffer_done(&ctx->next_frm->vb.vb2_buf,
1360 				VB2_BUF_STATE_ERROR);
1361 	}
1362 	ctx->cur_frm = NULL;
1363 	ctx->next_frm = NULL;
1364 	spin_unlock_irqrestore(&ctx->slock, flags);
1365 
1366 	cal_runtime_put(ctx->dev);
1367 }
1368 
1369 static const struct vb2_ops cal_video_qops = {
1370 	.queue_setup		= cal_queue_setup,
1371 	.buf_prepare		= cal_buffer_prepare,
1372 	.buf_queue		= cal_buffer_queue,
1373 	.start_streaming	= cal_start_streaming,
1374 	.stop_streaming		= cal_stop_streaming,
1375 	.wait_prepare		= vb2_ops_wait_prepare,
1376 	.wait_finish		= vb2_ops_wait_finish,
1377 };
1378 
1379 static const struct v4l2_file_operations cal_fops = {
1380 	.owner		= THIS_MODULE,
1381 	.open           = v4l2_fh_open,
1382 	.release        = vb2_fop_release,
1383 	.read           = vb2_fop_read,
1384 	.poll		= vb2_fop_poll,
1385 	.unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
1386 	.mmap           = vb2_fop_mmap,
1387 };
1388 
1389 static const struct v4l2_ioctl_ops cal_ioctl_ops = {
1390 	.vidioc_querycap      = cal_querycap,
1391 	.vidioc_enum_fmt_vid_cap  = cal_enum_fmt_vid_cap,
1392 	.vidioc_g_fmt_vid_cap     = cal_g_fmt_vid_cap,
1393 	.vidioc_try_fmt_vid_cap   = cal_try_fmt_vid_cap,
1394 	.vidioc_s_fmt_vid_cap     = cal_s_fmt_vid_cap,
1395 	.vidioc_enum_framesizes   = cal_enum_framesizes,
1396 	.vidioc_reqbufs       = vb2_ioctl_reqbufs,
1397 	.vidioc_create_bufs   = vb2_ioctl_create_bufs,
1398 	.vidioc_prepare_buf   = vb2_ioctl_prepare_buf,
1399 	.vidioc_querybuf      = vb2_ioctl_querybuf,
1400 	.vidioc_qbuf          = vb2_ioctl_qbuf,
1401 	.vidioc_dqbuf         = vb2_ioctl_dqbuf,
1402 	.vidioc_enum_input    = cal_enum_input,
1403 	.vidioc_g_input       = cal_g_input,
1404 	.vidioc_s_input       = cal_s_input,
1405 	.vidioc_enum_frameintervals = cal_enum_frameintervals,
1406 	.vidioc_streamon      = vb2_ioctl_streamon,
1407 	.vidioc_streamoff     = vb2_ioctl_streamoff,
1408 	.vidioc_log_status    = v4l2_ctrl_log_status,
1409 	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
1410 	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
1411 };
1412 
1413 static const struct video_device cal_videodev = {
1414 	.name		= CAL_MODULE_NAME,
1415 	.fops		= &cal_fops,
1416 	.ioctl_ops	= &cal_ioctl_ops,
1417 	.minor		= -1,
1418 	.release	= video_device_release_empty,
1419 	.device_caps	= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
1420 			  V4L2_CAP_READWRITE,
1421 };
1422 
1423 /* -----------------------------------------------------------------
1424  *	Initialization and module stuff
1425  * ------------------------------------------------------------------
1426  */
1427 static int cal_complete_ctx(struct cal_ctx *ctx);
1428 
1429 static int cal_async_bound(struct v4l2_async_notifier *notifier,
1430 			   struct v4l2_subdev *subdev,
1431 			   struct v4l2_async_subdev *asd)
1432 {
1433 	struct cal_ctx *ctx = notifier_to_ctx(notifier);
1434 	struct v4l2_subdev_mbus_code_enum mbus_code;
1435 	int ret = 0;
1436 	int i, j, k;
1437 
1438 	if (ctx->sensor) {
1439 		ctx_info(ctx, "Rejecting subdev %s (Already set!!)",
1440 			 subdev->name);
1441 		return 0;
1442 	}
1443 
1444 	ctx->sensor = subdev;
1445 	ctx_dbg(1, ctx, "Using sensor %s for capture\n", subdev->name);
1446 
1447 	/* Enumerate sub-device formats and enable all matching local formats */
1448 	ctx->num_active_fmt = 0;
1449 	for (j = 0, i = 0; ret != -EINVAL; ++j) {
1450 		struct cal_fmt *fmt;
1451 
1452 		memset(&mbus_code, 0, sizeof(mbus_code));
1453 		mbus_code.index = j;
1454 		ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
1455 				       NULL, &mbus_code);
1456 		if (ret)
1457 			continue;
1458 
1459 		ctx_dbg(2, ctx,
1460 			"subdev %s: code: %04x idx: %d\n",
1461 			subdev->name, mbus_code.code, j);
1462 
1463 		for (k = 0; k < ARRAY_SIZE(cal_formats); k++) {
1464 			fmt = &cal_formats[k];
1465 
1466 			if (mbus_code.code == fmt->code) {
1467 				ctx->active_fmt[i] = fmt;
1468 				ctx_dbg(2, ctx,
1469 					"matched fourcc: %s: code: %04x idx: %d\n",
1470 					fourcc_to_str(fmt->fourcc),
1471 					fmt->code, i);
1472 				ctx->num_active_fmt = ++i;
1473 			}
1474 		}
1475 	}
1476 
1477 	if (i == 0) {
1478 		ctx_err(ctx, "No suitable format reported by subdev %s\n",
1479 			subdev->name);
1480 		return -EINVAL;
1481 	}
1482 
1483 	cal_complete_ctx(ctx);
1484 
1485 	return 0;
1486 }
1487 
1488 static int cal_async_complete(struct v4l2_async_notifier *notifier)
1489 {
1490 	struct cal_ctx *ctx = notifier_to_ctx(notifier);
1491 	const struct cal_fmt *fmt;
1492 	struct v4l2_mbus_framefmt mbus_fmt;
1493 	int ret;
1494 
1495 	ret = __subdev_get_format(ctx, &mbus_fmt);
1496 	if (ret)
1497 		return ret;
1498 
1499 	fmt = find_format_by_code(ctx, mbus_fmt.code);
1500 	if (!fmt) {
1501 		ctx_dbg(3, ctx, "mbus code format (0x%08x) not found.\n",
1502 			mbus_fmt.code);
1503 		return -EINVAL;
1504 	}
1505 
1506 	/* Save current subdev format */
1507 	v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt);
1508 	ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1509 	ctx->v_fmt.fmt.pix.pixelformat  = fmt->fourcc;
1510 	cal_calc_format_size(ctx, fmt, &ctx->v_fmt);
1511 	ctx->fmt = fmt;
1512 	ctx->m_fmt = mbus_fmt;
1513 
1514 	return 0;
1515 }
1516 
1517 static const struct v4l2_async_notifier_operations cal_async_ops = {
1518 	.bound = cal_async_bound,
1519 	.complete = cal_async_complete,
1520 };
1521 
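/*
 * Called from cal_async_bound() once a matching sensor sub-device is found:
 * finish setting up the vb2 queue and register the video device node.
 */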
1522 static int cal_complete_ctx(struct cal_ctx *ctx)
1523 {
1524 	struct video_device *vfd;
1525 	struct vb2_queue *q;
1526 	int ret;
1527 
1528 	ctx->timeperframe = tpf_default;
1529 	ctx->external_rate = 192000000;
1530 
1531 	/* initialize locks */
1532 	spin_lock_init(&ctx->slock);
1533 	mutex_init(&ctx->mutex);
1534 
1535 	/* initialize queue */
1536 	q = &ctx->vb_vidq;
1537 	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
1538 	q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
1539 	q->drv_priv = ctx;
1540 	q->buf_struct_size = sizeof(struct cal_buffer);
1541 	q->ops = &cal_video_qops;
1542 	q->mem_ops = &vb2_dma_contig_memops;
1543 	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1544 	q->lock = &ctx->mutex;
1545 	q->min_buffers_needed = 3;
1546 	q->dev = ctx->v4l2_dev.dev;
1547 
1548 	ret = vb2_queue_init(q);
1549 	if (ret)
1550 		return ret;
1551 
1552 	/* init video dma queues */
1553 	INIT_LIST_HEAD(&ctx->vidq.active);
1554 
1555 	vfd = &ctx->vdev;
1556 	*vfd = cal_videodev;
1557 	vfd->v4l2_dev = &ctx->v4l2_dev;
1558 	vfd->queue = q;
1559 
1560 	/*
1561 	 * Provide a mutex to v4l2 core. It will be used to protect
1562 	 * all fops and v4l2 ioctls.
1563 	 */
1564 	vfd->lock = &ctx->mutex;
1565 	video_set_drvdata(vfd, ctx);
1566 
1567 	ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr);
1568 	if (ret < 0)
1569 		return ret;
1570 
1571 	v4l2_info(&ctx->v4l2_dev, "V4L2 device registered as %s\n",
1572 		  video_device_node_name(vfd));
1573 
1574 	return 0;
1575 }
1576 
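/*
 * Local DT graph helpers: walk the "port" nodes (optionally under a "ports"
 * container) and their "endpoint" children, similar to the of_graph_* API.
 */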
1577 static struct device_node *
1578 of_get_next_port(const struct device_node *parent,
1579 		 struct device_node *prev)
1580 {
1581 	struct device_node *port = NULL;
1582 
1583 	if (!parent)
1584 		return NULL;
1585 
1586 	if (!prev) {
1587 		struct device_node *ports;
1588 		/*
1589 		 * It's the first call, we have to find a port subnode
1590 		 * within this node or within an optional 'ports' node.
1591 		 */
1592 		ports = of_get_child_by_name(parent, "ports");
1593 		if (ports)
1594 			parent = ports;
1595 
1596 		port = of_get_child_by_name(parent, "port");
1597 
1598 		/* release the 'ports' node */
1599 		of_node_put(ports);
1600 	} else {
1601 		struct device_node *ports;
1602 
1603 		ports = of_get_parent(prev);
1604 		if (!ports)
1605 			return NULL;
1606 
1607 		do {
1608 			port = of_get_next_child(ports, prev);
1609 			if (!port) {
1610 				of_node_put(ports);
1611 				return NULL;
1612 			}
1613 			prev = port;
1614 		} while (!of_node_name_eq(port, "port"));
1615 		of_node_put(ports);
1616 	}
1617 
1618 	return port;
1619 }
1620 
1621 static struct device_node *
1622 of_get_next_endpoint(const struct device_node *parent,
1623 		     struct device_node *prev)
1624 {
1625 	struct device_node *ep = NULL;
1626 
1627 	if (!parent)
1628 		return NULL;
1629 
1630 	do {
1631 		ep = of_get_next_child(parent, prev);
1632 		if (!ep)
1633 			return NULL;
1634 		prev = ep;
1635 	} while (!of_node_name_eq(ep, "endpoint"));
1636 
1637 	return ep;
1638 }
1639 
1640 static int of_cal_create_instance(struct cal_ctx *ctx, int inst)
1641 {
1642 	struct platform_device *pdev = ctx->dev->pdev;
1643 	struct device_node *ep_node, *port, *sensor_node, *parent;
1644 	struct v4l2_fwnode_endpoint *endpoint;
1645 	struct v4l2_async_subdev *asd;
1646 	u32 regval = 0;
1647 	int ret, index, found_port = 0, lane;
1648 
1649 	parent = pdev->dev.of_node;
1650 
1651 	asd = &ctx->asd;
1652 	endpoint = &ctx->endpoint;
1653 
1654 	ep_node = NULL;
1655 	port = NULL;
1656 	sensor_node = NULL;
1657 	ret = -EINVAL;
1658 
1659 	ctx_dbg(3, ctx, "Scanning Port node for csi2 port: %d\n", inst);
1660 	for (index = 0; index < CAL_NUM_CSI2_PORTS; index++) {
1661 		port = of_get_next_port(parent, port);
1662 		if (!port) {
1663 			ctx_dbg(1, ctx, "No port node found for csi2 port:%d\n",
1664 				index);
1665 			goto cleanup_exit;
1666 		}
1667 
1668 		/* Match the slice number with <REG> */
1669 		of_property_read_u32(port, "reg", &regval);
1670 		ctx_dbg(3, ctx, "port:%d inst:%d <reg>:%d\n",
1671 			index, inst, regval);
1672 		if ((regval == inst) && (index == inst)) {
1673 			found_port = 1;
1674 			break;
1675 		}
1676 	}
1677 
1678 	if (!found_port) {
1679 		ctx_dbg(1, ctx, "No port node matches csi2 port:%d\n",
1680 			inst);
1681 		goto cleanup_exit;
1682 	}
1683 
1684 	ctx_dbg(3, ctx, "Scanning sub-device for csi2 port: %d\n",
1685 		inst);
1686 
1687 	ep_node = of_get_next_endpoint(port, ep_node);
1688 	if (!ep_node) {
1689 		ctx_dbg(3, ctx, "can't get next endpoint\n");
1690 		goto cleanup_exit;
1691 	}
1692 
1693 	sensor_node = of_graph_get_remote_port_parent(ep_node);
1694 	if (!sensor_node) {
1695 		ctx_dbg(3, ctx, "can't get remote parent\n");
1696 		goto cleanup_exit;
1697 	}
1698 	asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
1699 	asd->match.fwnode = of_fwnode_handle(sensor_node);
1700 
1701 	v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep_node), endpoint);
1702 
1703 	if (endpoint->bus_type != V4L2_MBUS_CSI2_DPHY) {
1704 		ctx_err(ctx, "Port:%d sub-device %pOFn is not a CSI2 device\n",
1705 			inst, sensor_node);
1706 		goto cleanup_exit;
1707 	}
1708 
1709 	/* Store Virtual Channel number */
1710 	ctx->virtual_channel = endpoint->base.id;
1711 
1712 	ctx_dbg(3, ctx, "Port:%d v4l2-endpoint: CSI2\n", inst);
1713 	ctx_dbg(3, ctx, "Virtual Channel=%d\n", ctx->virtual_channel);
1714 	ctx_dbg(3, ctx, "flags=0x%08x\n", endpoint->bus.mipi_csi2.flags);
1715 	ctx_dbg(3, ctx, "clock_lane=%d\n", endpoint->bus.mipi_csi2.clock_lane);
1716 	ctx_dbg(3, ctx, "num_data_lanes=%d\n",
1717 		endpoint->bus.mipi_csi2.num_data_lanes);
1718 	ctx_dbg(3, ctx, "data_lanes= <\n");
1719 	for (lane = 0; lane < endpoint->bus.mipi_csi2.num_data_lanes; lane++)
1720 		ctx_dbg(3, ctx, "\t%d\n",
1721 			endpoint->bus.mipi_csi2.data_lanes[lane]);
1722 	ctx_dbg(3, ctx, "\t>\n");
1723 
1724 	ctx_dbg(1, ctx, "Port: %d found sub-device %pOFn\n",
1725 		inst, sensor_node);
1726 
1727 	v4l2_async_notifier_init(&ctx->notifier);
1728 
1729 	ret = v4l2_async_notifier_add_subdev(&ctx->notifier, asd);
1730 	if (ret) {
1731 		ctx_err(ctx, "Error adding asd\n");
1732 		goto cleanup_exit;
1733 	}
1734 
1735 	ctx->notifier.ops = &cal_async_ops;
1736 	ret = v4l2_async_notifier_register(&ctx->v4l2_dev,
1737 					   &ctx->notifier);
1738 	if (ret) {
1739 		ctx_err(ctx, "Error registering async notifier\n");
1740 		v4l2_async_notifier_cleanup(&ctx->notifier);
1741 		ret = -EINVAL;
1742 	}
1743 
1744 	/*
1745 	 * On success we need to keep the reference on sensor_node; and if
1746 	 * v4l2_async_notifier_cleanup() was called above, sensor_node has
1747 	 * already been put, so it must not be put again here.
1748 	 */
1749 	sensor_node = NULL;
1750 
1751 cleanup_exit:
1752 	of_node_put(sensor_node);
1753 	of_node_put(ep_node);
1754 	of_node_put(port);
1755 
1756 	return ret;
1757 }
1758 
1759 static struct cal_ctx *cal_create_instance(struct cal_dev *dev, int inst)
1760 {
1761 	struct cal_ctx *ctx;
1762 	struct v4l2_ctrl_handler *hdl;
1763 	int ret;
1764 
1765 	ctx = devm_kzalloc(&dev->pdev->dev, sizeof(*ctx), GFP_KERNEL);
1766 	if (!ctx)
1767 		return NULL;
1768 
1769 	/* save the cal_dev * for future ref */
1770 	ctx->dev = dev;
1771 
1772 	snprintf(ctx->v4l2_dev.name, sizeof(ctx->v4l2_dev.name),
1773 		 "%s-%03d", CAL_MODULE_NAME, inst);
1774 	ret = v4l2_device_register(&dev->pdev->dev, &ctx->v4l2_dev);
1775 	if (ret)
1776 		goto err_exit;
1777 
1778 	hdl = &ctx->ctrl_handler;
1779 	ret = v4l2_ctrl_handler_init(hdl, 11);
1780 	if (ret) {
1781 		ctx_err(ctx, "Failed to init ctrl handler\n");
1782 		goto unreg_dev;
1783 	}
1784 	ctx->v4l2_dev.ctrl_handler = hdl;
1785 
1786 	/* Make sure Camera Core H/W register area is available */
1787 	ctx->cc = dev->cc[inst];
1788 
1789 	/* Store the instance id */
1790 	ctx->csi2_port = inst + 1;
1791 
1792 	ret = of_cal_create_instance(ctx, inst);
1793 	if (ret) {
1794 		ret = -EINVAL;
1795 		goto free_hdl;
1796 	}
1797 	return ctx;
1798 
1799 free_hdl:
1800 	v4l2_ctrl_handler_free(hdl);
1801 unreg_dev:
1802 	v4l2_device_unregister(&ctx->v4l2_dev);
1803 err_exit:
1804 	return NULL;
1805 }
1806 
1807 static int cal_probe(struct platform_device *pdev)
1808 {
1809 	struct cal_dev *dev;
1810 	struct cal_ctx *ctx;
1811 	int ret;
1812 	int irq;
1813 	int i;
1814 
1815 	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
1816 	if (!dev)
1817 		return -ENOMEM;
1818 
1819 	/* set pseudo v4l2 device name so we can use v4l2_printk */
1820 	strscpy(dev->v4l2_dev.name, CAL_MODULE_NAME,
1821 		sizeof(dev->v4l2_dev.name));
1822 
1823 	/* save pdev pointer */
1824 	dev->pdev = pdev;
1825 
1826 	dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1827 						"cal_top");
1828 	dev->base = devm_ioremap_resource(&pdev->dev, dev->res);
1829 	if (IS_ERR(dev->base))
1830 		return PTR_ERR(dev->base);
1831 
1832 	cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
1833 		dev->res->name, &dev->res->start, &dev->res->end);
1834 
1835 	irq = platform_get_irq(pdev, 0);
1836 	cal_dbg(1, dev, "got irq# %d\n", irq);
1837 	ret = devm_request_irq(&pdev->dev, irq, cal_irq, 0, CAL_MODULE_NAME,
1838 			       dev);
1839 	if (ret)
1840 		return ret;
1841 
1842 	platform_set_drvdata(pdev, dev);
1843 
1844 	dev->cm = cm_create(dev);
1845 	if (IS_ERR(dev->cm))
1846 		return PTR_ERR(dev->cm);
1847 
1848 	dev->cc[0] = cc_create(dev, 0);
1849 	if (IS_ERR(dev->cc[0]))
1850 		return PTR_ERR(dev->cc[0]);
1851 
1852 	dev->cc[1] = cc_create(dev, 1);
1853 	if (IS_ERR(dev->cc[1]))
1854 		return PTR_ERR(dev->cc[1]);
1855 
1856 	dev->ctx[0] = NULL;
1857 	dev->ctx[1] = NULL;
1858 
1859 	dev->ctx[0] = cal_create_instance(dev, 0);
1860 	dev->ctx[1] = cal_create_instance(dev, 1);
1861 	if (!dev->ctx[0] && !dev->ctx[1]) {
1862 		cal_err(dev, "Neither port is configured, no point in staying up\n");
1863 		return -ENODEV;
1864 	}
1865 
1866 	pm_runtime_enable(&pdev->dev);
1867 
1868 	ret = cal_runtime_get(dev);
1869 	if (ret)
1870 		goto runtime_disable;
1871 
1872 	/* Just check we can actually access the module */
1873 	cal_get_hwinfo(dev);
1874 
1875 	cal_runtime_put(dev);
1876 
1877 	return 0;
1878 
1879 runtime_disable:
1880 	pm_runtime_disable(&pdev->dev);
1881 	for (i = 0; i < CAL_NUM_CONTEXT; i++) {
1882 		ctx = dev->ctx[i];
1883 		if (ctx) {
1884 			v4l2_async_notifier_unregister(&ctx->notifier);
1885 			v4l2_async_notifier_cleanup(&ctx->notifier);
1886 			v4l2_ctrl_handler_free(&ctx->ctrl_handler);
1887 			v4l2_device_unregister(&ctx->v4l2_dev);
1888 		}
1889 	}
1890 
1891 	return ret;
1892 }
1893 
1894 static int cal_remove(struct platform_device *pdev)
1895 {
1896 	struct cal_dev *dev =
1897 		(struct cal_dev *)platform_get_drvdata(pdev);
1898 	struct cal_ctx *ctx;
1899 	int i;
1900 
1901 	cal_dbg(1, dev, "Removing %s\n", CAL_MODULE_NAME);
1902 
1903 	cal_runtime_get(dev);
1904 
1905 	for (i = 0; i < CAL_NUM_CONTEXT; i++) {
1906 		ctx = dev->ctx[i];
1907 		if (ctx) {
1908 			ctx_dbg(1, ctx, "unregistering %s\n",
1909 				video_device_node_name(&ctx->vdev));
1910 			camerarx_phy_disable(ctx);
1911 			v4l2_async_notifier_unregister(&ctx->notifier);
1912 			v4l2_async_notifier_cleanup(&ctx->notifier);
1913 			v4l2_ctrl_handler_free(&ctx->ctrl_handler);
1914 			v4l2_device_unregister(&ctx->v4l2_dev);
1915 			video_unregister_device(&ctx->vdev);
1916 		}
1917 	}
1918 
1919 	cal_runtime_put(dev);
1920 	pm_runtime_disable(&pdev->dev);
1921 
1922 	return 0;
1923 }
1924 
1925 #if defined(CONFIG_OF)
1926 static const struct of_device_id cal_of_match[] = {
1927 	{ .compatible = "ti,dra72-cal", },
1928 	{},
1929 };
1930 MODULE_DEVICE_TABLE(of, cal_of_match);
1931 #endif
1932 
1933 static struct platform_driver cal_pdrv = {
1934 	.probe		= cal_probe,
1935 	.remove		= cal_remove,
1936 	.driver		= {
1937 		.name	= CAL_MODULE_NAME,
1938 		.of_match_table = of_match_ptr(cal_of_match),
1939 	},
1940 };
1941 
1942 module_platform_driver(cal_pdrv);
1943