1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2017,2020 Intel Corporation
4  *
5  * Based partially on Intel IPU4 driver written by
6  *  Sakari Ailus <sakari.ailus@linux.intel.com>
7  *  Samu Onkalo <samu.onkalo@intel.com>
8  *  Jouni Högander <jouni.hogander@intel.com>
9  *  Jouni Ukkonen <jouni.ukkonen@intel.com>
10  *  Antti Laakso <antti.laakso@intel.com>
11  * et al.
12  */
13 
14 #include <linux/bitops.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/iopoll.h>
18 #include <linux/mm.h>
19 #include <linux/module.h>
20 #include <linux/pci.h>
21 #include <linux/pfn.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/property.h>
24 #include <linux/vmalloc.h>
25 #include <media/v4l2-ctrls.h>
26 #include <media/v4l2-device.h>
27 #include <media/v4l2-event.h>
28 #include <media/v4l2-fwnode.h>
29 #include <media/v4l2-ioctl.h>
30 #include <media/videobuf2-dma-sg.h>
31 
32 #include "ipu3-cio2.h"
33 
34 struct ipu3_cio2_fmt {
35 	u32 mbus_code;
36 	u32 fourcc;
37 	u8 mipicode;
38 	u8 bpp;
39 };
40 
41 /*
42  * These are the raw formats used by Intel's third-generation
43  * Image Processing Unit, known as IPU3.
44  * 10-bit raw Bayer packed: 32 bytes for every 25 pixels,
45  * with the last 6 LSBs unused.
46  */
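/*
 * Illustrative packing arithmetic (derived from the comment above, not from
 * additional documentation): 25 pixels * 10 bits = 250 bits = 31.25 bytes,
 * padded to a 32-byte unit, which leaves 6 unused bits per unit.
 */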
47 static const struct ipu3_cio2_fmt formats[] = {
48 	{	/* put default entry at beginning */
49 		.mbus_code	= MEDIA_BUS_FMT_SGRBG10_1X10,
50 		.fourcc		= V4L2_PIX_FMT_IPU3_SGRBG10,
51 		.mipicode	= 0x2b,
52 		.bpp		= 10,
53 	}, {
54 		.mbus_code	= MEDIA_BUS_FMT_SGBRG10_1X10,
55 		.fourcc		= V4L2_PIX_FMT_IPU3_SGBRG10,
56 		.mipicode	= 0x2b,
57 		.bpp		= 10,
58 	}, {
59 		.mbus_code	= MEDIA_BUS_FMT_SBGGR10_1X10,
60 		.fourcc		= V4L2_PIX_FMT_IPU3_SBGGR10,
61 		.mipicode	= 0x2b,
62 		.bpp		= 10,
63 	}, {
64 		.mbus_code	= MEDIA_BUS_FMT_SRGGB10_1X10,
65 		.fourcc		= V4L2_PIX_FMT_IPU3_SRGGB10,
66 		.mipicode	= 0x2b,
67 		.bpp		= 10,
68 	}, {
69 		.mbus_code	= MEDIA_BUS_FMT_Y10_1X10,
70 		.fourcc		= V4L2_PIX_FMT_IPU3_Y10,
71 		.mipicode	= 0x2b,
72 		.bpp		= 10,
73 	},
74 };
75 
76 /*
77  * cio2_find_format - look up a color format by fourcc and/or media bus code
78  * @pixelformat: fourcc to match, ignored if null
79  * @mbus_code: media bus code to match, ignored if null
80  */
81 static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
82 						    const u32 *mbus_code)
83 {
84 	unsigned int i;
85 
86 	for (i = 0; i < ARRAY_SIZE(formats); i++) {
87 		if (pixelformat && *pixelformat != formats[i].fourcc)
88 			continue;
89 		if (mbus_code && *mbus_code != formats[i].mbus_code)
90 			continue;
91 
92 		return &formats[i];
93 	}
94 
95 	return NULL;
96 }
97 
98 static inline u32 cio2_bytesperline(const unsigned int width)
99 {
100 	/*
101 	 * 64 bytes for every 50 pixels; the line length
102 	 * in bytes is a multiple of 64 (line end alignment).
103 	 */
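	/*
	 * Worked example (illustrative only): for the 4224-pixel maximum
	 * width, DIV_ROUND_UP(4224, 50) = 85 units of 64 bytes, i.e. a
	 * 5440-byte line, which is naturally 64-byte aligned.
	 */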
104 	return DIV_ROUND_UP(width, 50) * 64;
105 }
106 
107 /**************** FBPT operations ****************/
108 
109 static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
110 {
111 	struct device *dev = &cio2->pci_dev->dev;
112 
113 	if (cio2->dummy_lop) {
114 		dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_lop,
115 				  cio2->dummy_lop_bus_addr);
116 		cio2->dummy_lop = NULL;
117 	}
118 	if (cio2->dummy_page) {
119 		dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_page,
120 				  cio2->dummy_page_bus_addr);
121 		cio2->dummy_page = NULL;
122 	}
123 }
124 
125 static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
126 {
127 	struct device *dev = &cio2->pci_dev->dev;
128 	unsigned int i;
129 
130 	cio2->dummy_page = dma_alloc_coherent(dev, PAGE_SIZE,
131 					      &cio2->dummy_page_bus_addr,
132 					      GFP_KERNEL);
133 	cio2->dummy_lop = dma_alloc_coherent(dev, PAGE_SIZE,
134 					     &cio2->dummy_lop_bus_addr,
135 					     GFP_KERNEL);
136 	if (!cio2->dummy_page || !cio2->dummy_lop) {
137 		cio2_fbpt_exit_dummy(cio2);
138 		return -ENOMEM;
139 	}
140 	/*
141 	 * A List of Pointers (LOP) contains 1024 32-bit pointers, each to a 4 KiB page.
142 	 * Initialize each entry to the dummy_page bus base address.
143 	 */
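	/*
	 * Illustrative arithmetic: 1024 entries x 4 KiB per page means a
	 * single LOP page maps 4 MiB worth of dummy data.
	 */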
144 	for (i = 0; i < CIO2_LOP_ENTRIES; i++)
145 		cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);
146 
147 	return 0;
148 }
149 
150 static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
151 				   struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
152 {
153 	/*
154 	 * The CPU first initializes some fields in the FBPT and then sets
155 	 * the VALID bit. This barrier ensures that the DMA (device)
156 	 * does not see the VALID bit set before the other fields are
157 	 * initialized; otherwise it could lead to havoc.
158 	 */
159 	dma_wmb();
160 
161 	/*
162 	 * Request interrupts for start and completion.
163 	 * The VALID bit is applicable only to the first entry.
164 	 */
165 	entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
166 		CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
167 }
168 
169 /* Initialize FBPT entries to point to the dummy frame */
170 static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
171 				       struct cio2_fbpt_entry
172 				       entry[CIO2_MAX_LOPS])
173 {
174 	unsigned int i;
175 
176 	entry[0].first_entry.first_page_offset = 0;
177 	entry[1].second_entry.num_of_pages = CIO2_LOP_ENTRIES * CIO2_MAX_LOPS;
178 	entry[1].second_entry.last_page_available_bytes = PAGE_SIZE - 1;
179 
180 	for (i = 0; i < CIO2_MAX_LOPS; i++)
181 		entry[i].lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
182 
183 	cio2_fbpt_entry_enable(cio2, entry);
184 }
185 
186 /* Initialize FBPT entries to point to a given buffer */
187 static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
188 				     struct cio2_buffer *b,
189 				     struct cio2_fbpt_entry
190 				     entry[CIO2_MAX_LOPS])
191 {
192 	struct vb2_buffer *vb = &b->vbb.vb2_buf;
193 	unsigned int length = vb->planes[0].length;
194 	int remaining, i;
195 
196 	entry[0].first_entry.first_page_offset = b->offset;
197 	remaining = length + entry[0].first_entry.first_page_offset;
198 	entry[1].second_entry.num_of_pages = PFN_UP(remaining);
199 	/*
200 	 * last_page_available_bytes has the offset of the last byte in the
201 	 * last page which is still accessible by DMA. DMA cannot access
202 	 * beyond this point. Valid range for this is from 0 to 4095.
203 	 * 0 indicates that only the first byte in the page is DMA accessible.
204 	 * 4095 (PAGE_SIZE - 1) means every single byte in the last page
205 	 * is available for DMA transfer.
206 	 */
207 	remaining = offset_in_page(remaining) ?: PAGE_SIZE;
208 	entry[1].second_entry.last_page_available_bytes = remaining - 1;
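	/*
	 * Illustrative example (assumed values, 4 KiB pages): with offset 0
	 * and a 10240-byte plane, remaining = 10240, num_of_pages = 3,
	 * offset_in_page(10240) = 2048 and last_page_available_bytes = 2047,
	 * i.e. DMA may touch bytes 0..2047 of the third page.
	 */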
209 	/* Fill FBPT */
210 	remaining = length;
211 	i = 0;
212 	while (remaining > 0) {
213 		entry->lop_page_addr = PFN_DOWN(b->lop_bus_addr[i]);
214 		remaining -= CIO2_LOP_ENTRIES * PAGE_SIZE;
215 		entry++;
216 		i++;
217 	}
218 
219 	/*
220 	 * The first unused FBPT entry should still point to a valid LOP
221 	 */
222 	entry->lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
223 
224 	cio2_fbpt_entry_enable(cio2, entry);
225 }
226 
227 static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
228 {
229 	struct device *dev = &cio2->pci_dev->dev;
230 
231 	q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
232 				     GFP_KERNEL);
233 	if (!q->fbpt)
234 		return -ENOMEM;
235 
236 	return 0;
237 }
238 
239 static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
240 {
241 	dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
242 }
243 
244 /**************** CSI2 hardware setup ****************/
245 
246 /*
247  * The CSI2 receiver has several parameters affecting
248  * the receiver timings. These depend on the MIPI bus frequency
249  * F in Hz (sensor transmitter rate) as follows:
250  *     register value = (A/1e9 + B * UI) / COUNT_ACC
251  * where
252  *      UI = 1 / (2 * F) in seconds
253  *      COUNT_ACC = counter accuracy in seconds
254  *      For IPU3 COUNT_ACC = 0.0625
255  *
256  * A and B are coefficients from the table below,
257  * depending on whether the register minimum or maximum value is
258  * calculated.
259  *                                     Minimum     Maximum
260  * Clock lane                          A     B     A     B
261  * reg_rx_csi_dly_cnt_termen_clane     0     0    38     0
262  * reg_rx_csi_dly_cnt_settle_clane    95    -8   300   -16
263  * Data lanes
264  * reg_rx_csi_dly_cnt_termen_dlane0    0     0    35     4
265  * reg_rx_csi_dly_cnt_settle_dlane0   85    -2   145    -6
266  * reg_rx_csi_dly_cnt_termen_dlane1    0     0    35     4
267  * reg_rx_csi_dly_cnt_settle_dlane1   85    -2   145    -6
268  * reg_rx_csi_dly_cnt_termen_dlane2    0     0    35     4
269  * reg_rx_csi_dly_cnt_settle_dlane2   85    -2   145    -6
270  * reg_rx_csi_dly_cnt_termen_dlane3    0     0    35     4
271  * reg_rx_csi_dly_cnt_settle_dlane3   85    -2   145    -6
272  *
273  * We use the minimum values of both A and B.
274  */
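/*
 * Worked example (illustrative, for an assumed 400 MHz link frequency):
 * UI = 1 / (2 * 400 MHz) = 1.25 ns, so the clock lane settle minimum with
 * A = 95 and B = -8 is (95 + (-8) * 1.25) / 0.0625 = 1360 (terms in ns),
 * matching what cio2_rx_timing() below computes with integer arithmetic.
 */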
275 
276 /*
277  * shift for keeping value range suitable for 32-bit integer arithmetic
278  */
279 #define LIMIT_SHIFT	8
280 
281 static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
282 {
283 	const u32 accinv = 16; /* inverse of counter resolution */
284 	const u32 uiinv = 500000000; /* 1e9 / 2 */
285 	s32 r;
286 
287 	freq >>= LIMIT_SHIFT;
288 
289 	if (WARN_ON(freq <= 0 || freq > S32_MAX))
290 		return def;
291 	/*
292 	 * b could be 0, -2 or -8, so |accinv * b| is always
293 	 * less than (1 << ds) and thus |r| < 500000000.
294 	 */
295 	r = accinv * b * (uiinv >> LIMIT_SHIFT);
296 	r = r / (s32)freq;
297 	/* max value of a is 95 */
298 	r += accinv * a;
299 
300 	return r;
301 };
302 
303 /* Calculate the termination enable and settle delays for clock and data lane HS Rx */
304 static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
305 				 struct cio2_csi2_timing *timing,
306 				 unsigned int bpp, unsigned int lanes)
307 {
308 	struct device *dev = &cio2->pci_dev->dev;
309 	s64 freq;
310 
311 	if (!q->sensor)
312 		return -ENODEV;
313 
314 	freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes * 2);
315 	if (freq < 0) {
316 		dev_err(dev, "error %lld, invalid link_freq\n", freq);
317 		return freq;
318 	}
319 
320 	timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
321 					    CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
322 					    freq,
323 					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
324 	timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
325 					    CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
326 					    freq,
327 					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
328 	timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
329 					    CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
330 					    freq,
331 					    CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
332 	timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
333 					    CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
334 					    freq,
335 					    CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
336 
337 	dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
338 	dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
339 	dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
340 	dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);
341 
342 	return 0;
343 };
344 
345 static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
346 {
347 	static const int NUM_VCS = 4;
348 	static const int SID;	/* Stream id */
349 	static const int ENTRY;
350 	static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
351 					CIO2_FBPT_SUBENTRY_UNIT);
352 	const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
353 	const struct ipu3_cio2_fmt *fmt;
354 	void __iomem *const base = cio2->base;
355 	u8 lanes, csi2bus = q->csi2.port;
356 	u8 sensor_vc = SENSOR_VIR_CH_DFLT;
357 	struct cio2_csi2_timing timing;
358 	int i, r;
359 
360 	fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
361 	if (!fmt)
362 		return -EINVAL;
363 
364 	lanes = q->csi2.lanes;
365 
366 	r = cio2_csi2_calc_timing(cio2, q, &timing, fmt->bpp, lanes);
367 	if (r)
368 		return r;
369 
370 	writel(timing.clk_termen, q->csi_rx_base +
371 		CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
372 	writel(timing.clk_settle, q->csi_rx_base +
373 		CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
374 
375 	for (i = 0; i < lanes; i++) {
376 		writel(timing.dat_termen, q->csi_rx_base +
377 			CIO2_REG_CSIRX_DLY_CNT_TERMEN(i));
378 		writel(timing.dat_settle, q->csi_rx_base +
379 			CIO2_REG_CSIRX_DLY_CNT_SETTLE(i));
380 	}
381 
382 	writel(CIO2_PBM_WMCTRL1_MIN_2CK |
383 	       CIO2_PBM_WMCTRL1_MID1_2CK |
384 	       CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1);
385 	writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT |
386 	       CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT |
387 	       CIO2_PBM_WMCTRL2_OBFFWM_2CK <<
388 	       CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT |
389 	       CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT |
390 	       CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2);
391 	writel(CIO2_PBM_ARB_CTRL_LANES_DIV <<
392 	       CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT |
393 	       CIO2_PBM_ARB_CTRL_LE_EN |
394 	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN <<
395 	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT |
396 	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP <<
397 	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT,
398 	       base + CIO2_REG_PBM_ARB_CTRL);
399 	writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK,
400 	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
401 	writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK,
402 	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);
403 
404 	writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
405 	writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);
406 
407 	/* Configure MIPI backend */
408 	for (i = 0; i < NUM_VCS; i++)
409 		writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));
410 
411 	/* There are 16 short packet LUT entries */
412 	for (i = 0; i < 16; i++)
413 		writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
414 		       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
415 	writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
416 	       q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);
417 
418 	writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE);
419 	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
420 	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
421 	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
422 	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
423 	writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE);
424 
425 	writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) |
426 	       CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN),
427 	       base + CIO2_REG_INT_EN);
428 
429 	writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B)
430 	       << CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT,
431 	       base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus));
432 	writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT |
433 	       sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT |
434 	       fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT,
435 	       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
436 	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
437 	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
438 	writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus));
439 
440 	writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
441 	writel(CIO2_CGC_PRIM_TGE |
442 	       CIO2_CGC_SIDE_TGE |
443 	       CIO2_CGC_XOSC_TGE |
444 	       CIO2_CGC_D3I3_TGE |
445 	       CIO2_CGC_CSI2_INTERFRAME_TGE |
446 	       CIO2_CGC_CSI2_PORT_DCGE |
447 	       CIO2_CGC_SIDE_DCGE |
448 	       CIO2_CGC_PRIM_DCGE |
449 	       CIO2_CGC_ROSC_DCGE |
450 	       CIO2_CGC_XOSC_DCGE |
451 	       CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT |
452 	       CIO2_CGC_CSI_CLKGATE_HOLDOFF
453 	       << CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC);
454 	writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL);
455 	writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT |
456 	       CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
457 	       CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT |
458 	       CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
459 	       base + CIO2_REG_LTRVAL01);
460 	writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT |
461 	       CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
462 	       CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT |
463 	       CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
464 	       base + CIO2_REG_LTRVAL23);
465 
466 	for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
467 		writel(0, base + CIO2_REG_CDMABA(i));
468 		writel(0, base + CIO2_REG_CDMAC0(i));
469 		writel(0, base + CIO2_REG_CDMAC1(i));
470 	}
471 
472 	/* Enable DMA */
473 	writel(PFN_DOWN(q->fbpt_bus_addr), base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
474 
475 	writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
476 	       FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
477 	       CIO2_CDMAC0_DMA_INTR_ON_FE |
478 	       CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL |
479 	       CIO2_CDMAC0_DMA_EN |
480 	       CIO2_CDMAC0_DMA_INTR_ON_FS |
481 	       CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
482 
483 	writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT,
484 	       base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN));
485 
486 	writel(0, base + CIO2_REG_PBM_FOPN_ABORT);
487 
488 	writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT |
489 	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR |
490 	       CIO2_PXM_FRF_CFG_MSK_ECC_RE |
491 	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE,
492 	       base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));
493 
494 	/* Clear interrupts */
495 	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
496 	writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
497 	writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
498 	writel(~0, base + CIO2_REG_INT_STS);
499 
500 	/* Enable devices, starting from the last device in the pipe */
501 	writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
502 	writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
503 
504 	return 0;
505 }
506 
507 static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
508 {
509 	struct device *dev = &cio2->pci_dev->dev;
510 	void __iomem *const base = cio2->base;
511 	unsigned int i;
512 	u32 value;
513 	int ret;
514 
515 	/* Disable CSI receiver and MIPI backend devices */
516 	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
517 	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
518 	writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
519 	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
520 
521 	/* Halt DMA */
522 	writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
523 	ret = readl_poll_timeout(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN),
524 				 value, value & CIO2_CDMAC0_DMA_HALTED,
525 				 4000, 2000000);
526 	if (ret)
527 		dev_err(dev, "DMA %i can not be halted\n", CIO2_DMA_CHAN);
528 
529 	for (i = 0; i < CIO2_NUM_PORTS; i++) {
530 		writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
531 		       CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
532 		writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
533 		       CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
534 	}
535 }
536 
537 static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
538 {
539 	struct device *dev = &cio2->pci_dev->dev;
540 	struct cio2_queue *q = cio2->cur_queue;
541 	struct cio2_fbpt_entry *entry;
542 	u64 ns = ktime_get_ns();
543 
544 	if (dma_chan >= CIO2_QUEUES) {
545 		dev_err(dev, "bad DMA channel %i\n", dma_chan);
546 		return;
547 	}
548 
549 	entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
550 	if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
551 		dev_warn(dev, "no ready buffers found on DMA channel %u\n",
552 			 dma_chan);
553 		return;
554 	}
555 
556 	/* Find out which buffer(s) are ready */
557 	do {
558 		struct cio2_buffer *b;
559 
560 		b = q->bufs[q->bufs_first];
561 		if (b) {
562 			unsigned int received = entry[1].second_entry.num_of_bytes;
563 			unsigned long payload =
564 				vb2_get_plane_payload(&b->vbb.vb2_buf, 0);
565 
566 			q->bufs[q->bufs_first] = NULL;
567 			atomic_dec(&q->bufs_queued);
568 			dev_dbg(dev, "buffer %i done\n", b->vbb.vb2_buf.index);
569 
570 			b->vbb.vb2_buf.timestamp = ns;
571 			b->vbb.field = V4L2_FIELD_NONE;
572 			b->vbb.sequence = atomic_read(&q->frame_sequence);
573 			if (payload != received)
574 				dev_warn(dev,
575 					 "payload length is %lu, received %u\n",
576 					 payload, received);
577 			vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
578 		}
579 		atomic_inc(&q->frame_sequence);
580 		cio2_fbpt_entry_init_dummy(cio2, entry);
581 		q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
582 		entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
583 	} while (!(entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID));
584 }
585 
586 static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
587 {
588 	/*
589 	 * For the user space camera control algorithms it is essential
590 	 * to know when the reception of a frame has begun. That's often
591 	 * the best timing information to get from the hardware.
592 	 */
593 	struct v4l2_event event = {
594 		.type = V4L2_EVENT_FRAME_SYNC,
595 		.u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
596 	};
597 
598 	v4l2_event_queue(q->subdev.devnode, &event);
599 }
600 
601 static const char *const cio2_irq_errs[] = {
602 	"single packet header error corrected",
603 	"multiple packet header errors detected",
604 	"payload checksum (CRC) error",
605 	"fifo overflow",
606 	"reserved short packet data type detected",
607 	"reserved long packet data type detected",
608 	"incomplete long packet detected",
609 	"frame sync error",
610 	"line sync error",
611 	"DPHY start of transmission error",
612 	"DPHY synchronization error",
613 	"escape mode error",
614 	"escape mode trigger event",
615 	"escape mode ultra-low power state for data lane(s)",
616 	"escape mode ultra-low power state exit for clock lane",
617 	"inter-frame short packet discarded",
618 	"inter-frame long packet discarded",
619 	"non-matching Long Packet stalled",
620 };
621 
622 static void cio2_irq_log_irq_errs(struct device *dev, u8 port, u32 status)
623 {
624 	unsigned long csi2_status = status;
625 	unsigned int i;
626 
627 	for_each_set_bit(i, &csi2_status, ARRAY_SIZE(cio2_irq_errs))
628 		dev_err(dev, "CSI-2 receiver port %i: %s\n",
629 			port, cio2_irq_errs[i]);
630 
631 	if (fls_long(csi2_status) >= ARRAY_SIZE(cio2_irq_errs))
632 		dev_warn(dev, "unknown CSI2 error 0x%lx on port %i\n",
633 			 csi2_status, port);
634 }
635 
636 static const char *const cio2_port_errs[] = {
637 	"ECC recoverable",
638 	"DPHY not recoverable",
639 	"ECC not recoverable",
640 	"CRC error",
641 	"INTERFRAMEDATA",
642 	"PKT2SHORT",
643 	"PKT2LONG",
644 };
645 
646 static void cio2_irq_log_port_errs(struct device *dev, u8 port, u32 status)
647 {
648 	unsigned long port_status = status;
649 	unsigned int i;
650 
651 	for_each_set_bit(i, &port_status, ARRAY_SIZE(cio2_port_errs))
652 		dev_err(dev, "port %i error %s\n", port, cio2_port_errs[i]);
653 }
654 
655 static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
656 {
657 	struct device *dev = &cio2->pci_dev->dev;
658 	void __iomem *const base = cio2->base;
659 
660 	if (int_status & CIO2_INT_IOOE) {
661 		/*
662 		 * Interrupt on Output Error:
663 		 * 1) SRAM is full and FS is received, or
664 		 * 2) an invalid bit is detected by the DMA.
665 		 */
666 		u32 oe_status, oe_clear;
667 
668 		oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE);
669 		oe_status = oe_clear;
670 
671 		if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) {
672 			dev_err(dev, "DMA output error: 0x%x\n",
673 				(oe_status & CIO2_INT_EXT_OE_DMAOE_MASK)
674 				>> CIO2_INT_EXT_OE_DMAOE_SHIFT);
675 			oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK;
676 		}
677 		if (oe_status & CIO2_INT_EXT_OE_OES_MASK) {
678 			dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n",
679 				(oe_status & CIO2_INT_EXT_OE_OES_MASK)
680 				>> CIO2_INT_EXT_OE_OES_SHIFT);
681 			oe_status &= ~CIO2_INT_EXT_OE_OES_MASK;
682 		}
683 		writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE);
684 		if (oe_status)
685 			dev_warn(dev, "unknown interrupt 0x%x on OE\n",
686 				 oe_status);
687 		int_status &= ~CIO2_INT_IOOE;
688 	}
689 
690 	if (int_status & CIO2_INT_IOC_MASK) {
691 		/* DMA IO done -- frame ready */
692 		u32 clr = 0;
693 		unsigned int d;
694 
695 		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
696 			if (int_status & CIO2_INT_IOC(d)) {
697 				clr |= CIO2_INT_IOC(d);
698 				cio2_buffer_done(cio2, d);
699 			}
700 		int_status &= ~clr;
701 	}
702 
703 	if (int_status & CIO2_INT_IOS_IOLN_MASK) {
704 		/* DMA IO starts or reached specified line */
705 		u32 clr = 0;
706 		unsigned int d;
707 
708 		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
709 			if (int_status & CIO2_INT_IOS_IOLN(d)) {
710 				clr |= CIO2_INT_IOS_IOLN(d);
711 				if (d == CIO2_DMA_CHAN)
712 					cio2_queue_event_sof(cio2,
713 							     cio2->cur_queue);
714 			}
715 		int_status &= ~clr;
716 	}
717 
718 	if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
719 		/* CSI2 receiver (error) interrupt */
720 		unsigned int port;
721 		u32 ie_status;
722 
723 		ie_status = readl(base + CIO2_REG_INT_STS_EXT_IE);
724 
725 		for (port = 0; port < CIO2_NUM_PORTS; port++) {
726 			u32 port_status = (ie_status >> (port * 8)) & 0xff;
727 
728 			cio2_irq_log_port_errs(dev, port, port_status);
729 
730 			if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
731 				void __iomem *csi_rx_base =
732 						base + CIO2_REG_PIPE_BASE(port);
733 				u32 csi2_status;
734 
735 				csi2_status = readl(csi_rx_base +
736 						CIO2_REG_IRQCTRL_STATUS);
737 
738 				cio2_irq_log_irq_errs(dev, port, csi2_status);
739 
740 				writel(csi2_status,
741 				       csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
742 			}
743 		}
744 
745 		writel(ie_status, base + CIO2_REG_INT_STS_EXT_IE);
746 
747 		int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
748 	}
749 
750 	if (int_status)
751 		dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status);
752 }
753 
754 static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
755 {
756 	struct cio2_device *cio2 = cio2_ptr;
757 	void __iomem *const base = cio2->base;
758 	struct device *dev = &cio2->pci_dev->dev;
759 	u32 int_status;
760 
761 	int_status = readl(base + CIO2_REG_INT_STS);
762 	dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status);
763 	if (!int_status)
764 		return IRQ_NONE;
765 
766 	do {
767 		writel(int_status, base + CIO2_REG_INT_STS);
768 		cio2_irq_handle_once(cio2, int_status);
769 		int_status = readl(base + CIO2_REG_INT_STS);
770 		if (int_status)
771 			dev_dbg(dev, "pending status 0x%x\n", int_status);
772 	} while (int_status);
773 
774 	return IRQ_HANDLED;
775 }
776 
777 /**************** Videobuf2 interface ****************/
778 
779 static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
780 					enum vb2_buffer_state state)
781 {
782 	unsigned int i;
783 
784 	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
785 		if (q->bufs[i]) {
786 			atomic_dec(&q->bufs_queued);
787 			vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
788 					state);
789 			q->bufs[i] = NULL;
790 		}
791 	}
792 }
793 
794 static int cio2_vb2_queue_setup(struct vb2_queue *vq,
795 				unsigned int *num_buffers,
796 				unsigned int *num_planes,
797 				unsigned int sizes[],
798 				struct device *alloc_devs[])
799 {
800 	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
801 	struct device *dev = &cio2->pci_dev->dev;
802 	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
803 	unsigned int i;
804 
805 	if (*num_planes && *num_planes < q->format.num_planes)
806 		return -EINVAL;
807 
808 	for (i = 0; i < q->format.num_planes; ++i) {
809 		if (*num_planes && sizes[i] < q->format.plane_fmt[i].sizeimage)
810 			return -EINVAL;
811 		sizes[i] = q->format.plane_fmt[i].sizeimage;
812 		alloc_devs[i] = dev;
813 	}
814 
815 	*num_planes = q->format.num_planes;
816 	*num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);
817 
818 	/* Initialize buffer queue */
819 	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
820 		q->bufs[i] = NULL;
821 		cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
822 	}
823 	atomic_set(&q->bufs_queued, 0);
824 	q->bufs_first = 0;
825 	q->bufs_next = 0;
826 
827 	return 0;
828 }
829 
830 /* Called after each buffer is allocated */
831 static int cio2_vb2_buf_init(struct vb2_buffer *vb)
832 {
833 	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
834 	struct device *dev = &cio2->pci_dev->dev;
835 	struct cio2_buffer *b = to_cio2_buffer(vb);
836 	unsigned int pages = PFN_UP(vb->planes[0].length);
837 	unsigned int lops = DIV_ROUND_UP(pages + 1, CIO2_LOP_ENTRIES);
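	/*
	 * The "+ 1" above reserves room for the trailing dummy_page entry
	 * appended after the scatterlist has been walked below.
	 */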
838 	struct sg_table *sg;
839 	struct sg_dma_page_iter sg_iter;
840 	unsigned int i, j;
841 
842 	if (lops <= 0 || lops > CIO2_MAX_LOPS) {
843 		dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
844 			vb->planes[0].length);
845 		return -ENOSPC;		/* Should never happen */
846 	}
847 
848 	memset(b->lop, 0, sizeof(b->lop));
849 	/* Allocate LOP table */
850 	for (i = 0; i < lops; i++) {
851 		b->lop[i] = dma_alloc_coherent(dev, PAGE_SIZE,
852 					       &b->lop_bus_addr[i], GFP_KERNEL);
853 		if (!b->lop[i])
854 			goto fail;
855 	}
856 
857 	/* Fill LOP */
858 	sg = vb2_dma_sg_plane_desc(vb, 0);
859 	if (!sg)
860 		return -ENOMEM;
861 
862 	if (sg->nents && sg->sgl)
863 		b->offset = sg->sgl->offset;
864 
865 	i = j = 0;
866 	for_each_sg_dma_page(sg->sgl, &sg_iter, sg->nents, 0) {
867 		if (!pages--)
868 			break;
869 		b->lop[i][j] = PFN_DOWN(sg_page_iter_dma_address(&sg_iter));
870 		j++;
871 		if (j == CIO2_LOP_ENTRIES) {
872 			i++;
873 			j = 0;
874 		}
875 	}
876 
877 	b->lop[i][j] = PFN_DOWN(cio2->dummy_page_bus_addr);
878 	return 0;
879 fail:
880 	while (i--)
881 		dma_free_coherent(dev, PAGE_SIZE, b->lop[i], b->lop_bus_addr[i]);
882 	return -ENOMEM;
883 }
884 
885 /* Transfer buffer ownership to cio2 */
886 static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
887 {
888 	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
889 	struct device *dev = &cio2->pci_dev->dev;
890 	struct cio2_queue *q =
891 		container_of(vb->vb2_queue, struct cio2_queue, vbq);
892 	struct cio2_buffer *b = to_cio2_buffer(vb);
893 	struct cio2_fbpt_entry *entry;
894 	unsigned long flags;
895 	unsigned int i, j, next = q->bufs_next;
896 	int bufs_queued = atomic_inc_return(&q->bufs_queued);
897 	u32 fbpt_rp;
898 
899 	dev_dbg(dev, "queue buffer %d\n", vb->index);
900 
901 	/*
902 	 * This code queues the buffer to the CIO2 DMA engine, which starts
903 	 * running once streaming has started. It is possible that this code
904 	 * gets preempted due to increased CPU load. When that happens, the
905 	 * driver does not get an opportunity to queue new buffers to the
906 	 * CIO2 DMA engine. When the DMA engine encounters an FBPT entry
907 	 * without the VALID bit set, it halts, and a restart of both the
908 	 * DMA engine and the sensor is required to continue streaming.
909 	 * This is undesirable but highly unlikely, given that the DMA
910 	 * engine would need to process 32 FBPT entries before running into
911 	 * an entry without the VALID bit set. We try to mitigate this by
912 	 * disabling interrupts for the duration of this queueing.
913 	 */
914 	local_irq_save(flags);
915 
916 	fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
917 		   >> CIO2_CDMARI_FBPT_RP_SHIFT)
918 		   & CIO2_CDMARI_FBPT_RP_MASK;
919 
920 	/*
921 	 * fbpt_rp is the FBPT entry that the DMA is currently working
922 	 * on, but since it could jump to the next entry at any time,
923 	 * assume that we might already be there.
924 	 */
925 	fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
926 
927 	if (bufs_queued <= 1 || fbpt_rp == next)
928 		/* Buffers were drained */
929 		next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
930 
931 	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
932 		/*
933 		 * We have allocated CIO2_MAX_BUFFERS circularly for the
934 		 * hardware, while the user has requested an N-buffer queue.
935 		 * The driver ensures N <= CIO2_MAX_BUFFERS and guarantees
936 		 * that whenever the user queues a buffer, a free slot exists.
937 		 */
938 		if (!q->bufs[next]) {
939 			q->bufs[next] = b;
940 			entry = &q->fbpt[next * CIO2_MAX_LOPS];
941 			cio2_fbpt_entry_init_buf(cio2, b, entry);
942 			local_irq_restore(flags);
943 			q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
944 			for (j = 0; j < vb->num_planes; j++)
945 				vb2_set_plane_payload(vb, j,
946 					q->format.plane_fmt[j].sizeimage);
947 			return;
948 		}
949 
950 		dev_dbg(dev, "entry %i was full!\n", next);
951 		next = (next + 1) % CIO2_MAX_BUFFERS;
952 	}
953 
954 	local_irq_restore(flags);
955 	dev_err(dev, "error: all cio2 entries were full!\n");
956 	atomic_dec(&q->bufs_queued);
957 	vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
958 }
959 
960 /* Called when each buffer is freed */
961 static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
962 {
963 	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
964 	struct device *dev = &cio2->pci_dev->dev;
965 	struct cio2_buffer *b = to_cio2_buffer(vb);
966 	unsigned int i;
967 
968 	/* Free LOP table */
969 	for (i = 0; i < CIO2_MAX_LOPS; i++) {
970 		if (b->lop[i])
971 			dma_free_coherent(dev, PAGE_SIZE,
972 					  b->lop[i], b->lop_bus_addr[i]);
973 	}
974 }
975 
976 static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
977 {
978 	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
979 	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
980 	struct device *dev = &cio2->pci_dev->dev;
981 	int r;
982 
983 	cio2->cur_queue = q;
984 	atomic_set(&q->frame_sequence, 0);
985 
986 	r = pm_runtime_resume_and_get(dev);
987 	if (r < 0) {
988 		dev_info(dev, "failed to set power %d\n", r);
989 		return r;
990 	}
991 
992 	r = video_device_pipeline_start(&q->vdev, &q->pipe);
993 	if (r)
994 		goto fail_pipeline;
995 
996 	r = cio2_hw_init(cio2, q);
997 	if (r)
998 		goto fail_hw;
999 
1000 	/* Start streaming on sensor */
1001 	r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
1002 	if (r)
1003 		goto fail_csi2_subdev;
1004 
1005 	cio2->streaming = true;
1006 
1007 	return 0;
1008 
1009 fail_csi2_subdev:
1010 	cio2_hw_exit(cio2, q);
1011 fail_hw:
1012 	video_device_pipeline_stop(&q->vdev);
1013 fail_pipeline:
1014 	dev_dbg(dev, "failed to start streaming (%d)\n", r);
1015 	cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
1016 	pm_runtime_put(dev);
1017 
1018 	return r;
1019 }
1020 
1021 static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
1022 {
1023 	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
1024 	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
1025 	struct device *dev = &cio2->pci_dev->dev;
1026 
1027 	if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
1028 		dev_err(dev, "failed to stop sensor streaming\n");
1029 
1030 	cio2_hw_exit(cio2, q);
1031 	synchronize_irq(cio2->pci_dev->irq);
1032 	cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
1033 	video_device_pipeline_stop(&q->vdev);
1034 	pm_runtime_put(dev);
1035 	cio2->streaming = false;
1036 }
1037 
1038 static const struct vb2_ops cio2_vb2_ops = {
1039 	.buf_init = cio2_vb2_buf_init,
1040 	.buf_queue = cio2_vb2_buf_queue,
1041 	.buf_cleanup = cio2_vb2_buf_cleanup,
1042 	.queue_setup = cio2_vb2_queue_setup,
1043 	.start_streaming = cio2_vb2_start_streaming,
1044 	.stop_streaming = cio2_vb2_stop_streaming,
1045 	.wait_prepare = vb2_ops_wait_prepare,
1046 	.wait_finish = vb2_ops_wait_finish,
1047 };
1048 
1049 /**************** V4L2 interface ****************/
1050 
1051 static int cio2_v4l2_querycap(struct file *file, void *fh,
1052 			      struct v4l2_capability *cap)
1053 {
1054 	strscpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
1055 	strscpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
1056 
1057 	return 0;
1058 }
1059 
1060 static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
1061 			      struct v4l2_fmtdesc *f)
1062 {
1063 	if (f->index >= ARRAY_SIZE(formats))
1064 		return -EINVAL;
1065 
1066 	f->pixelformat = formats[f->index].fourcc;
1067 
1068 	return 0;
1069 }
1070 
1071 /* The format is validated in cio2_video_link_validate() */
1072 static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
1073 {
1074 	struct cio2_queue *q = file_to_cio2_queue(file);
1075 
1076 	f->fmt.pix_mp = q->format;
1077 
1078 	return 0;
1079 }
1080 
1081 static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
1082 {
1083 	const struct ipu3_cio2_fmt *fmt;
1084 	struct v4l2_pix_format_mplane *mpix = &f->fmt.pix_mp;
1085 
1086 	fmt = cio2_find_format(&mpix->pixelformat, NULL);
1087 	if (!fmt)
1088 		fmt = &formats[0];
1089 
1090 	/* Only supports up to 4224x3136 */
1091 	if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
1092 		mpix->width = CIO2_IMAGE_MAX_WIDTH;
1093 	if (mpix->height > CIO2_IMAGE_MAX_HEIGHT)
1094 		mpix->height = CIO2_IMAGE_MAX_HEIGHT;
1095 
1096 	mpix->num_planes = 1;
1097 	mpix->pixelformat = fmt->fourcc;
1098 	mpix->colorspace = V4L2_COLORSPACE_RAW;
1099 	mpix->field = V4L2_FIELD_NONE;
1100 	mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
1101 	mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
1102 							mpix->height;
1103 
1104 	/* use default */
1105 	mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
1106 	mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
1107 	mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
1108 
1109 	return 0;
1110 }
1111 
1112 static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
1113 {
1114 	struct cio2_queue *q = file_to_cio2_queue(file);
1115 
1116 	cio2_v4l2_try_fmt(file, fh, f);
1117 	q->format = f->fmt.pix_mp;
1118 
1119 	return 0;
1120 }
1121 
1122 static int
1123 cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
1124 {
1125 	if (input->index > 0)
1126 		return -EINVAL;
1127 
1128 	strscpy(input->name, "camera", sizeof(input->name));
1129 	input->type = V4L2_INPUT_TYPE_CAMERA;
1130 
1131 	return 0;
1132 }
1133 
1134 static int
1135 cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
1136 {
1137 	*input = 0;
1138 
1139 	return 0;
1140 }
1141 
1142 static int
1143 cio2_video_s_input(struct file *file, void *fh, unsigned int input)
1144 {
1145 	return input == 0 ? 0 : -EINVAL;
1146 }
1147 
1148 static const struct v4l2_file_operations cio2_v4l2_fops = {
1149 	.owner = THIS_MODULE,
1150 	.unlocked_ioctl = video_ioctl2,
1151 	.open = v4l2_fh_open,
1152 	.release = vb2_fop_release,
1153 	.poll = vb2_fop_poll,
1154 	.mmap = vb2_fop_mmap,
1155 };
1156 
1157 static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
1158 	.vidioc_querycap = cio2_v4l2_querycap,
1159 	.vidioc_enum_fmt_vid_cap = cio2_v4l2_enum_fmt,
1160 	.vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
1161 	.vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
1162 	.vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
1163 	.vidioc_reqbufs = vb2_ioctl_reqbufs,
1164 	.vidioc_create_bufs = vb2_ioctl_create_bufs,
1165 	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
1166 	.vidioc_querybuf = vb2_ioctl_querybuf,
1167 	.vidioc_qbuf = vb2_ioctl_qbuf,
1168 	.vidioc_dqbuf = vb2_ioctl_dqbuf,
1169 	.vidioc_streamon = vb2_ioctl_streamon,
1170 	.vidioc_streamoff = vb2_ioctl_streamoff,
1171 	.vidioc_expbuf = vb2_ioctl_expbuf,
1172 	.vidioc_enum_input = cio2_video_enum_input,
1173 	.vidioc_g_input	= cio2_video_g_input,
1174 	.vidioc_s_input	= cio2_video_s_input,
1175 };
1176 
1177 static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
1178 				       struct v4l2_fh *fh,
1179 				       struct v4l2_event_subscription *sub)
1180 {
1181 	if (sub->type != V4L2_EVENT_FRAME_SYNC)
1182 		return -EINVAL;
1183 
1184 	/* Line number. For now only zero is accepted. */
1185 	if (sub->id != 0)
1186 		return -EINVAL;
1187 
1188 	return v4l2_event_subscribe(fh, sub, 0, NULL);
1189 }
1190 
1191 static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
1192 {
1193 	struct v4l2_mbus_framefmt *format;
1194 	const struct v4l2_mbus_framefmt fmt_default = {
1195 		.width = 1936,
1196 		.height = 1096,
1197 		.code = formats[0].mbus_code,
1198 		.field = V4L2_FIELD_NONE,
1199 		.colorspace = V4L2_COLORSPACE_RAW,
1200 		.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
1201 		.quantization = V4L2_QUANTIZATION_DEFAULT,
1202 		.xfer_func = V4L2_XFER_FUNC_DEFAULT,
1203 	};
1204 
1205 	/* Initialize try_fmt */
1206 	format = v4l2_subdev_get_try_format(sd, fh->state, CIO2_PAD_SINK);
1207 	*format = fmt_default;
1208 
1209 	/* same as sink */
1210 	format = v4l2_subdev_get_try_format(sd, fh->state, CIO2_PAD_SOURCE);
1211 	*format = fmt_default;
1212 
1213 	return 0;
1214 }
1215 
1216 /*
1217  * cio2_subdev_get_fmt - Handle get format by pads subdev method
1218  * @sd: pointer to v4l2 subdev structure
1219  * @sd_state: V4L2 subdev state
1220  * @fmt: pointer to v4l2 subdev format structure
1221  * return 0 on success or -EINVAL on error
1222  */
1223 static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
1224 			       struct v4l2_subdev_state *sd_state,
1225 			       struct v4l2_subdev_format *fmt)
1226 {
1227 	struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1228 
1229 	mutex_lock(&q->subdev_lock);
1230 
1231 	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
1232 		fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
1233 							  fmt->pad);
1234 	else
1235 		fmt->format = q->subdev_fmt;
1236 
1237 	mutex_unlock(&q->subdev_lock);
1238 
1239 	return 0;
1240 }
1241 
1242 /*
1243  * cio2_subdev_set_fmt - Handle set format by pads subdev method
1244  * @sd: pointer to v4l2 subdev structure
1245  * @sd_state: V4L2 subdev state
1246  * @fmt: pointer to v4l2 subdev format structure
1247  * return 0 on success or -EINVAL on error
1248  */
1249 static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
1250 			       struct v4l2_subdev_state *sd_state,
1251 			       struct v4l2_subdev_format *fmt)
1252 {
1253 	struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1254 	struct v4l2_mbus_framefmt *mbus;
1255 	u32 mbus_code = fmt->format.code;
1256 	unsigned int i;
1257 
1258 	/*
1259 	 * Only allow setting sink pad format;
1260 	 * source always propagates from sink
1261 	 */
1262 	if (fmt->pad == CIO2_PAD_SOURCE)
1263 		return cio2_subdev_get_fmt(sd, sd_state, fmt);
1264 
1265 	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
1266 		mbus = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
1267 	else
1268 		mbus = &q->subdev_fmt;
1269 
1270 	fmt->format.code = formats[0].mbus_code;
1271 
1272 	for (i = 0; i < ARRAY_SIZE(formats); i++) {
1273 		if (formats[i].mbus_code == mbus_code) {
1274 			fmt->format.code = mbus_code;
1275 			break;
1276 		}
1277 	}
1278 
1279 	fmt->format.width = min(fmt->format.width, CIO2_IMAGE_MAX_WIDTH);
1280 	fmt->format.height = min(fmt->format.height, CIO2_IMAGE_MAX_HEIGHT);
1281 	fmt->format.field = V4L2_FIELD_NONE;
1282 
1283 	mutex_lock(&q->subdev_lock);
1284 	*mbus = fmt->format;
1285 	mutex_unlock(&q->subdev_lock);
1286 
1287 	return 0;
1288 }
1289 
1290 static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
1291 				      struct v4l2_subdev_state *sd_state,
1292 				      struct v4l2_subdev_mbus_code_enum *code)
1293 {
1294 	if (code->index >= ARRAY_SIZE(formats))
1295 		return -EINVAL;
1296 
1297 	code->code = formats[code->index].mbus_code;
1298 	return 0;
1299 }
1300 
1301 static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
1302 						struct v4l2_subdev_format *fmt)
1303 {
1304 	if (is_media_entity_v4l2_subdev(pad->entity)) {
1305 		struct v4l2_subdev *sd =
1306 			media_entity_to_v4l2_subdev(pad->entity);
1307 
1308 		fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
1309 		fmt->pad = pad->index;
1310 		return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
1311 	}
1312 
1313 	return -EINVAL;
1314 }
1315 
1316 static int cio2_video_link_validate(struct media_link *link)
1317 {
1318 	struct media_entity *entity = link->sink->entity;
1319 	struct video_device *vd = media_entity_to_video_device(entity);
1320 	struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
1321 	struct cio2_device *cio2 = video_get_drvdata(vd);
1322 	struct device *dev = &cio2->pci_dev->dev;
1323 	struct v4l2_subdev_format source_fmt;
1324 	int ret;
1325 
1326 	if (!media_pad_remote_pad_first(entity->pads)) {
1327 		dev_info(dev, "video node %s pad not connected\n", vd->name);
1328 		return -ENOTCONN;
1329 	}
1330 
1331 	ret = cio2_subdev_link_validate_get_format(link->source, &source_fmt);
1332 	if (ret < 0)
1333 		return 0;
1334 
1335 	if (source_fmt.format.width != q->format.width ||
1336 	    source_fmt.format.height != q->format.height) {
1337 		dev_err(dev, "Wrong width or height %ux%u (%ux%u expected)\n",
1338 			q->format.width, q->format.height,
1339 			source_fmt.format.width, source_fmt.format.height);
1340 		return -EINVAL;
1341 	}
1342 
1343 	if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code))
1344 		return -EINVAL;
1345 
1346 	return 0;
1347 }
1348 
1349 static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
1350 	.subscribe_event = cio2_subdev_subscribe_event,
1351 	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
1352 };
1353 
1354 static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
1355 	.open = cio2_subdev_open,
1356 };
1357 
1358 static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
1359 	.link_validate = v4l2_subdev_link_validate_default,
1360 	.get_fmt = cio2_subdev_get_fmt,
1361 	.set_fmt = cio2_subdev_set_fmt,
1362 	.enum_mbus_code = cio2_subdev_enum_mbus_code,
1363 };
1364 
1365 static const struct v4l2_subdev_ops cio2_subdev_ops = {
1366 	.core = &cio2_subdev_core_ops,
1367 	.pad = &cio2_subdev_pad_ops,
1368 };
1369 
1370 /******* V4L2 sub-device asynchronous registration callbacks ***********/
1371 
1372 struct sensor_async_subdev {
1373 	struct v4l2_async_subdev asd;
1374 	struct csi2_bus_info csi2;
1375 };
1376 
1377 #define to_sensor_asd(asd)	container_of(asd, struct sensor_async_subdev, asd)
1378 
1379 /* The .bound() notifier callback when a match is found */
1380 static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
1381 			       struct v4l2_subdev *sd,
1382 			       struct v4l2_async_subdev *asd)
1383 {
1384 	struct cio2_device *cio2 = to_cio2_device(notifier);
1385 	struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
1386 	struct cio2_queue *q;
1387 
1388 	if (cio2->queue[s_asd->csi2.port].sensor)
1389 		return -EBUSY;
1390 
1391 	q = &cio2->queue[s_asd->csi2.port];
1392 
1393 	q->csi2 = s_asd->csi2;
1394 	q->sensor = sd;
1395 	q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);
1396 
1397 	return 0;
1398 }
1399 
1400 /* The .unbind callback */
1401 static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
1402 				 struct v4l2_subdev *sd,
1403 				 struct v4l2_async_subdev *asd)
1404 {
1405 	struct cio2_device *cio2 = to_cio2_device(notifier);
1406 	struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
1407 
1408 	cio2->queue[s_asd->csi2.port].sensor = NULL;
1409 }
1410 
1411 /* .complete() is called after all subdevices have been located */
1412 static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
1413 {
1414 	struct cio2_device *cio2 = to_cio2_device(notifier);
1415 	struct device *dev = &cio2->pci_dev->dev;
1416 	struct sensor_async_subdev *s_asd;
1417 	struct v4l2_async_subdev *asd;
1418 	struct cio2_queue *q;
1419 	unsigned int pad;
1420 	int ret;
1421 
1422 	list_for_each_entry(asd, &cio2->notifier.asd_list, asd_list) {
1423 		s_asd = to_sensor_asd(asd);
1424 		q = &cio2->queue[s_asd->csi2.port];
1425 
1426 		for (pad = 0; pad < q->sensor->entity.num_pads; pad++)
1427 			if (q->sensor->entity.pads[pad].flags &
1428 						MEDIA_PAD_FL_SOURCE)
1429 				break;
1430 
1431 		if (pad == q->sensor->entity.num_pads) {
1432 			dev_err(dev, "failed to find src pad for %s\n",
1433 				q->sensor->name);
1434 			return -ENXIO;
1435 		}
1436 
1437 		ret = media_create_pad_link(
1438 				&q->sensor->entity, pad,
1439 				&q->subdev.entity, CIO2_PAD_SINK,
1440 				0);
1441 		if (ret) {
1442 			dev_err(dev, "failed to create link for %s\n",
1443 				q->sensor->name);
1444 			return ret;
1445 		}
1446 	}
1447 
1448 	return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
1449 }
1450 
1451 static const struct v4l2_async_notifier_operations cio2_async_ops = {
1452 	.bound = cio2_notifier_bound,
1453 	.unbind = cio2_notifier_unbind,
1454 	.complete = cio2_notifier_complete,
1455 };
1456 
1457 static int cio2_parse_firmware(struct cio2_device *cio2)
1458 {
1459 	struct device *dev = &cio2->pci_dev->dev;
1460 	unsigned int i;
1461 	int ret;
1462 
1463 	for (i = 0; i < CIO2_NUM_PORTS; i++) {
1464 		struct v4l2_fwnode_endpoint vep = {
1465 			.bus_type = V4L2_MBUS_CSI2_DPHY
1466 		};
1467 		struct sensor_async_subdev *s_asd;
1468 		struct fwnode_handle *ep;
1469 
1470 		ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0,
1471 						FWNODE_GRAPH_ENDPOINT_NEXT);
1472 		if (!ep)
1473 			continue;
1474 
1475 		ret = v4l2_fwnode_endpoint_parse(ep, &vep);
1476 		if (ret)
1477 			goto err_parse;
1478 
1479 		s_asd = v4l2_async_nf_add_fwnode_remote(&cio2->notifier, ep,
1480 							struct
1481 							sensor_async_subdev);
1482 		if (IS_ERR(s_asd)) {
1483 			ret = PTR_ERR(s_asd);
1484 			goto err_parse;
1485 		}
1486 
1487 		s_asd->csi2.port = vep.base.port;
1488 		s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes;
1489 
1490 		fwnode_handle_put(ep);
1491 
1492 		continue;
1493 
1494 err_parse:
1495 		fwnode_handle_put(ep);
1496 		return ret;
1497 	}
1498 
1499 	/*
1500 	 * Proceed even without sensors connected to allow the device to
1501 	 * suspend.
1502 	 */
1503 	cio2->notifier.ops = &cio2_async_ops;
1504 	ret = v4l2_async_nf_register(&cio2->v4l2_dev, &cio2->notifier);
1505 	if (ret)
1506 		dev_err(dev, "failed to register async notifier : %d\n", ret);
1507 
1508 	return ret;
1509 }
1510 
1511 /**************** Queue initialization ****************/
1512 static const struct media_entity_operations cio2_media_ops = {
1513 	.link_validate = v4l2_subdev_link_validate,
1514 };
1515 
1516 static const struct media_entity_operations cio2_video_entity_ops = {
1517 	.link_validate = cio2_video_link_validate,
1518 };
1519 
1520 static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
1521 {
1522 	static const u32 default_width = 1936;
1523 	static const u32 default_height = 1096;
1524 	const struct ipu3_cio2_fmt dflt_fmt = formats[0];
1525 	struct device *dev = &cio2->pci_dev->dev;
1526 	struct video_device *vdev = &q->vdev;
1527 	struct vb2_queue *vbq = &q->vbq;
1528 	struct v4l2_subdev *subdev = &q->subdev;
1529 	struct v4l2_mbus_framefmt *fmt;
1530 	int r;
1531 
1532 	/* Initialize miscellaneous variables */
1533 	mutex_init(&q->lock);
1534 	mutex_init(&q->subdev_lock);
1535 
1536 	/* Initialize formats to default values */
1537 	fmt = &q->subdev_fmt;
1538 	fmt->width = default_width;
1539 	fmt->height = default_height;
1540 	fmt->code = dflt_fmt.mbus_code;
1541 	fmt->field = V4L2_FIELD_NONE;
1542 
1543 	q->format.width = default_width;
1544 	q->format.height = default_height;
1545 	q->format.pixelformat = dflt_fmt.fourcc;
1546 	q->format.colorspace = V4L2_COLORSPACE_RAW;
1547 	q->format.field = V4L2_FIELD_NONE;
1548 	q->format.num_planes = 1;
1549 	q->format.plane_fmt[0].bytesperline =
1550 				cio2_bytesperline(q->format.width);
1551 	q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline *
1552 						q->format.height;
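	/*
	 * Illustrative arithmetic: for the 1936x1096 default this yields
	 * 39 * 64 = 2496 bytes per line and 2496 * 1096 = 2735616 bytes
	 * per frame.
	 */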
1553 
1554 	/* Initialize fbpt */
1555 	r = cio2_fbpt_init(cio2, q);
1556 	if (r)
1557 		goto fail_fbpt;
1558 
1559 	/* Initialize media entities */
1560 	q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
1561 		MEDIA_PAD_FL_MUST_CONNECT;
1562 	q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
1563 	subdev->entity.ops = &cio2_media_ops;
1564 	subdev->internal_ops = &cio2_subdev_internal_ops;
1565 	r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
1566 	if (r) {
1567 		dev_err(dev, "failed initialize subdev media entity (%d)\n", r);
1568 		goto fail_subdev_media_entity;
1569 	}
1570 
1571 	q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
1572 	vdev->entity.ops = &cio2_video_entity_ops;
1573 	r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
1574 	if (r) {
1575 		dev_err(dev, "failed initialize videodev media entity (%d)\n",
1576 			r);
1577 		goto fail_vdev_media_entity;
1578 	}
1579 
1580 	/* Initialize subdev */
1581 	v4l2_subdev_init(subdev, &cio2_subdev_ops);
1582 	subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
1583 	subdev->owner = THIS_MODULE;
1584 	snprintf(subdev->name, sizeof(subdev->name),
1585 		 CIO2_ENTITY_NAME " %td", q - cio2->queue);
1586 	subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
1587 	v4l2_set_subdevdata(subdev, cio2);
1588 	r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
1589 	if (r) {
1590 		dev_err(dev, "failed initialize subdev (%d)\n", r);
1591 		goto fail_subdev;
1592 	}
1593 
1594 	/* Initialize vbq */
1595 	vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1596 	vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
1597 	vbq->ops = &cio2_vb2_ops;
1598 	vbq->mem_ops = &vb2_dma_sg_memops;
1599 	vbq->buf_struct_size = sizeof(struct cio2_buffer);
1600 	vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1601 	vbq->min_buffers_needed = 1;
1602 	vbq->drv_priv = cio2;
1603 	vbq->lock = &q->lock;
1604 	r = vb2_queue_init(vbq);
1605 	if (r) {
1606 		dev_err(dev, "failed to initialize videobuf2 queue (%d)\n", r);
1607 		goto fail_subdev;
1608 	}
1609 
1610 	/* Initialize vdev */
1611 	snprintf(vdev->name, sizeof(vdev->name),
1612 		 "%s %td", CIO2_NAME, q - cio2->queue);
1613 	vdev->release = video_device_release_empty;
1614 	vdev->fops = &cio2_v4l2_fops;
1615 	vdev->ioctl_ops = &cio2_v4l2_ioctl_ops;
1616 	vdev->lock = &cio2->lock;
1617 	vdev->v4l2_dev = &cio2->v4l2_dev;
1618 	vdev->queue = &q->vbq;
1619 	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
1620 	video_set_drvdata(vdev, cio2);
1621 	r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
1622 	if (r) {
1623 		dev_err(dev, "failed to register video device (%d)\n", r);
1624 		goto fail_vdev;
1625 	}
1626 
1627 	/* Create link from CIO2 subdev to output node */
1628 	r = media_create_pad_link(
1629 		&subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0,
1630 		MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
1631 	if (r)
1632 		goto fail_link;
1633 
1634 	return 0;
1635 
1636 fail_link:
1637 	vb2_video_unregister_device(&q->vdev);
1638 fail_vdev:
1639 	v4l2_device_unregister_subdev(subdev);
1640 fail_subdev:
1641 	media_entity_cleanup(&vdev->entity);
1642 fail_vdev_media_entity:
1643 	media_entity_cleanup(&subdev->entity);
1644 fail_subdev_media_entity:
1645 	cio2_fbpt_exit(q, dev);
1646 fail_fbpt:
1647 	mutex_destroy(&q->subdev_lock);
1648 	mutex_destroy(&q->lock);
1649 
1650 	return r;
1651 }
1652 
1653 static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
1654 {
1655 	vb2_video_unregister_device(&q->vdev);
1656 	media_entity_cleanup(&q->vdev.entity);
1657 	v4l2_device_unregister_subdev(&q->subdev);
1658 	media_entity_cleanup(&q->subdev.entity);
1659 	cio2_fbpt_exit(q, &cio2->pci_dev->dev);
1660 	mutex_destroy(&q->subdev_lock);
1661 	mutex_destroy(&q->lock);
1662 }
1663 
1664 static int cio2_queues_init(struct cio2_device *cio2)
1665 {
1666 	int i, r;
1667 
1668 	for (i = 0; i < CIO2_QUEUES; i++) {
1669 		r = cio2_queue_init(cio2, &cio2->queue[i]);
1670 		if (r)
1671 			break;
1672 	}
1673 
1674 	if (i == CIO2_QUEUES)
1675 		return 0;
1676 
1677 	for (i--; i >= 0; i--)
1678 		cio2_queue_exit(cio2, &cio2->queue[i]);
1679 
1680 	return r;
1681 }
1682 
1683 static void cio2_queues_exit(struct cio2_device *cio2)
1684 {
1685 	unsigned int i;
1686 
1687 	for (i = 0; i < CIO2_QUEUES; i++)
1688 		cio2_queue_exit(cio2, &cio2->queue[i]);
1689 }
1690 
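/*
 * Check whether the fwnode graph has at least one endpoint, looking at the
 * primary fwnode first and then at its secondary.
 */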
1691 static int cio2_check_fwnode_graph(struct fwnode_handle *fwnode)
1692 {
1693 	struct fwnode_handle *endpoint;
1694 
1695 	if (IS_ERR_OR_NULL(fwnode))
1696 		return -EINVAL;
1697 
1698 	endpoint = fwnode_graph_get_next_endpoint(fwnode, NULL);
1699 	if (endpoint) {
1700 		fwnode_handle_put(endpoint);
1701 		return 0;
1702 	}
1703 
1704 	return cio2_check_fwnode_graph(fwnode->secondary);
1705 }
1706 
1707 /**************** PCI interface ****************/
1708 
1709 static int cio2_pci_probe(struct pci_dev *pci_dev,
1710 			  const struct pci_device_id *id)
1711 {
1712 	struct device *dev = &pci_dev->dev;
1713 	struct fwnode_handle *fwnode = dev_fwnode(dev);
1714 	struct cio2_device *cio2;
1715 	int r;
1716 
1717 	/*
1718 	 * On some platforms no connections to sensors are defined in firmware.
1719 	 * If the device has no endpoints, we can try to build them as
1720 	 * software_nodes parsed from the SSDB.
1721 	 */
1722 	r = cio2_check_fwnode_graph(fwnode);
1723 	if (r) {
1724 		if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) {
1725 			dev_err(dev, "fwnode graph has no endpoints connected\n");
1726 			return -EINVAL;
1727 		}
1728 
1729 		r = cio2_bridge_init(pci_dev);
1730 		if (r)
1731 			return r;
1732 	}
1733 
1734 	cio2 = devm_kzalloc(dev, sizeof(*cio2), GFP_KERNEL);
1735 	if (!cio2)
1736 		return -ENOMEM;
1737 	cio2->pci_dev = pci_dev;
1738 
1739 	r = pcim_enable_device(pci_dev);
1740 	if (r) {
1741 		dev_err(dev, "failed to enable device (%d)\n", r);
1742 		return r;
1743 	}
1744 
1745 	dev_info(dev, "device 0x%x (rev: 0x%x)\n",
1746 		 pci_dev->device, pci_dev->revision);
1747 
1748 	r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
1749 	if (r) {
1750 		dev_err(dev, "failed to remap I/O memory (%d)\n", r);
1751 		return -ENODEV;
1752 	}
1753 
1754 	cio2->base = pcim_iomap_table(pci_dev)[CIO2_PCI_BAR];
1755 
1756 	pci_set_drvdata(pci_dev, cio2);
1757 
1758 	pci_set_master(pci_dev);
1759 
1760 	r = dma_set_mask(&pci_dev->dev, CIO2_DMA_MASK);
1761 	if (r) {
1762 		dev_err(dev, "failed to set DMA mask (%d)\n", r);
1763 		return -ENODEV;
1764 	}
1765 
1766 	r = pci_enable_msi(pci_dev);
1767 	if (r) {
1768 		dev_err(dev, "failed to enable MSI (%d)\n", r);
1769 		return r;
1770 	}
1771 
1772 	r = cio2_fbpt_init_dummy(cio2);
1773 	if (r)
1774 		return r;
1775 
1776 	mutex_init(&cio2->lock);
1777 
1778 	cio2->media_dev.dev = dev;
1779 	strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
1780 		sizeof(cio2->media_dev.model));
1781 	cio2->media_dev.hw_revision = 0;
1782 
1783 	media_device_init(&cio2->media_dev);
1784 	r = media_device_register(&cio2->media_dev);
1785 	if (r < 0)
1786 		goto fail_mutex_destroy;
1787 
1788 	cio2->v4l2_dev.mdev = &cio2->media_dev;
1789 	r = v4l2_device_register(dev, &cio2->v4l2_dev);
1790 	if (r) {
1791 		dev_err(dev, "failed to register V4L2 device (%d)\n", r);
1792 		goto fail_media_device_unregister;
1793 	}
1794 
1795 	r = cio2_queues_init(cio2);
1796 	if (r)
1797 		goto fail_v4l2_device_unregister;
1798 
1799 	v4l2_async_nf_init(&cio2->notifier);
1800 
1801 	/* Register the notifier for the subdevices we care about */
1802 	r = cio2_parse_firmware(cio2);
1803 	if (r)
1804 		goto fail_clean_notifier;
1805 
1806 	r = devm_request_irq(dev, pci_dev->irq, cio2_irq, IRQF_SHARED,
1807 			     CIO2_NAME, cio2);
1808 	if (r) {
1809 		dev_err(dev, "failed to request IRQ (%d)\n", r);
1810 		goto fail_clean_notifier;
1811 	}
1812 
1813 	pm_runtime_put_noidle(dev);
1814 	pm_runtime_allow(dev);
1815 
1816 	return 0;
1817 
1818 fail_clean_notifier:
1819 	v4l2_async_nf_unregister(&cio2->notifier);
1820 	v4l2_async_nf_cleanup(&cio2->notifier);
1821 	cio2_queues_exit(cio2);
1822 fail_v4l2_device_unregister:
1823 	v4l2_device_unregister(&cio2->v4l2_dev);
1824 fail_media_device_unregister:
1825 	media_device_unregister(&cio2->media_dev);
1826 	media_device_cleanup(&cio2->media_dev);
1827 fail_mutex_destroy:
1828 	mutex_destroy(&cio2->lock);
1829 	cio2_fbpt_exit_dummy(cio2);
1830 
1831 	return r;
1832 }
1833 
1834 static void cio2_pci_remove(struct pci_dev *pci_dev)
1835 {
1836 	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1837 
1838 	media_device_unregister(&cio2->media_dev);
1839 	v4l2_async_nf_unregister(&cio2->notifier);
1840 	v4l2_async_nf_cleanup(&cio2->notifier);
1841 	cio2_queues_exit(cio2);
1842 	cio2_fbpt_exit_dummy(cio2);
1843 	v4l2_device_unregister(&cio2->v4l2_dev);
1844 	media_device_cleanup(&cio2->media_dev);
1845 	mutex_destroy(&cio2->lock);
1846 }
1847 
1848 static int __maybe_unused cio2_runtime_suspend(struct device *dev)
1849 {
1850 	struct pci_dev *pci_dev = to_pci_dev(dev);
1851 	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1852 	void __iomem *const base = cio2->base;
1853 	u16 pm;
1854 
1855 	writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C);
1856 	dev_dbg(dev, "cio2 runtime suspend.\n");
1857 
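	/* Enter D3hot: clear the PMCSR power state field, then set it to D3 */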
1858 	pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1859 	pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1860 	pm |= CIO2_PMCSR_D3;
1861 	pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
1862 
1863 	return 0;
1864 }
1865 
1866 static int __maybe_unused cio2_runtime_resume(struct device *dev)
1867 {
1868 	struct pci_dev *pci_dev = to_pci_dev(dev);
1869 	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1870 	void __iomem *const base = cio2->base;
1871 	u16 pm;
1872 
1873 	writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
1874 	dev_dbg(dev, "cio2 runtime resume.\n");
1875 
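	/* Return to D0: clear the PMCSR power state field (D0 is the value 0) */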
1876 	pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1877 	pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1878 	pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
1879 
1880 	return 0;
1881 }
1882 
1883 /*
1884  * Helper function to rotate the elements of a circular buffer left by
1885  * "start" positions, so that the element at index "start" comes first.
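 *
 * For example, with elems = 5 and start = 2,
 * {A, B, C, D, E} becomes {C, D, E, A, B}.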
1886  */
1887 static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
1888 {
1889 	struct {
1890 		size_t begin, end;
1891 	} arr[2] = {
1892 		{ 0, start - 1 },
1893 		{ start, elems - 1 },
1894 	};
1895 
1896 #define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1)
1897 
1898 	/* Loop as long as we have out-of-place entries */
1899 	while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) {
1900 		size_t size0, i;
1901 
1902 		/*
1903 		 * Find the number of entries that can be arranged on this
1904 		 * iteration.
1905 		 */
1906 		size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1]));
1907 
1908 		/* Swap the entries in two parts of the array. */
1909 		for (i = 0; i < size0; i++) {
1910 			u8 *d = ptr + elem_size * (arr[1].begin + i);
1911 			u8 *s = ptr + elem_size * (arr[0].begin + i);
1912 			size_t j;
1913 
1914 			for (j = 0; j < elem_size; j++)
1915 				swap(d[j], s[j]);
1916 		}
1917 
1918 		if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) {
1919 			/* The end of the first array remains unarranged. */
1920 			arr[0].begin += size0;
1921 		} else {
1922 			/*
1923 			 * The first array is fully arranged so we proceed
1924 			 * handling the next one.
1925 			 */
1926 			arr[0].begin = arr[1].begin;
1927 			arr[0].end = arr[1].begin + size0 - 1;
1928 			arr[1].begin += size0;
1929 		}
1930 	}
1931 }
1932 
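/*
 * Find the first queued buffer and rotate the FBPT (and the bufs[] array) so
 * that it ends up at index 0: after resume the hardware processes the FBPT
 * entries from the beginning.
 */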
1933 static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
1934 {
1935 	unsigned int i, j;
1936 
1937 	for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS;
1938 		i++, j = (j + 1) % CIO2_MAX_BUFFERS)
1939 		if (q->bufs[j])
1940 			break;
1941 
1942 	if (i == CIO2_MAX_BUFFERS)
1943 		return;
1944 
1945 	if (j) {
1946 		arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS,
1947 			CIO2_MAX_BUFFERS, j);
1948 		arrange(q->bufs, sizeof(struct cio2_buffer *),
1949 			CIO2_MAX_BUFFERS, j);
1950 	}
1951 
1952 	/*
1953 	 * DMA clears the valid bit when accessing the buffer.
1954 	 * When the stream is stopped in the suspend callback, some of the
1955 	 * buffers may be left in an invalid state. After resume, when DMA
1956 	 * meets an invalid buffer, it will halt and stop receiving new data.
1957 	 * To avoid DMA halting, set the valid bit for all buffers in FBPT.
1958 	 */
1959 	for (i = 0; i < CIO2_MAX_BUFFERS; i++)
1960 		cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
1961 }
1962 
1963 static int __maybe_unused cio2_suspend(struct device *dev)
1964 {
1965 	struct pci_dev *pci_dev = to_pci_dev(dev);
1966 	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1967 	struct cio2_queue *q = cio2->cur_queue;
1968 	int r;
1969 
1970 	dev_dbg(dev, "cio2 suspend\n");
1971 	if (!cio2->streaming)
1972 		return 0;
1973 
1974 	/* Stop stream */
1975 	r = v4l2_subdev_call(q->sensor, video, s_stream, 0);
1976 	if (r) {
1977 		dev_err(dev, "failed to stop sensor streaming\n");
1978 		return r;
1979 	}
1980 
1981 	cio2_hw_exit(cio2, q);
1982 	synchronize_irq(pci_dev->irq);
1983 
1984 	pm_runtime_force_suspend(dev);
1985 
1986 	/*
1987 	 * Upon resume, hw starts to process the fbpt entries from the beginning,
1988 	 * so relocate the queued buffers to the fbpt head before suspend.
1989 	 */
1990 	cio2_fbpt_rearrange(cio2, q);
1991 	q->bufs_first = 0;
1992 	q->bufs_next = 0;
1993 
1994 	return 0;
1995 }
1996 
1997 static int __maybe_unused cio2_resume(struct device *dev)
1998 {
1999 	struct cio2_device *cio2 = dev_get_drvdata(dev);
2000 	struct cio2_queue *q = cio2->cur_queue;
2001 	int r;
2002 
2003 	dev_dbg(dev, "cio2 resume\n");
2004 	if (!cio2->streaming)
2005 		return 0;
2006 	/* Start stream */
2007 	r = pm_runtime_force_resume(dev);
2008 	if (r < 0) {
2009 		dev_err(dev, "failed to set power state (%d)\n", r);
2010 		return r;
2011 	}
2012 
2013 	r = cio2_hw_init(cio2, q);
2014 	if (r) {
2015 		dev_err(dev, "failed to initialize cio2 hw\n");
2016 		return r;
2017 	}
2018 
2019 	r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
2020 	if (r) {
2021 		dev_err(dev, "failed to start sensor streaming\n");
2022 		cio2_hw_exit(cio2, q);
2023 	}
2024 
2025 	return r;
2026 }
2027 
2028 static const struct dev_pm_ops cio2_pm_ops = {
2029 	SET_RUNTIME_PM_OPS(&cio2_runtime_suspend, &cio2_runtime_resume, NULL)
2030 	SET_SYSTEM_SLEEP_PM_OPS(&cio2_suspend, &cio2_resume)
2031 };
2032 
2033 static const struct pci_device_id cio2_pci_id_table[] = {
2034 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
2035 	{ }
2036 };
2037 
2038 MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);
2039 
2040 static struct pci_driver cio2_pci_driver = {
2041 	.name = CIO2_NAME,
2042 	.id_table = cio2_pci_id_table,
2043 	.probe = cio2_pci_probe,
2044 	.remove = cio2_pci_remove,
2045 	.driver = {
2046 		.pm = &cio2_pm_ops,
2047 	},
2048 };
2049 
2050 module_pci_driver(cio2_pci_driver);
2051 
2052 MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
2053 MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
2054 MODULE_AUTHOR("Jian Xu Zheng");
2055 MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
2056 MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
2057 MODULE_LICENSE("GPL v2");
2058 MODULE_DESCRIPTION("IPU3 CIO2 driver");
2059