1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2017 Intel Corporation
4 *
5 * Based partially on Intel IPU4 driver written by
6 * Sakari Ailus <sakari.ailus@linux.intel.com>
7 * Samu Onkalo <samu.onkalo@intel.com>
8 * Jouni Högander <jouni.hogander@intel.com>
9 * Jouni Ukkonen <jouni.ukkonen@intel.com>
10 * Antti Laakso <antti.laakso@intel.com>
11 * et al.
12 *
13 */
14
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/module.h>
18 #include <linux/pci.h>
19 #include <linux/pm_runtime.h>
20 #include <linux/property.h>
21 #include <linux/vmalloc.h>
22 #include <media/v4l2-ctrls.h>
23 #include <media/v4l2-device.h>
24 #include <media/v4l2-event.h>
25 #include <media/v4l2-fwnode.h>
26 #include <media/v4l2-ioctl.h>
27 #include <media/videobuf2-dma-sg.h>
28
29 #include "ipu3-cio2.h"
30
31 struct ipu3_cio2_fmt {
32 u32 mbus_code;
33 u32 fourcc;
34 u8 mipicode;
35 };
36
37 /*
38 * These are raw formats used in Intel's third generation of
39 * Image Processing Unit known as IPU3.
40 * 10-bit raw Bayer packed: 32 bytes for every 25 pixels,
41 * with the last 6 bits of each 32-byte block unused.
42 */
43 static const struct ipu3_cio2_fmt formats[] = {
44 { /* put default entry at beginning */
45 .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
46 .fourcc = V4L2_PIX_FMT_IPU3_SGRBG10,
47 .mipicode = 0x2b,
48 }, {
49 .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
50 .fourcc = V4L2_PIX_FMT_IPU3_SGBRG10,
51 .mipicode = 0x2b,
52 }, {
53 .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
54 .fourcc = V4L2_PIX_FMT_IPU3_SBGGR10,
55 .mipicode = 0x2b,
56 }, {
57 .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
58 .fourcc = V4L2_PIX_FMT_IPU3_SRGGB10,
59 .mipicode = 0x2b,
60 },
61 };
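/*
 * Illustrative arithmetic for the packing described above (derived from the
 * comment, not from the datasheet): 25 pixels x 10 bits = 250 bits, stored
 * in a 32-byte (256-bit) block, leaving 6 bits of padding per block. All
 * four entries share MIPI CSI-2 data type 0x2b (RAW10); only the Bayer
 * order differs.
 */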
62
63 /*
64 * cio2_find_format - look up color format by fourcc and/or media bus code
65 * @pixelformat: fourcc to match, ignored if null
66 * @mbus_code: media bus code to match, ignored if null
67 */
68 static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
69 const u32 *mbus_code)
70 {
71 unsigned int i;
72
73 for (i = 0; i < ARRAY_SIZE(formats); i++) {
74 if (pixelformat && *pixelformat != formats[i].fourcc)
75 continue;
76 if (mbus_code && *mbus_code != formats[i].mbus_code)
77 continue;
78
79 return &formats[i];
80 }
81
82 return NULL;
83 }
84
85 static inline u32 cio2_bytesperline(const unsigned int width)
86 {
87 /*
88 * 64 bytes for every 50 pixels, the line length
89 * in bytes is a multiple of 64 (line end alignment).
90 */
91 return DIV_ROUND_UP(width, 50) * 64;
92 }
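/*
 * Worked example (illustrative only): for the 4224-pixel maximum width
 * accepted by cio2_v4l2_try_fmt() below, DIV_ROUND_UP(4224, 50) = 85
 * blocks, so bytesperline = 85 * 64 = 5440 bytes.
 */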
93
94 /**************** FBPT operations ****************/
95
96 static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
97 {
98 if (cio2->dummy_lop) {
99 dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
100 cio2->dummy_lop, cio2->dummy_lop_bus_addr);
101 cio2->dummy_lop = NULL;
102 }
103 if (cio2->dummy_page) {
104 dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
105 cio2->dummy_page, cio2->dummy_page_bus_addr);
106 cio2->dummy_page = NULL;
107 }
108 }
109
110 static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
111 {
112 unsigned int i;
113
114 cio2->dummy_page = dma_alloc_coherent(&cio2->pci_dev->dev,
115 CIO2_PAGE_SIZE,
116 &cio2->dummy_page_bus_addr,
117 GFP_KERNEL);
118 cio2->dummy_lop = dma_alloc_coherent(&cio2->pci_dev->dev,
119 CIO2_PAGE_SIZE,
120 &cio2->dummy_lop_bus_addr,
121 GFP_KERNEL);
122 if (!cio2->dummy_page || !cio2->dummy_lop) {
123 cio2_fbpt_exit_dummy(cio2);
124 return -ENOMEM;
125 }
126 /*
127 * A List of Pointers (LOP) contains 1024 32-bit pointers, each to a 4 KB page.
128 * Initialize each entry to dummy_page bus base address.
129 */
130 for (i = 0; i < CIO2_PAGE_SIZE / sizeof(*cio2->dummy_lop); i++)
131 cio2->dummy_lop[i] = cio2->dummy_page_bus_addr >> PAGE_SHIFT;
132
133 return 0;
134 }
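/*
 * Sizing note (assuming CIO2_PAGE_SIZE is 4096, as the LOP comment above
 * implies): one LOP page holds 4096 / 4 = 1024 page pointers, so a single
 * LOP maps 1024 * 4 KiB = 4 MiB of buffer memory.
 */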
135
136 static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
137 struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
138 {
139 /*
140 * The CPU first initializes some fields in fbpt, then sets
141 * the VALID bit, this barrier is to ensure that the DMA(device)
142 * does not see the VALID bit enabled before other fields are
143 * initialized; otherwise it could lead to havoc.
144 */
145 dma_wmb();
146
147 /*
148 * Request interrupts for start and completion
149 * Valid bit is applicable only to 1st entry
150 */
151 entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
152 CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
153 }
154
155 /* Initialize fbpt entries to point to dummy frame */
156 static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
157 struct cio2_fbpt_entry
158 entry[CIO2_MAX_LOPS])
159 {
160 unsigned int i;
161
162 entry[0].first_entry.first_page_offset = 0;
163 entry[1].second_entry.num_of_pages =
164 CIO2_PAGE_SIZE / sizeof(u32) * CIO2_MAX_LOPS;
165 entry[1].second_entry.last_page_available_bytes = CIO2_PAGE_SIZE - 1;
166
167 for (i = 0; i < CIO2_MAX_LOPS; i++)
168 entry[i].lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT;
169
170 cio2_fbpt_entry_enable(cio2, entry);
171 }
172
173 /* Initialize fbpt entries to point to a given buffer */
174 static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
175 struct cio2_buffer *b,
176 struct cio2_fbpt_entry
177 entry[CIO2_MAX_LOPS])
178 {
179 struct vb2_buffer *vb = &b->vbb.vb2_buf;
180 unsigned int length = vb->planes[0].length;
181 int remaining, i;
182
183 entry[0].first_entry.first_page_offset = b->offset;
184 remaining = length + entry[0].first_entry.first_page_offset;
185 entry[1].second_entry.num_of_pages =
186 DIV_ROUND_UP(remaining, CIO2_PAGE_SIZE);
187 /*
188 * last_page_available_bytes has the offset of the last byte in the
189 * last page which is still accessible by DMA. DMA cannot access
190 * beyond this point. Valid range for this is from 0 to 4095.
191 * 0 indicates 1st byte in the page is DMA accessible.
192 * 4095 (CIO2_PAGE_SIZE - 1) means every single byte in the last page
193 * is available for DMA transfer.
194 */
195 entry[1].second_entry.last_page_available_bytes =
196 (remaining & ~PAGE_MASK) ?
197 (remaining & ~PAGE_MASK) - 1 :
198 CIO2_PAGE_SIZE - 1;
199 /* Fill FBPT */
200 remaining = length;
201 i = 0;
202 while (remaining > 0) {
203 entry->lop_page_addr = b->lop_bus_addr[i] >> PAGE_SHIFT;
204 remaining -= CIO2_PAGE_SIZE / sizeof(u32) * CIO2_PAGE_SIZE;
205 entry++;
206 i++;
207 }
208
209 /*
210 * The first FBPT entry past the end of the buffer should still point to a valid LOP
211 */
212 entry->lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT;
213
214 cio2_fbpt_entry_enable(cio2, entry);
215 }
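/*
 * Worked example for the fields above (illustrative, assuming a 4096-byte
 * CIO2_PAGE_SIZE and a zero first_page_offset): a 10000-byte plane gives
 * num_of_pages = DIV_ROUND_UP(10000, 4096) = 3 and
 * last_page_available_bytes = (10000 % 4096) - 1 = 1807, i.e. bytes
 * 0..1807 of the third page are DMA accessible.
 */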
216
217 static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
218 {
219 struct device *dev = &cio2->pci_dev->dev;
220
221 q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
222 GFP_KERNEL);
223 if (!q->fbpt)
224 return -ENOMEM;
225
226 memset(q->fbpt, 0, CIO2_FBPT_SIZE);
227
228 return 0;
229 }
230
231 static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
232 {
233 dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
234 }
235
236 /**************** CSI2 hardware setup ****************/
237
238 /*
239 * The CSI2 receiver has several parameters affecting
240 * the receiver timings. These depend on the MIPI bus frequency
241 * F in Hz (sensor transmitter rate) as follows:
242 * register value = (A/1e9 + B * UI) / COUNT_ACC
243 * where
244 * UI = 1 / (2 * F) in seconds
245 * COUNT_ACC = counter accuracy in seconds
246 * For IPU3 COUNT_ACC = 0.0625
247 *
248 * A and B are coefficients from the table below,
249 * depending whether the register minimum or maximum value is
250 * calculated.
251 * Minimum Maximum
252 * Clock lane A B A B
253 * reg_rx_csi_dly_cnt_termen_clane 0 0 38 0
254 * reg_rx_csi_dly_cnt_settle_clane 95 -8 300 -16
255 * Data lanes
256 * reg_rx_csi_dly_cnt_termen_dlane0 0 0 35 4
257 * reg_rx_csi_dly_cnt_settle_dlane0 85 -2 145 -6
258 * reg_rx_csi_dly_cnt_termen_dlane1 0 0 35 4
259 * reg_rx_csi_dly_cnt_settle_dlane1 85 -2 145 -6
260 * reg_rx_csi_dly_cnt_termen_dlane2 0 0 35 4
261 * reg_rx_csi_dly_cnt_settle_dlane2 85 -2 145 -6
262 * reg_rx_csi_dly_cnt_termen_dlane3 0 0 35 4
263 * reg_rx_csi_dly_cnt_settle_dlane3 85 -2 145 -6
264 *
265 * We use the minimum values of both A and B.
266 */
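/*
 * Worked example (the 400 MHz link frequency is an arbitrary illustration,
 * not a value from any particular sensor): UI = 1 / (2 * 400e6) = 1.25 ns.
 * Treating COUNT_ACC as 0.0625 ns (which is what accinv = 16 in
 * cio2_rx_timing() implies), the minimum clock lane settle count with
 * A = 95 and B = -8 is (95 - 8 * 1.25) / 0.0625 = 1360.
 */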
267
268 /*
269 * shift for keeping value range suitable for 32-bit integer arithmetic
270 */
271 #define LIMIT_SHIFT 8
272
273 static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
274 {
275 const u32 accinv = 16; /* inverse of counter resolution */
276 const u32 uiinv = 500000000; /* 1e9 / 2 */
277 s32 r;
278
279 freq >>= LIMIT_SHIFT;
280
281 if (WARN_ON(freq <= 0 || freq > S32_MAX))
282 return def;
283 /*
284 * b could be 0, -2 or -8, so |accinv * b| is always
285 * less than (1 << ds) and thus |r| < 500000000.
286 */
287 r = accinv * b * (uiinv >> LIMIT_SHIFT);
288 r = r / (s32)freq;
289 /* max value of a is 95 */
290 r += accinv * a;
291
292 return r;
293 };
294
295 /* Calculate the delay value for termination enable of clock lane HS Rx */
296 static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
297 struct cio2_csi2_timing *timing)
298 {
299 struct device *dev = &cio2->pci_dev->dev;
300 struct v4l2_querymenu qm = {.id = V4L2_CID_LINK_FREQ, };
301 struct v4l2_ctrl *link_freq;
302 s64 freq;
303 int r;
304
305 if (!q->sensor)
306 return -ENODEV;
307
308 link_freq = v4l2_ctrl_find(q->sensor->ctrl_handler, V4L2_CID_LINK_FREQ);
309 if (!link_freq) {
310 dev_err(dev, "failed to find LINK_FREQ\n");
311 return -EPIPE;
312 }
313
314 qm.index = v4l2_ctrl_g_ctrl(link_freq);
315 r = v4l2_querymenu(q->sensor->ctrl_handler, &qm);
316 if (r) {
317 dev_err(dev, "failed to get menu item\n");
318 return r;
319 }
320
321 if (!qm.value) {
322 dev_err(dev, "error invalid link_freq\n");
323 return -EINVAL;
324 }
325 freq = qm.value;
326
327 timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
328 CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
329 freq,
330 CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
331 timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
332 CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
333 freq,
334 CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
335 timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
336 CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
337 freq,
338 CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
339 timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
340 CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
341 freq,
342 CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
343
344 dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
345 dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
346 dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
347 dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);
348
349 return 0;
350 };
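/*
 * Note: V4L2_CID_LINK_FREQ is an integer-menu control, so the value read
 * back through v4l2_querymenu() above is the link frequency in Hz; a menu
 * entry of e.g. 400000000 would give the 400 MHz figure used in the timing
 * example above.
 */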
351
352 static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
353 {
354 static const int NUM_VCS = 4;
355 static const int SID; /* Stream id */
356 static const int ENTRY;
357 static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
358 CIO2_FBPT_SUBENTRY_UNIT);
359 const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
360 const struct ipu3_cio2_fmt *fmt;
361 void __iomem *const base = cio2->base;
362 u8 lanes, csi2bus = q->csi2.port;
363 u8 sensor_vc = SENSOR_VIR_CH_DFLT;
364 struct cio2_csi2_timing timing;
365 int i, r;
366
367 fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
368 if (!fmt)
369 return -EINVAL;
370
371 lanes = q->csi2.lanes;
372
373 r = cio2_csi2_calc_timing(cio2, q, &timing);
374 if (r)
375 return r;
376
377 writel(timing.clk_termen, q->csi_rx_base +
378 CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
379 writel(timing.clk_settle, q->csi_rx_base +
380 CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
381
382 for (i = 0; i < lanes; i++) {
383 writel(timing.dat_termen, q->csi_rx_base +
384 CIO2_REG_CSIRX_DLY_CNT_TERMEN(i));
385 writel(timing.dat_settle, q->csi_rx_base +
386 CIO2_REG_CSIRX_DLY_CNT_SETTLE(i));
387 }
388
389 writel(CIO2_PBM_WMCTRL1_MIN_2CK |
390 CIO2_PBM_WMCTRL1_MID1_2CK |
391 CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1);
392 writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT |
393 CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT |
394 CIO2_PBM_WMCTRL2_OBFFWM_2CK <<
395 CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT |
396 CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT |
397 CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2);
398 writel(CIO2_PBM_ARB_CTRL_LANES_DIV <<
399 CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT |
400 CIO2_PBM_ARB_CTRL_LE_EN |
401 CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN <<
402 CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT |
403 CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP <<
404 CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT,
405 base + CIO2_REG_PBM_ARB_CTRL);
406 writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK,
407 q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
408 writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK,
409 q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);
410
411 writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
412 writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);
413
414 /* Configure MIPI backend */
415 for (i = 0; i < NUM_VCS; i++)
416 writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));
417
418 /* There are 16 short packet LUT entries */
419 for (i = 0; i < 16; i++)
420 writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
421 q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
422 writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
423 q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);
424
425 writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE);
426 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
427 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
428 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
429 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
430 writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE);
431
432 writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) |
433 CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN),
434 base + CIO2_REG_INT_EN);
435
436 writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B)
437 << CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT,
438 base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus));
439 writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT |
440 sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT |
441 fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT,
442 q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
443 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
444 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
445 writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus));
446
447 writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
448 writel(CIO2_CGC_PRIM_TGE |
449 CIO2_CGC_SIDE_TGE |
450 CIO2_CGC_XOSC_TGE |
451 CIO2_CGC_D3I3_TGE |
452 CIO2_CGC_CSI2_INTERFRAME_TGE |
453 CIO2_CGC_CSI2_PORT_DCGE |
454 CIO2_CGC_SIDE_DCGE |
455 CIO2_CGC_PRIM_DCGE |
456 CIO2_CGC_ROSC_DCGE |
457 CIO2_CGC_XOSC_DCGE |
458 CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT |
459 CIO2_CGC_CSI_CLKGATE_HOLDOFF
460 << CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC);
461 writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL);
462 writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT |
463 CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
464 CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT |
465 CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
466 base + CIO2_REG_LTRVAL01);
467 writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT |
468 CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
469 CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT |
470 CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
471 base + CIO2_REG_LTRVAL23);
472
473 for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
474 writel(0, base + CIO2_REG_CDMABA(i));
475 writel(0, base + CIO2_REG_CDMAC0(i));
476 writel(0, base + CIO2_REG_CDMAC1(i));
477 }
478
479 /* Enable DMA */
480 writel(q->fbpt_bus_addr >> PAGE_SHIFT,
481 base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
482
483 writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
484 FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
485 CIO2_CDMAC0_DMA_INTR_ON_FE |
486 CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL |
487 CIO2_CDMAC0_DMA_EN |
488 CIO2_CDMAC0_DMA_INTR_ON_FS |
489 CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
490
491 writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT,
492 base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN));
493
494 writel(0, base + CIO2_REG_PBM_FOPN_ABORT);
495
496 writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT |
497 CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR |
498 CIO2_PXM_FRF_CFG_MSK_ECC_RE |
499 CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE,
500 base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));
501
502 /* Clear interrupts */
503 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
504 writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
505 writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
506 writel(~0, base + CIO2_REG_INT_STS);
507
508 /* Enable devices, starting from the last device in the pipe */
509 writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
510 writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
511
512 return 0;
513 }
514
515 static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
516 {
517 void __iomem *base = cio2->base;
518 unsigned int i, maxloops = 1000;
519
520 /* Disable CSI receiver and MIPI backend devices */
521 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
522 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
523 writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
524 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
525
526 /* Halt DMA */
527 writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
528 do {
529 if (readl(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN)) &
530 CIO2_CDMAC0_DMA_HALTED)
531 break;
532 usleep_range(1000, 2000);
533 } while (--maxloops);
534 if (!maxloops)
535 dev_err(&cio2->pci_dev->dev,
536 "DMA %i can not be halted\n", CIO2_DMA_CHAN);
537
538 for (i = 0; i < CIO2_NUM_PORTS; i++) {
539 writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
540 CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
541 writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
542 CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
543 }
544 }
545
546 static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
547 {
548 struct device *dev = &cio2->pci_dev->dev;
549 struct cio2_queue *q = cio2->cur_queue;
550 int buffers_found = 0;
551 u64 ns = ktime_get_ns();
552
553 if (dma_chan >= CIO2_QUEUES) {
554 dev_err(dev, "bad DMA channel %i\n", dma_chan);
555 return;
556 }
557
558 /* Find out which buffer(s) are ready */
559 do {
560 struct cio2_fbpt_entry *const entry =
561 &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
562 struct cio2_buffer *b;
563
564 if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID)
565 break;
566
567 b = q->bufs[q->bufs_first];
568 if (b) {
569 unsigned int bytes = entry[1].second_entry.num_of_bytes;
570
571 q->bufs[q->bufs_first] = NULL;
572 atomic_dec(&q->bufs_queued);
573 dev_dbg(&cio2->pci_dev->dev,
574 "buffer %i done\n", b->vbb.vb2_buf.index);
575
576 b->vbb.vb2_buf.timestamp = ns;
577 b->vbb.field = V4L2_FIELD_NONE;
578 b->vbb.sequence = atomic_read(&q->frame_sequence);
579 if (b->vbb.vb2_buf.planes[0].length != bytes)
580 dev_warn(dev, "buffer length is %d received %d\n",
581 b->vbb.vb2_buf.planes[0].length,
582 bytes);
583 vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
584 }
585 atomic_inc(&q->frame_sequence);
586 cio2_fbpt_entry_init_dummy(cio2, entry);
587 q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
588 buffers_found++;
589 } while (1);
590
591 if (buffers_found == 0)
592 dev_warn(&cio2->pci_dev->dev,
593 "no ready buffers found on DMA channel %u\n",
594 dma_chan);
595 }
596
597 static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
598 {
599 /*
600 * For the user space camera control algorithms it is essential
601 * to know when the reception of a frame has begun. That's often
602 * the best timing information to get from the hardware.
603 */
604 struct v4l2_event event = {
605 .type = V4L2_EVENT_FRAME_SYNC,
606 .u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
607 };
608
609 v4l2_event_queue(q->subdev.devnode, &event);
610 }
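/*
 * A minimal user-space sketch for consuming this event on the CIO2 subdev
 * node (hypothetical subdev_fd, poll() and error handling omitted):
 *
 *	struct v4l2_event_subscription sub = {
 *		.type = V4L2_EVENT_FRAME_SYNC,
 *	};
 *	struct v4l2_event ev;
 *
 *	ioctl(subdev_fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
 *	ioctl(subdev_fd, VIDIOC_DQEVENT, &ev);
 *	printf("frame %u started\n", ev.u.frame_sync.frame_sequence);
 */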
611
612 static const char *const cio2_irq_errs[] = {
613 "single packet header error corrected",
614 "multiple packet header errors detected",
615 "payload checksum (CRC) error",
616 "fifo overflow",
617 "reserved short packet data type detected",
618 "reserved long packet data type detected",
619 "incomplete long packet detected",
620 "frame sync error",
621 "line sync error",
622 "DPHY start of transmission error",
623 "DPHY synchronization error",
624 "escape mode error",
625 "escape mode trigger event",
626 "escape mode ultra-low power state for data lane(s)",
627 "escape mode ultra-low power state exit for clock lane",
628 "inter-frame short packet discarded",
629 "inter-frame long packet discarded",
630 "non-matching Long Packet stalled",
631 };
632
633 static const char *const cio2_port_errs[] = {
634 "ECC recoverable",
635 "DPHY not recoverable",
636 "ECC not recoverable",
637 "CRC error",
638 "INTERFRAMEDATA",
639 "PKT2SHORT",
640 "PKT2LONG",
641 };
642
643 static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
644 {
645 void __iomem *const base = cio2->base;
646 struct device *dev = &cio2->pci_dev->dev;
647
648 if (int_status & CIO2_INT_IOOE) {
649 /*
650 * Interrupt on Output Error:
651 * 1) SRAM is full and FS received, or
652 * 2) An invalid bit detected by DMA.
653 */
654 u32 oe_status, oe_clear;
655
656 oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE);
657 oe_status = oe_clear;
658
659 if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) {
660 dev_err(dev, "DMA output error: 0x%x\n",
661 (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK)
662 >> CIO2_INT_EXT_OE_DMAOE_SHIFT);
663 oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK;
664 }
665 if (oe_status & CIO2_INT_EXT_OE_OES_MASK) {
666 dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n",
667 (oe_status & CIO2_INT_EXT_OE_OES_MASK)
668 >> CIO2_INT_EXT_OE_OES_SHIFT);
669 oe_status &= ~CIO2_INT_EXT_OE_OES_MASK;
670 }
671 writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE);
672 if (oe_status)
673 dev_warn(dev, "unknown interrupt 0x%x on OE\n",
674 oe_status);
675 int_status &= ~CIO2_INT_IOOE;
676 }
677
678 if (int_status & CIO2_INT_IOC_MASK) {
679 /* DMA IO done -- frame ready */
680 u32 clr = 0;
681 unsigned int d;
682
683 for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
684 if (int_status & CIO2_INT_IOC(d)) {
685 clr |= CIO2_INT_IOC(d);
686 cio2_buffer_done(cio2, d);
687 }
688 int_status &= ~clr;
689 }
690
691 if (int_status & CIO2_INT_IOS_IOLN_MASK) {
692 /* DMA IO starts or reached specified line */
693 u32 clr = 0;
694 unsigned int d;
695
696 for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
697 if (int_status & CIO2_INT_IOS_IOLN(d)) {
698 clr |= CIO2_INT_IOS_IOLN(d);
699 if (d == CIO2_DMA_CHAN)
700 cio2_queue_event_sof(cio2,
701 cio2->cur_queue);
702 }
703 int_status &= ~clr;
704 }
705
706 if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
707 /* CSI2 receiver (error) interrupt */
708 u32 ie_status, ie_clear;
709 unsigned int port;
710
711 ie_clear = readl(base + CIO2_REG_INT_STS_EXT_IE);
712 ie_status = ie_clear;
713
714 for (port = 0; port < CIO2_NUM_PORTS; port++) {
715 u32 port_status = (ie_status >> (port * 8)) & 0xff;
716 u32 err_mask = BIT_MASK(ARRAY_SIZE(cio2_port_errs)) - 1;
717 void __iomem *const csi_rx_base =
718 base + CIO2_REG_PIPE_BASE(port);
719 unsigned int i;
720
721 while (port_status & err_mask) {
722 i = ffs(port_status) - 1;
723 dev_err(dev, "port %i error %s\n",
724 port, cio2_port_errs[i]);
725 ie_status &= ~BIT(port * 8 + i);
726 port_status &= ~BIT(i);
727 }
728
729 if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
730 u32 csi2_status, csi2_clear;
731
732 csi2_status = readl(csi_rx_base +
733 CIO2_REG_IRQCTRL_STATUS);
734 csi2_clear = csi2_status;
735 err_mask =
736 BIT_MASK(ARRAY_SIZE(cio2_irq_errs)) - 1;
737
738 while (csi2_status & err_mask) {
739 i = ffs(csi2_status) - 1;
740 dev_err(dev,
741 "CSI-2 receiver port %i: %s\n",
742 port, cio2_irq_errs[i]);
743 csi2_status &= ~BIT(i);
744 }
745
746 writel(csi2_clear,
747 csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
748 if (csi2_status)
749 dev_warn(dev,
750 "unknown CSI2 error 0x%x on port %i\n",
751 csi2_status, port);
752
753 ie_status &= ~CIO2_INT_EXT_IE_IRQ(port);
754 }
755 }
756
757 writel(ie_clear, base + CIO2_REG_INT_STS_EXT_IE);
758 if (ie_status)
759 dev_warn(dev, "unknown interrupt 0x%x on IE\n",
760 ie_status);
761
762 int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
763 }
764
765 if (int_status)
766 dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status);
767 }
768
769 static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
770 {
771 struct cio2_device *cio2 = cio2_ptr;
772 void __iomem *const base = cio2->base;
773 struct device *dev = &cio2->pci_dev->dev;
774 u32 int_status;
775
776 int_status = readl(base + CIO2_REG_INT_STS);
777 dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status);
778 if (!int_status)
779 return IRQ_NONE;
780
781 do {
782 writel(int_status, base + CIO2_REG_INT_STS);
783 cio2_irq_handle_once(cio2, int_status);
784 int_status = readl(base + CIO2_REG_INT_STS);
785 if (int_status)
786 dev_dbg(dev, "pending status 0x%x\n", int_status);
787 } while (int_status);
788
789 return IRQ_HANDLED;
790 }
791
792 /**************** Videobuf2 interface ****************/
793
794 static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
795 enum vb2_buffer_state state)
796 {
797 unsigned int i;
798
799 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
800 if (q->bufs[i]) {
801 atomic_dec(&q->bufs_queued);
802 vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
803 state);
804 }
805 }
806 }
807
808 static int cio2_vb2_queue_setup(struct vb2_queue *vq,
809 unsigned int *num_buffers,
810 unsigned int *num_planes,
811 unsigned int sizes[],
812 struct device *alloc_devs[])
813 {
814 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
815 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
816 unsigned int i;
817
818 *num_planes = q->format.num_planes;
819
820 for (i = 0; i < *num_planes; ++i) {
821 sizes[i] = q->format.plane_fmt[i].sizeimage;
822 alloc_devs[i] = &cio2->pci_dev->dev;
823 }
824
825 *num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);
826
827 /* Initialize buffer queue */
828 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
829 q->bufs[i] = NULL;
830 cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
831 }
832 atomic_set(&q->bufs_queued, 0);
833 q->bufs_first = 0;
834 q->bufs_next = 0;
835
836 return 0;
837 }
838
839 /* Called after each buffer is allocated */
840 static int cio2_vb2_buf_init(struct vb2_buffer *vb)
841 {
842 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
843 struct device *dev = &cio2->pci_dev->dev;
844 struct cio2_buffer *b =
845 container_of(vb, struct cio2_buffer, vbb.vb2_buf);
846 static const unsigned int entries_per_page =
847 CIO2_PAGE_SIZE / sizeof(u32);
848 unsigned int pages = DIV_ROUND_UP(vb->planes[0].length, CIO2_PAGE_SIZE);
849 unsigned int lops = DIV_ROUND_UP(pages + 1, entries_per_page);
850 struct sg_table *sg;
851 struct sg_page_iter sg_iter;
852 int i, j;
853
854 if (lops <= 0 || lops > CIO2_MAX_LOPS) {
855 dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
856 vb->planes[0].length);
857 return -ENOSPC; /* Should never happen */
858 }
859
860 memset(b->lop, 0, sizeof(b->lop));
861 /* Allocate LOP table */
862 for (i = 0; i < lops; i++) {
863 b->lop[i] = dma_alloc_coherent(dev, CIO2_PAGE_SIZE,
864 &b->lop_bus_addr[i], GFP_KERNEL);
865 if (!b->lop[i])
866 goto fail;
867 }
868
869 /* Fill LOP */
870 sg = vb2_dma_sg_plane_desc(vb, 0);
871 if (!sg)
872 return -ENOMEM;
873
874 if (sg->nents && sg->sgl)
875 b->offset = sg->sgl->offset;
876
877 i = j = 0;
878 for_each_sg_page(sg->sgl, &sg_iter, sg->nents, 0) {
879 if (!pages--)
880 break;
881 b->lop[i][j] = sg_page_iter_dma_address(&sg_iter) >> PAGE_SHIFT;
882 j++;
883 if (j == entries_per_page) {
884 i++;
885 j = 0;
886 }
887 }
888
889 b->lop[i][j] = cio2->dummy_page_bus_addr >> PAGE_SHIFT;
890 return 0;
891 fail:
892 for (i--; i >= 0; i--)
893 dma_free_coherent(dev, CIO2_PAGE_SIZE,
894 b->lop[i], b->lop_bus_addr[i]);
895 return -ENOMEM;
896 }
897
898 /* Transfer buffer ownership to cio2 */
899 static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
900 {
901 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
902 struct cio2_queue *q =
903 container_of(vb->vb2_queue, struct cio2_queue, vbq);
904 struct cio2_buffer *b =
905 container_of(vb, struct cio2_buffer, vbb.vb2_buf);
906 struct cio2_fbpt_entry *entry;
907 unsigned long flags;
908 unsigned int i, j, next = q->bufs_next;
909 int bufs_queued = atomic_inc_return(&q->bufs_queued);
910 u32 fbpt_rp;
911
912 dev_dbg(&cio2->pci_dev->dev, "queue buffer %d\n", vb->index);
913
914 /*
915 * This code queues the buffer to the CIO2 DMA engine, which starts
916 * running once streaming has started. It is possible that this code
917 * gets pre-empted due to increased CPU load. Upon this, the driver
918 * does not get an opportunity to queue new buffers to the CIO2 DMA
919 * engine. When the DMA engine encounters an FBPT entry without the
920 * VALID bit set, the DMA engine halts, which requires a restart of
921 * the DMA engine and sensor to continue streaming.
922 * This is undesirable, and also unlikely given that the DMA engine
923 * would need to process 32 FBPT entries before running into one
924 * without the VALID bit set. We try to mitigate this
925 * by disabling interrupts for the duration of this queueing.
926 */
927 local_irq_save(flags);
928
929 fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
930 >> CIO2_CDMARI_FBPT_RP_SHIFT)
931 & CIO2_CDMARI_FBPT_RP_MASK;
932
933 /*
934 * fbpt_rp is the FBPT entry that the DMA is currently working
935 * on, but since it could move to the next entry at any time,
936 * assume that it might already be there.
937 */
938 fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
939
940 if (bufs_queued <= 1 || fbpt_rp == next)
941 /* Buffers were drained */
942 next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
943
944 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
945 /*
946 * We have allocated CIO2_MAX_BUFFERS circularly for the
947 * hw, while the user has requested N buffers. The driver
948 * ensures N <= CIO2_MAX_BUFFERS and guarantees that whenever
949 * the user queues a buffer, there necessarily is a free entry.
950 */
951 if (!q->bufs[next]) {
952 q->bufs[next] = b;
953 entry = &q->fbpt[next * CIO2_MAX_LOPS];
954 cio2_fbpt_entry_init_buf(cio2, b, entry);
955 local_irq_restore(flags);
956 q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
957 for (j = 0; j < vb->num_planes; j++)
958 vb2_set_plane_payload(vb, j,
959 q->format.plane_fmt[j].sizeimage);
960 return;
961 }
962
963 dev_dbg(&cio2->pci_dev->dev, "entry %i was full!\n", next);
964 next = (next + 1) % CIO2_MAX_BUFFERS;
965 }
966
967 local_irq_restore(flags);
968 dev_err(&cio2->pci_dev->dev, "error: all cio2 entries were full!\n");
969 atomic_dec(&q->bufs_queued);
970 vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
971 }
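/*
 * Illustration of the read-pointer handling above: if CIO2_REG_CDMARI
 * reports entry 5, the code assumes the DMA may already have advanced to
 * entry 6, and if the queue had drained it starts refilling from entry 7
 * (all modulo CIO2_MAX_BUFFERS).
 */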
972
973 /* Called when each buffer is freed */
974 static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
975 {
976 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
977 struct cio2_buffer *b =
978 container_of(vb, struct cio2_buffer, vbb.vb2_buf);
979 unsigned int i;
980
981 /* Free LOP table */
982 for (i = 0; i < CIO2_MAX_LOPS; i++) {
983 if (b->lop[i])
984 dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
985 b->lop[i], b->lop_bus_addr[i]);
986 }
987 }
988
989 static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
990 {
991 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
992 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
993 int r;
994
995 cio2->cur_queue = q;
996 atomic_set(&q->frame_sequence, 0);
997
998 r = pm_runtime_get_sync(&cio2->pci_dev->dev);
999 if (r < 0) {
1000 dev_info(&cio2->pci_dev->dev, "failed to set power %d\n", r);
1001 pm_runtime_put_noidle(&cio2->pci_dev->dev);
1002 return r;
1003 }
1004
1005 r = media_pipeline_start(&q->vdev.entity, &q->pipe);
1006 if (r)
1007 goto fail_pipeline;
1008
1009 r = cio2_hw_init(cio2, q);
1010 if (r)
1011 goto fail_hw;
1012
1013 /* Start streaming on sensor */
1014 r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
1015 if (r)
1016 goto fail_csi2_subdev;
1017
1018 cio2->streaming = true;
1019
1020 return 0;
1021
1022 fail_csi2_subdev:
1023 cio2_hw_exit(cio2, q);
1024 fail_hw:
1025 media_pipeline_stop(&q->vdev.entity);
1026 fail_pipeline:
1027 dev_dbg(&cio2->pci_dev->dev, "failed to start streaming (%d)\n", r);
1028 cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
1029 pm_runtime_put(&cio2->pci_dev->dev);
1030
1031 return r;
1032 }
1033
1034 static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
1035 {
1036 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
1037 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
1038
1039 if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
1040 dev_err(&cio2->pci_dev->dev,
1041 "failed to stop sensor streaming\n");
1042
1043 cio2_hw_exit(cio2, q);
1044 synchronize_irq(cio2->pci_dev->irq);
1045 cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
1046 media_pipeline_stop(&q->vdev.entity);
1047 pm_runtime_put(&cio2->pci_dev->dev);
1048 cio2->streaming = false;
1049 }
1050
1051 static const struct vb2_ops cio2_vb2_ops = {
1052 .buf_init = cio2_vb2_buf_init,
1053 .buf_queue = cio2_vb2_buf_queue,
1054 .buf_cleanup = cio2_vb2_buf_cleanup,
1055 .queue_setup = cio2_vb2_queue_setup,
1056 .start_streaming = cio2_vb2_start_streaming,
1057 .stop_streaming = cio2_vb2_stop_streaming,
1058 .wait_prepare = vb2_ops_wait_prepare,
1059 .wait_finish = vb2_ops_wait_finish,
1060 };
1061
1062 /**************** V4L2 interface ****************/
1063
1064 static int cio2_v4l2_querycap(struct file *file, void *fh,
1065 struct v4l2_capability *cap)
1066 {
1067 struct cio2_device *cio2 = video_drvdata(file);
1068
1069 strlcpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
1070 strlcpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
1071 snprintf(cap->bus_info, sizeof(cap->bus_info),
1072 "PCI:%s", pci_name(cio2->pci_dev));
1073
1074 return 0;
1075 }
1076
1077 static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
1078 struct v4l2_fmtdesc *f)
1079 {
1080 if (f->index >= ARRAY_SIZE(formats))
1081 return -EINVAL;
1082
1083 f->pixelformat = formats[f->index].fourcc;
1084
1085 return 0;
1086 }
1087
1088 /* The format is validated in cio2_video_link_validate() */
1089 static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
1090 {
1091 struct cio2_queue *q = file_to_cio2_queue(file);
1092
1093 f->fmt.pix_mp = q->format;
1094
1095 return 0;
1096 }
1097
1098 static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
1099 {
1100 const struct ipu3_cio2_fmt *fmt;
1101 struct v4l2_pix_format_mplane *mpix = &f->fmt.pix_mp;
1102
1103 fmt = cio2_find_format(&mpix->pixelformat, NULL);
1104 if (!fmt)
1105 fmt = &formats[0];
1106
1107 /* Only supports up to 4224x3136 */
1108 if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
1109 mpix->width = CIO2_IMAGE_MAX_WIDTH;
1110 if (mpix->height > CIO2_IMAGE_MAX_LENGTH)
1111 mpix->height = CIO2_IMAGE_MAX_LENGTH;
1112
1113 mpix->num_planes = 1;
1114 mpix->pixelformat = fmt->fourcc;
1115 mpix->colorspace = V4L2_COLORSPACE_RAW;
1116 mpix->field = V4L2_FIELD_NONE;
1117 memset(mpix->reserved, 0, sizeof(mpix->reserved));
1118 mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
1119 mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
1120 mpix->height;
1121 memset(mpix->plane_fmt[0].reserved, 0,
1122 sizeof(mpix->plane_fmt[0].reserved));
1123
1124 /* use default */
1125 mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
1126 mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
1127 mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
1128
1129 return 0;
1130 }
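/*
 * Worked example (illustrative): a 4224x3136 request keeps its size and
 * gets bytesperline = DIV_ROUND_UP(4224, 50) * 64 = 5440, hence
 * sizeimage = 5440 * 3136 = 17059840 bytes (about 16.3 MiB) in one plane.
 */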
1131
1132 static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
1133 {
1134 struct cio2_queue *q = file_to_cio2_queue(file);
1135
1136 cio2_v4l2_try_fmt(file, fh, f);
1137 q->format = f->fmt.pix_mp;
1138
1139 return 0;
1140 }
1141
1142 static int
1143 cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
1144 {
1145 if (input->index > 0)
1146 return -EINVAL;
1147
1148 strlcpy(input->name, "camera", sizeof(input->name));
1149 input->type = V4L2_INPUT_TYPE_CAMERA;
1150
1151 return 0;
1152 }
1153
1154 static int
1155 cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
1156 {
1157 *input = 0;
1158
1159 return 0;
1160 }
1161
1162 static int
1163 cio2_video_s_input(struct file *file, void *fh, unsigned int input)
1164 {
1165 return input == 0 ? 0 : -EINVAL;
1166 }
1167
1168 static const struct v4l2_file_operations cio2_v4l2_fops = {
1169 .owner = THIS_MODULE,
1170 .unlocked_ioctl = video_ioctl2,
1171 .open = v4l2_fh_open,
1172 .release = vb2_fop_release,
1173 .poll = vb2_fop_poll,
1174 .mmap = vb2_fop_mmap,
1175 };
1176
1177 static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
1178 .vidioc_querycap = cio2_v4l2_querycap,
1179 .vidioc_enum_fmt_vid_cap_mplane = cio2_v4l2_enum_fmt,
1180 .vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
1181 .vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
1182 .vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
1183 .vidioc_reqbufs = vb2_ioctl_reqbufs,
1184 .vidioc_create_bufs = vb2_ioctl_create_bufs,
1185 .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
1186 .vidioc_querybuf = vb2_ioctl_querybuf,
1187 .vidioc_qbuf = vb2_ioctl_qbuf,
1188 .vidioc_dqbuf = vb2_ioctl_dqbuf,
1189 .vidioc_streamon = vb2_ioctl_streamon,
1190 .vidioc_streamoff = vb2_ioctl_streamoff,
1191 .vidioc_expbuf = vb2_ioctl_expbuf,
1192 .vidioc_enum_input = cio2_video_enum_input,
1193 .vidioc_g_input = cio2_video_g_input,
1194 .vidioc_s_input = cio2_video_s_input,
1195 };
1196
1197 static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
1198 struct v4l2_fh *fh,
1199 struct v4l2_event_subscription *sub)
1200 {
1201 if (sub->type != V4L2_EVENT_FRAME_SYNC)
1202 return -EINVAL;
1203
1204 /* Line number. For now only zero accepted. */
1205 if (sub->id != 0)
1206 return -EINVAL;
1207
1208 return v4l2_event_subscribe(fh, sub, 0, NULL);
1209 }
1210
1211 static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
1212 {
1213 struct v4l2_mbus_framefmt *format;
1214 const struct v4l2_mbus_framefmt fmt_default = {
1215 .width = 1936,
1216 .height = 1096,
1217 .code = formats[0].mbus_code,
1218 .field = V4L2_FIELD_NONE,
1219 .colorspace = V4L2_COLORSPACE_RAW,
1220 .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
1221 .quantization = V4L2_QUANTIZATION_DEFAULT,
1222 .xfer_func = V4L2_XFER_FUNC_DEFAULT,
1223 };
1224
1225 /* Initialize try_fmt */
1226 format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SINK);
1227 *format = fmt_default;
1228
1229 /* same as sink */
1230 format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SOURCE);
1231 *format = fmt_default;
1232
1233 return 0;
1234 }
1235
1236 /*
1237 * cio2_subdev_get_fmt - Handle get format by pads subdev method
1238 * @sd : pointer to v4l2 subdev structure
1239 * @cfg: V4L2 subdev pad config
1240 * @fmt: pointer to v4l2 subdev format structure
1241 * return -EINVAL or zero on success
1242 */
1243 static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
1244 struct v4l2_subdev_pad_config *cfg,
1245 struct v4l2_subdev_format *fmt)
1246 {
1247 struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1248 struct v4l2_subdev_format format;
1249 int ret;
1250
1251 if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
1252 fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
1253 return 0;
1254 }
1255
1256 if (fmt->pad == CIO2_PAD_SINK) {
1257 format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
1258 ret = v4l2_subdev_call(sd, pad, get_fmt, NULL,
1259 &format);
1260
1261 if (ret)
1262 return ret;
1263 /* update colorspace etc */
1264 q->subdev_fmt.colorspace = format.format.colorspace;
1265 q->subdev_fmt.ycbcr_enc = format.format.ycbcr_enc;
1266 q->subdev_fmt.quantization = format.format.quantization;
1267 q->subdev_fmt.xfer_func = format.format.xfer_func;
1268 }
1269
1270 fmt->format = q->subdev_fmt;
1271
1272 return 0;
1273 }
1274
1275 /*
1276 * cio2_subdev_set_fmt - Handle set format by pads subdev method
1277 * @sd : pointer to v4l2 subdev structure
1278 * @cfg: V4L2 subdev pad config
1279 * @fmt: pointer to v4l2 subdev format structure
1280 * return -EINVAL or zero on success
1281 */
1282 static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
1283 struct v4l2_subdev_pad_config *cfg,
1284 struct v4l2_subdev_format *fmt)
1285 {
1286 struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1287
1288 /*
1289 * Only allow setting sink pad format;
1290 * source always propagates from sink
1291 */
1292 if (fmt->pad == CIO2_PAD_SOURCE)
1293 return cio2_subdev_get_fmt(sd, cfg, fmt);
1294
1295 if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
1296 *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
1297 } else {
1298 /* It's the sink, allow changing frame size */
1299 q->subdev_fmt.width = fmt->format.width;
1300 q->subdev_fmt.height = fmt->format.height;
1301 q->subdev_fmt.code = fmt->format.code;
1302 fmt->format = q->subdev_fmt;
1303 }
1304
1305 return 0;
1306 }
1307
1308 static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
1309 struct v4l2_subdev_pad_config *cfg,
1310 struct v4l2_subdev_mbus_code_enum *code)
1311 {
1312 if (code->index >= ARRAY_SIZE(formats))
1313 return -EINVAL;
1314
1315 code->code = formats[code->index].mbus_code;
1316 return 0;
1317 }
1318
1319 static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
1320 struct v4l2_subdev_format *fmt)
1321 {
1322 if (is_media_entity_v4l2_subdev(pad->entity)) {
1323 struct v4l2_subdev *sd =
1324 media_entity_to_v4l2_subdev(pad->entity);
1325
1326 fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
1327 fmt->pad = pad->index;
1328 return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
1329 }
1330
1331 return -EINVAL;
1332 }
1333
1334 static int cio2_video_link_validate(struct media_link *link)
1335 {
1336 struct video_device *vd = container_of(link->sink->entity,
1337 struct video_device, entity);
1338 struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
1339 struct cio2_device *cio2 = video_get_drvdata(vd);
1340 struct v4l2_subdev_format source_fmt;
1341 int ret;
1342
1343 if (!media_entity_remote_pad(link->sink->entity->pads)) {
1344 dev_info(&cio2->pci_dev->dev,
1345 "video node %s pad not connected\n", vd->name);
1346 return -ENOTCONN;
1347 }
1348
1349 ret = cio2_subdev_link_validate_get_format(link->source, &source_fmt);
1350 if (ret < 0)
1351 return 0;
1352
1353 if (source_fmt.format.width != q->format.width ||
1354 source_fmt.format.height != q->format.height) {
1355 dev_err(&cio2->pci_dev->dev,
1356 "Wrong width or height %ux%u (%ux%u expected)\n",
1357 q->format.width, q->format.height,
1358 source_fmt.format.width, source_fmt.format.height);
1359 return -EINVAL;
1360 }
1361
1362 if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code))
1363 return -EINVAL;
1364
1365 return 0;
1366 }
1367
1368 static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
1369 .subscribe_event = cio2_subdev_subscribe_event,
1370 .unsubscribe_event = v4l2_event_subdev_unsubscribe,
1371 };
1372
1373 static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
1374 .open = cio2_subdev_open,
1375 };
1376
1377 static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
1378 .link_validate = v4l2_subdev_link_validate_default,
1379 .get_fmt = cio2_subdev_get_fmt,
1380 .set_fmt = cio2_subdev_set_fmt,
1381 .enum_mbus_code = cio2_subdev_enum_mbus_code,
1382 };
1383
1384 static const struct v4l2_subdev_ops cio2_subdev_ops = {
1385 .core = &cio2_subdev_core_ops,
1386 .pad = &cio2_subdev_pad_ops,
1387 };
1388
1389 /******* V4L2 sub-device asynchronous registration callbacks ***********/
1390
1391 struct sensor_async_subdev {
1392 struct v4l2_async_subdev asd;
1393 struct csi2_bus_info csi2;
1394 };
1395
1396 /* The .bound() notifier callback when a match is found */
1397 static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
1398 struct v4l2_subdev *sd,
1399 struct v4l2_async_subdev *asd)
1400 {
1401 struct cio2_device *cio2 = container_of(notifier,
1402 struct cio2_device, notifier);
1403 struct sensor_async_subdev *s_asd = container_of(asd,
1404 struct sensor_async_subdev, asd);
1405 struct cio2_queue *q;
1406
1407 if (cio2->queue[s_asd->csi2.port].sensor)
1408 return -EBUSY;
1409
1410 q = &cio2->queue[s_asd->csi2.port];
1411
1412 q->csi2 = s_asd->csi2;
1413 q->sensor = sd;
1414 q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);
1415
1416 return 0;
1417 }
1418
1419 /* The .unbind callback */
1420 static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
1421 struct v4l2_subdev *sd,
1422 struct v4l2_async_subdev *asd)
1423 {
1424 struct cio2_device *cio2 = container_of(notifier,
1425 struct cio2_device, notifier);
1426 struct sensor_async_subdev *s_asd = container_of(asd,
1427 struct sensor_async_subdev, asd);
1428
1429 cio2->queue[s_asd->csi2.port].sensor = NULL;
1430 }
1431
1432 /* .complete() is called after all subdevices have been located */
1433 static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
1434 {
1435 struct cio2_device *cio2 = container_of(notifier, struct cio2_device,
1436 notifier);
1437 struct sensor_async_subdev *s_asd;
1438 struct cio2_queue *q;
1439 unsigned int i, pad;
1440 int ret;
1441
1442 for (i = 0; i < notifier->num_subdevs; i++) {
1443 s_asd = container_of(cio2->notifier.subdevs[i],
1444 struct sensor_async_subdev, asd);
1445 q = &cio2->queue[s_asd->csi2.port];
1446
1447 for (pad = 0; pad < q->sensor->entity.num_pads; pad++)
1448 if (q->sensor->entity.pads[pad].flags &
1449 MEDIA_PAD_FL_SOURCE)
1450 break;
1451
1452 if (pad == q->sensor->entity.num_pads) {
1453 dev_err(&cio2->pci_dev->dev,
1454 "failed to find src pad for %s\n",
1455 q->sensor->name);
1456 return -ENXIO;
1457 }
1458
1459 ret = media_create_pad_link(
1460 &q->sensor->entity, pad,
1461 &q->subdev.entity, CIO2_PAD_SINK,
1462 0);
1463 if (ret) {
1464 dev_err(&cio2->pci_dev->dev,
1465 "failed to create link for %s\n",
1466 cio2->queue[i].sensor->name);
1467 return ret;
1468 }
1469 }
1470
1471 return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
1472 }
1473
1474 static const struct v4l2_async_notifier_operations cio2_async_ops = {
1475 .bound = cio2_notifier_bound,
1476 .unbind = cio2_notifier_unbind,
1477 .complete = cio2_notifier_complete,
1478 };
1479
1480 static int cio2_fwnode_parse(struct device *dev,
1481 struct v4l2_fwnode_endpoint *vep,
1482 struct v4l2_async_subdev *asd)
1483 {
1484 struct sensor_async_subdev *s_asd =
1485 container_of(asd, struct sensor_async_subdev, asd);
1486
1487 if (vep->bus_type != V4L2_MBUS_CSI2) {
1488 dev_err(dev, "Only CSI2 bus type is currently supported\n");
1489 return -EINVAL;
1490 }
1491
1492 s_asd->csi2.port = vep->base.port;
1493 s_asd->csi2.lanes = vep->bus.mipi_csi2.num_data_lanes;
1494
1495 return 0;
1496 }
1497
1498 static int cio2_notifier_init(struct cio2_device *cio2)
1499 {
1500 int ret;
1501
1502 ret = v4l2_async_notifier_parse_fwnode_endpoints(
1503 &cio2->pci_dev->dev, &cio2->notifier,
1504 sizeof(struct sensor_async_subdev),
1505 cio2_fwnode_parse);
1506 if (ret < 0)
1507 return ret;
1508
1509 if (!cio2->notifier.num_subdevs)
1510 return -ENODEV; /* no endpoint */
1511
1512 cio2->notifier.ops = &cio2_async_ops;
1513 ret = v4l2_async_notifier_register(&cio2->v4l2_dev, &cio2->notifier);
1514 if (ret) {
1515 dev_err(&cio2->pci_dev->dev,
1516 "failed to register async notifier : %d\n", ret);
1517 v4l2_async_notifier_cleanup(&cio2->notifier);
1518 }
1519
1520 return ret;
1521 }
1522
1523 static void cio2_notifier_exit(struct cio2_device *cio2)
1524 {
1525 v4l2_async_notifier_unregister(&cio2->notifier);
1526 v4l2_async_notifier_cleanup(&cio2->notifier);
1527 }
1528
1529 /**************** Queue initialization ****************/
1530 static const struct media_entity_operations cio2_media_ops = {
1531 .link_validate = v4l2_subdev_link_validate,
1532 };
1533
1534 static const struct media_entity_operations cio2_video_entity_ops = {
1535 .link_validate = cio2_video_link_validate,
1536 };
1537
1538 static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
1539 {
1540 static const u32 default_width = 1936;
1541 static const u32 default_height = 1096;
1542 const struct ipu3_cio2_fmt dflt_fmt = formats[0];
1543
1544 struct video_device *vdev = &q->vdev;
1545 struct vb2_queue *vbq = &q->vbq;
1546 struct v4l2_subdev *subdev = &q->subdev;
1547 struct v4l2_mbus_framefmt *fmt;
1548 int r;
1549
1550 /* Initialize miscellaneous variables */
1551 mutex_init(&q->lock);
1552
1553 /* Initialize formats to default values */
1554 fmt = &q->subdev_fmt;
1555 fmt->width = default_width;
1556 fmt->height = default_height;
1557 fmt->code = dflt_fmt.mbus_code;
1558 fmt->field = V4L2_FIELD_NONE;
1559
1560 q->format.width = default_width;
1561 q->format.height = default_height;
1562 q->format.pixelformat = dflt_fmt.fourcc;
1563 q->format.colorspace = V4L2_COLORSPACE_RAW;
1564 q->format.field = V4L2_FIELD_NONE;
1565 q->format.num_planes = 1;
1566 q->format.plane_fmt[0].bytesperline =
1567 cio2_bytesperline(q->format.width);
1568 q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline *
1569 q->format.height;
1570
1571 /* Initialize fbpt */
1572 r = cio2_fbpt_init(cio2, q);
1573 if (r)
1574 goto fail_fbpt;
1575
1576 /* Initialize media entities */
1577 q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
1578 MEDIA_PAD_FL_MUST_CONNECT;
1579 q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
1580 subdev->entity.ops = &cio2_media_ops;
1581 subdev->internal_ops = &cio2_subdev_internal_ops;
1582 r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
1583 if (r) {
1584 dev_err(&cio2->pci_dev->dev,
1585 "failed initialize subdev media entity (%d)\n", r);
1586 goto fail_subdev_media_entity;
1587 }
1588
1589 q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
1590 vdev->entity.ops = &cio2_video_entity_ops;
1591 r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
1592 if (r) {
1593 dev_err(&cio2->pci_dev->dev,
1594 "failed initialize videodev media entity (%d)\n", r);
1595 goto fail_vdev_media_entity;
1596 }
1597
1598 /* Initialize subdev */
1599 v4l2_subdev_init(subdev, &cio2_subdev_ops);
1600 subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
1601 subdev->owner = THIS_MODULE;
1602 snprintf(subdev->name, sizeof(subdev->name),
1603 CIO2_ENTITY_NAME " %td", q - cio2->queue);
1604 v4l2_set_subdevdata(subdev, cio2);
1605 r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
1606 if (r) {
1607 dev_err(&cio2->pci_dev->dev,
1608 "failed initialize subdev (%d)\n", r);
1609 goto fail_subdev;
1610 }
1611
1612 /* Initialize vbq */
1613 vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1614 vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
1615 vbq->ops = &cio2_vb2_ops;
1616 vbq->mem_ops = &vb2_dma_sg_memops;
1617 vbq->buf_struct_size = sizeof(struct cio2_buffer);
1618 vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1619 vbq->min_buffers_needed = 1;
1620 vbq->drv_priv = cio2;
1621 vbq->lock = &q->lock;
1622 r = vb2_queue_init(vbq);
1623 if (r) {
1624 dev_err(&cio2->pci_dev->dev,
1625 "failed to initialize videobuf2 queue (%d)\n", r);
1626 goto fail_vbq;
1627 }
1628
1629 /* Initialize vdev */
1630 snprintf(vdev->name, sizeof(vdev->name),
1631 "%s %td", CIO2_NAME, q - cio2->queue);
1632 vdev->release = video_device_release_empty;
1633 vdev->fops = &cio2_v4l2_fops;
1634 vdev->ioctl_ops = &cio2_v4l2_ioctl_ops;
1635 vdev->lock = &cio2->lock;
1636 vdev->v4l2_dev = &cio2->v4l2_dev;
1637 vdev->queue = &q->vbq;
1638 vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
1639 video_set_drvdata(vdev, cio2);
1640 r = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
1641 if (r) {
1642 dev_err(&cio2->pci_dev->dev,
1643 "failed to register video device (%d)\n", r);
1644 goto fail_vdev;
1645 }
1646
1647 /* Create link from CIO2 subdev to output node */
1648 r = media_create_pad_link(
1649 &subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0,
1650 MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
1651 if (r)
1652 goto fail_link;
1653
1654 return 0;
1655
1656 fail_link:
1657 video_unregister_device(&q->vdev);
1658 fail_vdev:
1659 vb2_queue_release(vbq);
1660 fail_vbq:
1661 v4l2_device_unregister_subdev(subdev);
1662 fail_subdev:
1663 media_entity_cleanup(&vdev->entity);
1664 fail_vdev_media_entity:
1665 media_entity_cleanup(&subdev->entity);
1666 fail_subdev_media_entity:
1667 cio2_fbpt_exit(q, &cio2->pci_dev->dev);
1668 fail_fbpt:
1669 mutex_destroy(&q->lock);
1670
1671 return r;
1672 }
1673
1674 static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
1675 {
1676 video_unregister_device(&q->vdev);
1677 media_entity_cleanup(&q->vdev.entity);
1678 vb2_queue_release(&q->vbq);
1679 v4l2_device_unregister_subdev(&q->subdev);
1680 media_entity_cleanup(&q->subdev.entity);
1681 cio2_fbpt_exit(q, &cio2->pci_dev->dev);
1682 mutex_destroy(&q->lock);
1683 }
1684
1685 static int cio2_queues_init(struct cio2_device *cio2)
1686 {
1687 int i, r;
1688
1689 for (i = 0; i < CIO2_QUEUES; i++) {
1690 r = cio2_queue_init(cio2, &cio2->queue[i]);
1691 if (r)
1692 break;
1693 }
1694
1695 if (i == CIO2_QUEUES)
1696 return 0;
1697
1698 for (i--; i >= 0; i--)
1699 cio2_queue_exit(cio2, &cio2->queue[i]);
1700
1701 return r;
1702 }

static void cio2_queues_exit(struct cio2_device *cio2)
{
	unsigned int i;

	for (i = 0; i < CIO2_QUEUES; i++)
		cio2_queue_exit(cio2, &cio2->queue[i]);
}

/**************** PCI interface ****************/

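/*
 * Enable MSI and switch the device to bus-mastering, memory-mapped
 * operation with legacy INTx interrupts disabled.
 */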
static int cio2_pci_config_setup(struct pci_dev *dev)
{
	u16 pci_command;
	int r = pci_enable_msi(dev);

	if (r) {
		dev_err(&dev->dev, "failed to enable MSI (%d)\n", r);
		return r;
	}

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
		PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(dev, PCI_COMMAND, pci_command);

	return 0;
}

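/*
 * Probe: map the MMIO BAR, set up DMA and MSI, register the media and
 * V4L2 devices, create the capture queues with their video/subdev nodes,
 * hook up the async notifier for sensors and request the IRQ. Managed
 * (devm/pcim) resources are released automatically on driver detach.
 */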
static int cio2_pci_probe(struct pci_dev *pci_dev,
			  const struct pci_device_id *id)
{
	struct cio2_device *cio2;
	void __iomem *const *iomap;
	int r;

	cio2 = devm_kzalloc(&pci_dev->dev, sizeof(*cio2), GFP_KERNEL);
	if (!cio2)
		return -ENOMEM;
	cio2->pci_dev = pci_dev;

	r = pcim_enable_device(pci_dev);
	if (r) {
		dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
		return r;
	}

	dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n",
		 pci_dev->device, pci_dev->revision);

	r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
	if (r) {
		dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r);
		return -ENODEV;
	}

	iomap = pcim_iomap_table(pci_dev);
	if (!iomap) {
		dev_err(&pci_dev->dev, "failed to iomap table\n");
		return -ENODEV;
	}

	cio2->base = iomap[CIO2_PCI_BAR];

	pci_set_drvdata(pci_dev, cio2);

	pci_set_master(pci_dev);

	r = pci_set_dma_mask(pci_dev, CIO2_DMA_MASK);
	if (r) {
		dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r);
		return -ENODEV;
	}

	r = cio2_pci_config_setup(pci_dev);
	if (r)
		return -ENODEV;

	r = cio2_fbpt_init_dummy(cio2);
	if (r)
		return r;

	mutex_init(&cio2->lock);

	cio2->media_dev.dev = &cio2->pci_dev->dev;
	strlcpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
		sizeof(cio2->media_dev.model));
	snprintf(cio2->media_dev.bus_info, sizeof(cio2->media_dev.bus_info),
		 "PCI:%s", pci_name(cio2->pci_dev));
	cio2->media_dev.hw_revision = 0;

	media_device_init(&cio2->media_dev);
	r = media_device_register(&cio2->media_dev);
	if (r < 0)
		goto fail_mutex_destroy;

	cio2->v4l2_dev.mdev = &cio2->media_dev;
	r = v4l2_device_register(&pci_dev->dev, &cio2->v4l2_dev);
	if (r) {
		dev_err(&pci_dev->dev,
			"failed to register V4L2 device (%d)\n", r);
		goto fail_media_device_unregister;
	}

	r = cio2_queues_init(cio2);
	if (r)
		goto fail_v4l2_device_unregister;

	/* Register notifier for the subdevices we care about */
	r = cio2_notifier_init(cio2);
	if (r)
		goto fail_cio2_queue_exit;

	r = devm_request_irq(&pci_dev->dev, pci_dev->irq, cio2_irq,
			     IRQF_SHARED, CIO2_NAME, cio2);
	if (r) {
		dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
		goto fail;
	}

	pm_runtime_put_noidle(&pci_dev->dev);
	pm_runtime_allow(&pci_dev->dev);

	return 0;

fail:
	cio2_notifier_exit(cio2);
fail_cio2_queue_exit:
	cio2_queues_exit(cio2);
fail_v4l2_device_unregister:
	v4l2_device_unregister(&cio2->v4l2_dev);
fail_media_device_unregister:
	media_device_unregister(&cio2->media_dev);
	media_device_cleanup(&cio2->media_dev);
fail_mutex_destroy:
	mutex_destroy(&cio2->lock);
	cio2_fbpt_exit_dummy(cio2);

	return r;
}

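/*
 * Remove: unwind what probe set up. The IRQ, the MMIO mapping and the
 * cio2 structure itself are device-managed and released automatically
 * when the driver is unbound.
 */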
static void cio2_pci_remove(struct pci_dev *pci_dev)
{
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	unsigned int i;

	cio2_notifier_exit(cio2);
	cio2_fbpt_exit_dummy(cio2);
	for (i = 0; i < CIO2_QUEUES; i++)
		cio2_queue_exit(cio2, &cio2->queue[i]);
	v4l2_device_unregister(&cio2->v4l2_dev);
	media_device_unregister(&cio2->media_dev);
	media_device_cleanup(&cio2->media_dev);
	mutex_destroy(&cio2->lock);
}

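/*
 * Runtime PM: on suspend, request entry to the D0i3 low-power state via
 * the D0I3C register and program the PCI PM control/status register to
 * D3; on resume, reverse the sequence (restore request in D0I3C, power
 * state field cleared back to D0).
 */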
static int __maybe_unused cio2_runtime_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	void __iomem *const base = cio2->base;
	u16 pm;

	writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C);
	dev_dbg(dev, "cio2 runtime suspend.\n");

	pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
	pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
	pm |= CIO2_PMCSR_D3;
	pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);

	return 0;
}

static int __maybe_unused cio2_runtime_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	void __iomem *const base = cio2->base;
	u16 pm;

	writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
	dev_dbg(dev, "cio2 runtime resume.\n");

	pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
	pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
	pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);

	return 0;
}

/*
 * Helper function to advance all the elements of a circular buffer by
 * "start" positions.
 */
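/*
 * For example, rotating a five-element array { A, B, C, D, E } with
 * start = 2 yields { C, D, E, A, B }: the element previously at index
 * "start" ends up at index 0, which is what cio2_fbpt_rearrange() relies
 * on below.
 */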
static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
{
	struct {
		size_t begin, end;
	} arr[2] = {
		{ 0, start - 1 },
		{ start, elems - 1 },
	};

#define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1)

	/* Loop as long as we have out-of-place entries */
	while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) {
		size_t size0, i;

		/*
		 * Find the number of entries that can be arranged on this
		 * iteration.
		 */
		size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1]));

		/* Swap the entries in two parts of the array. */
		for (i = 0; i < size0; i++) {
			u8 *d = ptr + elem_size * (arr[1].begin + i);
			u8 *s = ptr + elem_size * (arr[0].begin + i);
			size_t j;

			for (j = 0; j < elem_size; j++)
				swap(d[j], s[j]);
		}

		if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) {
			/* The end of the first array remains unarranged. */
			arr[0].begin += size0;
		} else {
			/*
			 * The first array is fully arranged so we proceed
			 * handling the next one.
			 */
			arr[0].begin = arr[1].begin;
			arr[0].end = arr[1].begin + size0 - 1;
			arr[1].begin += size0;
		}
	}
}

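/*
 * Rotate the FBPT and the driver's buffer bookkeeping so that the first
 * queued buffer ends up at entry 0, since the hardware restarts FBPT
 * processing from the beginning after a suspend/resume cycle.
 */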
static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
{
	unsigned int i, j;

	for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS;
		i++, j = (j + 1) % CIO2_MAX_BUFFERS)
		if (q->bufs[j])
			break;

	if (i == CIO2_MAX_BUFFERS)
		return;

	if (j) {
		arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS,
			CIO2_MAX_BUFFERS, j);
		arrange(q->bufs, sizeof(struct cio2_buffer *),
			CIO2_MAX_BUFFERS, j);
	}

	/*
	 * DMA clears the valid bit when accessing the buffer.
	 * When the stream is stopped in the suspend callback, some of the
	 * buffers may be left in an invalid state. After resume, when DMA
	 * meets an invalid buffer, it will halt and stop receiving new data.
	 * To avoid DMA halting, set the valid bit for all buffers in FBPT.
	 */
	for (i = 0; i < CIO2_MAX_BUFFERS; i++)
		cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
}

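/*
 * System sleep: if the current queue is streaming, stop the hardware
 * before suspend and restart it on resume; the queued buffers are moved
 * to the head of the FBPT so that the restarted DMA picks them up first.
 */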
static int __maybe_unused cio2_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	struct cio2_queue *q = cio2->cur_queue;

	dev_dbg(dev, "cio2 suspend\n");
	if (!cio2->streaming)
		return 0;

	/* Stop stream */
	cio2_hw_exit(cio2, q);
	synchronize_irq(pci_dev->irq);

	pm_runtime_force_suspend(dev);

	/*
	 * Upon resume, hw starts to process the fbpt entries from the
	 * beginning, so relocate the queued buffers to the fbpt head before
	 * suspending.
	 */
	cio2_fbpt_rearrange(cio2, q);
	q->bufs_first = 0;
	q->bufs_next = 0;

	return 0;
}

static int __maybe_unused cio2_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	int r = 0;
	struct cio2_queue *q = cio2->cur_queue;

	dev_dbg(dev, "cio2 resume\n");
	if (!cio2->streaming)
		return 0;

	/* Start stream */
	r = pm_runtime_force_resume(&cio2->pci_dev->dev);
	if (r < 0) {
		dev_err(&cio2->pci_dev->dev,
			"failed to set power (%d)\n", r);
		return r;
	}

	r = cio2_hw_init(cio2, q);
	if (r)
		dev_err(dev, "failed to initialize cio2 hw\n");

	return r;
}

static const struct dev_pm_ops cio2_pm_ops = {
	SET_RUNTIME_PM_OPS(&cio2_runtime_suspend, &cio2_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(&cio2_suspend, &cio2_resume)
};

static const struct pci_device_id cio2_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);

static struct pci_driver cio2_pci_driver = {
	.name = CIO2_NAME,
	.id_table = cio2_pci_id_table,
	.probe = cio2_pci_probe,
	.remove = cio2_pci_remove,
	.driver = {
		.pm = &cio2_pm_ops,
	},
};

module_pci_driver(cio2_pci_driver);

MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
MODULE_AUTHOR("Jian Xu Zheng <jian.xu.zheng@intel.com>");
MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IPU3 CIO2 driver");