drivers/media/pci/intel/ipu3/ipu3-cio2.c
b39082e2 1// SPDX-License-Identifier: GPL-2.0
c2a6a07a 2/*
30f573a8 3 * Copyright (C) 2017,2020 Intel Corporation
4 *
5 * Based partially on Intel IPU4 driver written by
6 * Sakari Ailus <sakari.ailus@linux.intel.com>
7 * Samu Onkalo <samu.onkalo@intel.com>
8 * Jouni Högander <jouni.hogander@intel.com>
9 * Jouni Ukkonen <jouni.ukkonen@intel.com>
10 * Antti Laakso <antti.laakso@intel.com>
11 * et al.
12 */
13
14#include <linux/delay.h>
15#include <linux/interrupt.h>
44677b03 16#include <linux/iopoll.h>
a25ba102 17#include <linux/mm.h>
18#include <linux/module.h>
19#include <linux/pci.h>
9a262c7a 20#include <linux/pfn.h>
21#include <linux/pm_runtime.h>
22#include <linux/property.h>
23#include <linux/vmalloc.h>
24#include <media/v4l2-ctrls.h>
25#include <media/v4l2-device.h>
26#include <media/v4l2-event.h>
27#include <media/v4l2-fwnode.h>
28#include <media/v4l2-ioctl.h>
29#include <media/videobuf2-dma-sg.h>
30
31#include "ipu3-cio2.h"
32
33struct ipu3_cio2_fmt {
34 u32 mbus_code;
35 u32 fourcc;
36 u8 mipicode;
4b6c129e 37 u8 bpp;
38};
39
40/*
 41 * These are the raw formats used in Intel's third generation of
 42 * Image Processing Unit, known as IPU3.
 43 * 10-bit raw Bayer packed: 32 bytes for every 25 pixels, with the
 44 * last 6 LSB bits unused.
45 */
46static const struct ipu3_cio2_fmt formats[] = {
47 { /* put default entry at beginning */
48 .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
49 .fourcc = V4L2_PIX_FMT_IPU3_SGRBG10,
50 .mipicode = 0x2b,
4b6c129e 51 .bpp = 10,
52 }, {
53 .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
54 .fourcc = V4L2_PIX_FMT_IPU3_SGBRG10,
55 .mipicode = 0x2b,
4b6c129e 56 .bpp = 10,
57 }, {
58 .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
59 .fourcc = V4L2_PIX_FMT_IPU3_SBGGR10,
60 .mipicode = 0x2b,
4b6c129e 61 .bpp = 10,
62 }, {
63 .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
64 .fourcc = V4L2_PIX_FMT_IPU3_SRGGB10,
65 .mipicode = 0x2b,
4b6c129e 66 .bpp = 10,
67 },
68};
69
70/*
 71 * cio2_find_format - lookup color format by fourcc and/or media bus code
72 * @pixelformat: fourcc to match, ignored if null
73 * @mbus_code: media bus code to match, ignored if null
74 */
75static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
76 const u32 *mbus_code)
77{
78 unsigned int i;
79
80 for (i = 0; i < ARRAY_SIZE(formats); i++) {
81 if (pixelformat && *pixelformat != formats[i].fourcc)
82 continue;
83 if (mbus_code && *mbus_code != formats[i].mbus_code)
84 continue;
85
86 return &formats[i];
87 }
88
89 return NULL;
90}
91
92static inline u32 cio2_bytesperline(const unsigned int width)
93{
94 /*
 95 * 64 bytes for every 50 pixels; the line length
 96 * in bytes is a multiple of 64 (line end alignment).
97 */
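	/*
	 * Worked example of the packing math above, using the maximum
	 * supported width of 4224 pixels: DIV_ROUND_UP(4224, 50) = 85
	 * blocks of 64 bytes, i.e. 5440 bytes per line, already a
	 * multiple of 64.
	 */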
98 return DIV_ROUND_UP(width, 50) * 64;
99}
100
101/**************** FBPT operations ****************/
102
103static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
104{
105 if (cio2->dummy_lop) {
e186f932 106 dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
107 cio2->dummy_lop, cio2->dummy_lop_bus_addr);
108 cio2->dummy_lop = NULL;
109 }
110 if (cio2->dummy_page) {
e186f932 111 dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
112 cio2->dummy_page, cio2->dummy_page_bus_addr);
113 cio2->dummy_page = NULL;
114 }
115}
116
117static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
118{
119 unsigned int i;
120
e186f932 121 cio2->dummy_page = dma_alloc_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
122 &cio2->dummy_page_bus_addr,
123 GFP_KERNEL);
e186f932 124 cio2->dummy_lop = dma_alloc_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
125 &cio2->dummy_lop_bus_addr,
126 GFP_KERNEL);
127 if (!cio2->dummy_page || !cio2->dummy_lop) {
128 cio2_fbpt_exit_dummy(cio2);
129 return -ENOMEM;
130 }
131 /*
 132 * The List of Pointers (LOP) contains 1024 32-bit pointers, each to a 4KB page.
 133 * Initialize each entry to the dummy_page bus base address.
134 */
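	/*
	 * With the sizes quoted above, a single LOP page can therefore
	 * map up to 1024 * 4KB = 4MB of buffer memory.
	 */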
7b285f41 135 for (i = 0; i < CIO2_LOP_ENTRIES; i++)
9a262c7a 136 cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);
137
138 return 0;
139}
140
141static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
142 struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
143{
144 /*
145 * The CPU first initializes some fields in fbpt, then sets
146 * the VALID bit, this barrier is to ensure that the DMA(device)
147 * does not see the VALID bit enabled before other fields are
148 * initialized; otherwise it could lead to havoc.
149 */
150 dma_wmb();
151
152 /*
153 * Request interrupts for start and completion
154 * Valid bit is applicable only to 1st entry
155 */
156 entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
157 CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
158}
159
 160/* Initialize fbpt entries to point to the dummy frame */
161static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
162 struct cio2_fbpt_entry
163 entry[CIO2_MAX_LOPS])
164{
165 unsigned int i;
166
167 entry[0].first_entry.first_page_offset = 0;
7b285f41 168 entry[1].second_entry.num_of_pages = CIO2_LOP_ENTRIES * CIO2_MAX_LOPS;
e186f932 169 entry[1].second_entry.last_page_available_bytes = PAGE_SIZE - 1;
170
171 for (i = 0; i < CIO2_MAX_LOPS; i++)
9a262c7a 172 entry[i].lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
173
174 cio2_fbpt_entry_enable(cio2, entry);
175}
176
 177/* Initialize fbpt entries to point to a given buffer */
178static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
179 struct cio2_buffer *b,
180 struct cio2_fbpt_entry
181 entry[CIO2_MAX_LOPS])
182{
183 struct vb2_buffer *vb = &b->vbb.vb2_buf;
184 unsigned int length = vb->planes[0].length;
185 int remaining, i;
186
187 entry[0].first_entry.first_page_offset = b->offset;
188 remaining = length + entry[0].first_entry.first_page_offset;
9a262c7a 189 entry[1].second_entry.num_of_pages = PFN_UP(remaining);
190 /*
191 * last_page_available_bytes has the offset of the last byte in the
192 * last page which is still accessible by DMA. DMA cannot access
193 * beyond this point. Valid range for this is from 0 to 4095.
194 * 0 indicates 1st byte in the page is DMA accessible.
e186f932 195 * 4095 (PAGE_SIZE - 1) means every single byte in the last page
196 * is available for DMA transfer.
197 */
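	/*
	 * Worked example with assumed values: for a 1 MiB plane starting
	 * at a 256-byte offset, remaining = 1048832 above, so PFN_UP()
	 * yields 257 pages; offset_in_page() below yields 256, and
	 * last_page_available_bytes becomes 255.
	 */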
198 remaining = offset_in_page(remaining) ?: PAGE_SIZE;
199 entry[1].second_entry.last_page_available_bytes = remaining - 1;
200 /* Fill FBPT */
201 remaining = length;
202 i = 0;
203 while (remaining > 0) {
9a262c7a 204 entry->lop_page_addr = PFN_DOWN(b->lop_bus_addr[i]);
e186f932 205 remaining -= CIO2_LOP_ENTRIES * PAGE_SIZE;
206 entry++;
207 i++;
208 }
209
210 /*
 211 * The first unused FBPT entry should still point to a valid LOP
212 */
9a262c7a 213 entry->lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
214
215 cio2_fbpt_entry_enable(cio2, entry);
216}
217
218static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
219{
220 struct device *dev = &cio2->pci_dev->dev;
221
222 q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
223 GFP_KERNEL);
224 if (!q->fbpt)
225 return -ENOMEM;
226
227 return 0;
228}
229
230static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
231{
232 dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
233}
234
235/**************** CSI2 hardware setup ****************/
236
237/*
238 * The CSI2 receiver has several parameters affecting
239 * the receiver timings. These depend on the MIPI bus frequency
240 * F in Hz (sensor transmitter rate) as follows:
241 * register value = (A/1e9 + B * UI) / COUNT_ACC
242 * where
243 * UI = 1 / (2 * F) in seconds
244 * COUNT_ACC = counter accuracy in seconds
245 * For IPU3 COUNT_ACC = 0.0625
246 *
247 * A and B are coefficients from the table below,
 248 * depending on whether the register minimum or maximum value is
249 * calculated.
250 * Minimum Maximum
251 * Clock lane A B A B
252 * reg_rx_csi_dly_cnt_termen_clane 0 0 38 0
253 * reg_rx_csi_dly_cnt_settle_clane 95 -8 300 -16
254 * Data lanes
255 * reg_rx_csi_dly_cnt_termen_dlane0 0 0 35 4
256 * reg_rx_csi_dly_cnt_settle_dlane0 85 -2 145 -6
257 * reg_rx_csi_dly_cnt_termen_dlane1 0 0 35 4
258 * reg_rx_csi_dly_cnt_settle_dlane1 85 -2 145 -6
259 * reg_rx_csi_dly_cnt_termen_dlane2 0 0 35 4
260 * reg_rx_csi_dly_cnt_settle_dlane2 85 -2 145 -6
261 * reg_rx_csi_dly_cnt_termen_dlane3 0 0 35 4
262 * reg_rx_csi_dly_cnt_settle_dlane3 85 -2 145 -6
263 *
264 * We use the minimum values of both A and B.
265 */
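/*
 * Worked example with an assumed 400 MHz link frequency: UI = 1 / (2 * F)
 * = 1.25 ns, so the minimum clock lane settle count is
 * (95 - 8 * 1.25) / 0.0625 = 1360, which is what cio2_rx_timing() below
 * computes as accinv * a + accinv * b * UI.
 */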
266
267/*
16790554 268 * shift for keeping value range suitable for 32-bit integer arithmetic
269 */
270#define LIMIT_SHIFT 8
271
272static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
273{
 274 const u32 accinv = 16; /* inverse of the counter resolution */
275 const u32 uiinv = 500000000; /* 1e9 / 2 */
276 s32 r;
277
278 freq >>= LIMIT_SHIFT;
279
280 if (WARN_ON(freq <= 0 || freq > S32_MAX))
281 return def;
282 /*
283 * b could be 0, -2 or -8, so |accinv * b| is always
284 * less than (1 << ds) and thus |r| < 500000000.
285 */
286 r = accinv * b * (uiinv >> LIMIT_SHIFT);
287 r = r / (s32)freq;
288 /* max value of a is 95 */
289 r += accinv * a;
290
291 return r;
292};
293
 294/* Calculate the delay value for termination enable of clock lane HS Rx */
295static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
296 struct cio2_csi2_timing *timing,
297 unsigned int bpp, unsigned int lanes)
298{
299 struct device *dev = &cio2->pci_dev->dev;
c2a6a07a 300 s64 freq;
301
302 if (!q->sensor)
303 return -ENODEV;
304
305 freq = v4l2_get_link_rate(q->sensor->ctrl_handler, bpp, lanes);
306 if (freq < 0) {
307 dev_err(dev, "error %lld, invalid link_freq\n", freq);
308 return freq;
309 }
310
311 timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
312 CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
313 freq,
314 CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
315 timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
316 CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
317 freq,
318 CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
319 timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
320 CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
321 freq,
322 CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
323 timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
324 CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
325 freq,
326 CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
327
328 dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
329 dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
330 dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
331 dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);
332
333 return 0;
334};
335
336static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
337{
338 static const int NUM_VCS = 4;
339 static const int SID; /* Stream id */
340 static const int ENTRY;
341 static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
342 CIO2_FBPT_SUBENTRY_UNIT);
343 const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
344 const struct ipu3_cio2_fmt *fmt;
345 void __iomem *const base = cio2->base;
346 u8 lanes, csi2bus = q->csi2.port;
347 u8 sensor_vc = SENSOR_VIR_CH_DFLT;
348 struct cio2_csi2_timing timing;
349 int i, r;
350
351 fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
352 if (!fmt)
353 return -EINVAL;
354
355 lanes = q->csi2.lanes;
356
4b6c129e 357 r = cio2_csi2_calc_timing(cio2, q, &timing, fmt->bpp, lanes);
358 if (r)
359 return r;
360
361 writel(timing.clk_termen, q->csi_rx_base +
362 CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
363 writel(timing.clk_settle, q->csi_rx_base +
364 CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
365
366 for (i = 0; i < lanes; i++) {
367 writel(timing.dat_termen, q->csi_rx_base +
368 CIO2_REG_CSIRX_DLY_CNT_TERMEN(i));
369 writel(timing.dat_settle, q->csi_rx_base +
370 CIO2_REG_CSIRX_DLY_CNT_SETTLE(i));
371 }
372
373 writel(CIO2_PBM_WMCTRL1_MIN_2CK |
374 CIO2_PBM_WMCTRL1_MID1_2CK |
375 CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1);
376 writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT |
377 CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT |
378 CIO2_PBM_WMCTRL2_OBFFWM_2CK <<
379 CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT |
380 CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT |
381 CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2);
382 writel(CIO2_PBM_ARB_CTRL_LANES_DIV <<
383 CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT |
384 CIO2_PBM_ARB_CTRL_LE_EN |
385 CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN <<
386 CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT |
387 CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP <<
388 CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT,
389 base + CIO2_REG_PBM_ARB_CTRL);
390 writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK,
391 q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
392 writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK,
393 q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);
394
395 writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
396 writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);
397
398 /* Configure MIPI backend */
399 for (i = 0; i < NUM_VCS; i++)
400 writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));
401
 402 /* There are 16 short packet LUT entries */
403 for (i = 0; i < 16; i++)
404 writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
405 q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
406 writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
407 q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);
408
409 writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE);
410 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
411 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
412 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
413 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
414 writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE);
415
416 writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) |
417 CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN),
418 base + CIO2_REG_INT_EN);
419
420 writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B)
421 << CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT,
422 base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus));
423 writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT |
424 sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT |
425 fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT,
426 q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
427 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
428 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
429 writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus));
430
431 writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
432 writel(CIO2_CGC_PRIM_TGE |
433 CIO2_CGC_SIDE_TGE |
434 CIO2_CGC_XOSC_TGE |
435 CIO2_CGC_D3I3_TGE |
436 CIO2_CGC_CSI2_INTERFRAME_TGE |
437 CIO2_CGC_CSI2_PORT_DCGE |
438 CIO2_CGC_SIDE_DCGE |
439 CIO2_CGC_PRIM_DCGE |
440 CIO2_CGC_ROSC_DCGE |
441 CIO2_CGC_XOSC_DCGE |
442 CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT |
443 CIO2_CGC_CSI_CLKGATE_HOLDOFF
444 << CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC);
445 writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL);
446 writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT |
447 CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
448 CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT |
449 CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
450 base + CIO2_REG_LTRVAL01);
451 writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT |
452 CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
453 CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT |
454 CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
455 base + CIO2_REG_LTRVAL23);
456
457 for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
458 writel(0, base + CIO2_REG_CDMABA(i));
459 writel(0, base + CIO2_REG_CDMAC0(i));
460 writel(0, base + CIO2_REG_CDMAC1(i));
461 }
462
463 /* Enable DMA */
9a262c7a 464 writel(PFN_DOWN(q->fbpt_bus_addr), base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
465
466 writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
467 FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
468 CIO2_CDMAC0_DMA_INTR_ON_FE |
469 CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL |
470 CIO2_CDMAC0_DMA_EN |
471 CIO2_CDMAC0_DMA_INTR_ON_FS |
472 CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
473
474 writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT,
475 base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN));
476
477 writel(0, base + CIO2_REG_PBM_FOPN_ABORT);
478
479 writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT |
480 CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR |
481 CIO2_PXM_FRF_CFG_MSK_ECC_RE |
482 CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE,
483 base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));
484
485 /* Clear interrupts */
486 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
487 writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
488 writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
489 writel(~0, base + CIO2_REG_INT_STS);
490
491 /* Enable devices, starting from the last device in the pipe */
492 writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
493 writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
494
495 return 0;
496}
497
498static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
499{
500 void __iomem *const base = cio2->base;
501 unsigned int i;
502 u32 value;
503 int ret;
504
505 /* Disable CSI receiver and MIPI backend devices */
506 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
507 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
508 writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
509 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
510
511 /* Halt DMA */
512 writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
513 ret = readl_poll_timeout(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN),
514 value, value & CIO2_CDMAC0_DMA_HALTED,
515 4000, 2000000);
516 if (ret)
517 dev_err(&cio2->pci_dev->dev,
518 "DMA %i can not be halted\n", CIO2_DMA_CHAN);
519
520 for (i = 0; i < CIO2_NUM_PORTS; i++) {
521 writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
522 CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
523 writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
524 CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
525 }
526}
527
528static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
529{
530 struct device *dev = &cio2->pci_dev->dev;
531 struct cio2_queue *q = cio2->cur_queue;
a553c901 532 struct cio2_fbpt_entry *entry;
533 u64 ns = ktime_get_ns();
534
535 if (dma_chan >= CIO2_QUEUES) {
536 dev_err(dev, "bad DMA channel %i\n", dma_chan);
537 return;
538 }
539
540 entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
541 if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
542 dev_warn(&cio2->pci_dev->dev,
543 "no ready buffers found on DMA channel %u\n",
544 dma_chan);
545 return;
546 }
547
548 /* Find out which buffer(s) are ready */
549 do {
550 struct cio2_buffer *b;
551
552 b = q->bufs[q->bufs_first];
553 if (b) {
554 unsigned int received = entry[1].second_entry.num_of_bytes;
555 unsigned long payload =
556 vb2_get_plane_payload(&b->vbb.vb2_buf, 0);
557
558 q->bufs[q->bufs_first] = NULL;
559 atomic_dec(&q->bufs_queued);
560 dev_dbg(&cio2->pci_dev->dev,
561 "buffer %i done\n", b->vbb.vb2_buf.index);
562
563 b->vbb.vb2_buf.timestamp = ns;
564 b->vbb.field = V4L2_FIELD_NONE;
565 b->vbb.sequence = atomic_read(&q->frame_sequence);
566 if (payload != received)
567 dev_warn(dev,
568 "payload length is %lu, received %u\n",
569 payload, received);
570 vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
571 }
572 atomic_inc(&q->frame_sequence);
573 cio2_fbpt_entry_init_dummy(cio2, entry);
574 q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
575 entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
576 } while (!(entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID));
577}
578
579static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
580{
581 /*
582 * For the user space camera control algorithms it is essential
583 * to know when the reception of a frame has begun. That's often
584 * the best timing information to get from the hardware.
585 */
586 struct v4l2_event event = {
587 .type = V4L2_EVENT_FRAME_SYNC,
588 .u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
589 };
590
591 v4l2_event_queue(q->subdev.devnode, &event);
592}
593
594static const char *const cio2_irq_errs[] = {
595 "single packet header error corrected",
596 "multiple packet header errors detected",
597 "payload checksum (CRC) error",
598 "fifo overflow",
599 "reserved short packet data type detected",
600 "reserved long packet data type detected",
601 "incomplete long packet detected",
602 "frame sync error",
603 "line sync error",
604 "DPHY start of transmission error",
605 "DPHY synchronization error",
606 "escape mode error",
607 "escape mode trigger event",
608 "escape mode ultra-low power state for data lane(s)",
609 "escape mode ultra-low power state exit for clock lane",
610 "inter-frame short packet discarded",
611 "inter-frame long packet discarded",
612 "non-matching Long Packet stalled",
613};
614
615static const char *const cio2_port_errs[] = {
616 "ECC recoverable",
617 "DPHY not recoverable",
618 "ECC not recoverable",
619 "CRC error",
620 "INTERFRAMEDATA",
621 "PKT2SHORT",
622 "PKT2LONG",
623};
624
09f20f2b 625static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
c2a6a07a 626{
627 void __iomem *const base = cio2->base;
628 struct device *dev = &cio2->pci_dev->dev;
629
630 if (int_status & CIO2_INT_IOOE) {
631 /*
632 * Interrupt on Output Error:
633 * 1) SRAM is full and FS received, or
634 * 2) An invalid bit detected by DMA.
635 */
636 u32 oe_status, oe_clear;
637
638 oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE);
639 oe_status = oe_clear;
640
641 if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) {
642 dev_err(dev, "DMA output error: 0x%x\n",
643 (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK)
644 >> CIO2_INT_EXT_OE_DMAOE_SHIFT);
645 oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK;
646 }
647 if (oe_status & CIO2_INT_EXT_OE_OES_MASK) {
648 dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n",
649 (oe_status & CIO2_INT_EXT_OE_OES_MASK)
650 >> CIO2_INT_EXT_OE_OES_SHIFT);
651 oe_status &= ~CIO2_INT_EXT_OE_OES_MASK;
652 }
653 writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE);
654 if (oe_status)
655 dev_warn(dev, "unknown interrupt 0x%x on OE\n",
656 oe_status);
657 int_status &= ~CIO2_INT_IOOE;
658 }
659
660 if (int_status & CIO2_INT_IOC_MASK) {
661 /* DMA IO done -- frame ready */
662 u32 clr = 0;
663 unsigned int d;
664
665 for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
666 if (int_status & CIO2_INT_IOC(d)) {
667 clr |= CIO2_INT_IOC(d);
668 cio2_buffer_done(cio2, d);
669 }
670 int_status &= ~clr;
671 }
672
673 if (int_status & CIO2_INT_IOS_IOLN_MASK) {
674 /* DMA IO starts or reached specified line */
675 u32 clr = 0;
676 unsigned int d;
677
678 for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
679 if (int_status & CIO2_INT_IOS_IOLN(d)) {
680 clr |= CIO2_INT_IOS_IOLN(d);
681 if (d == CIO2_DMA_CHAN)
682 cio2_queue_event_sof(cio2,
683 cio2->cur_queue);
684 }
685 int_status &= ~clr;
686 }
687
688 if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
689 /* CSI2 receiver (error) interrupt */
690 u32 ie_status, ie_clear;
691 unsigned int port;
692
693 ie_clear = readl(base + CIO2_REG_INT_STS_EXT_IE);
694 ie_status = ie_clear;
695
696 for (port = 0; port < CIO2_NUM_PORTS; port++) {
697 u32 port_status = (ie_status >> (port * 8)) & 0xff;
698 u32 err_mask = BIT_MASK(ARRAY_SIZE(cio2_port_errs)) - 1;
699 void __iomem *const csi_rx_base =
700 base + CIO2_REG_PIPE_BASE(port);
701 unsigned int i;
702
703 while (port_status & err_mask) {
704 i = ffs(port_status) - 1;
705 dev_err(dev, "port %i error %s\n",
706 port, cio2_port_errs[i]);
707 ie_status &= ~BIT(port * 8 + i);
708 port_status &= ~BIT(i);
709 }
710
711 if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
712 u32 csi2_status, csi2_clear;
713
714 csi2_status = readl(csi_rx_base +
715 CIO2_REG_IRQCTRL_STATUS);
716 csi2_clear = csi2_status;
717 err_mask =
718 BIT_MASK(ARRAY_SIZE(cio2_irq_errs)) - 1;
719
720 while (csi2_status & err_mask) {
721 i = ffs(csi2_status) - 1;
722 dev_err(dev,
723 "CSI-2 receiver port %i: %s\n",
724 port, cio2_irq_errs[i]);
725 csi2_status &= ~BIT(i);
726 }
727
728 writel(csi2_clear,
729 csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
730 if (csi2_status)
731 dev_warn(dev,
732 "unknown CSI2 error 0x%x on port %i\n",
733 csi2_status, port);
734
735 ie_status &= ~CIO2_INT_EXT_IE_IRQ(port);
736 }
737 }
738
739 writel(ie_clear, base + CIO2_REG_INT_STS_EXT_IE);
740 if (ie_status)
741 dev_warn(dev, "unknown interrupt 0x%x on IE\n",
742 ie_status);
743
744 int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
745 }
746
747 if (int_status)
748 dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status);
749}
750
751static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
752{
753 struct cio2_device *cio2 = cio2_ptr;
754 void __iomem *const base = cio2->base;
755 struct device *dev = &cio2->pci_dev->dev;
756 u32 int_status;
757
758 int_status = readl(base + CIO2_REG_INT_STS);
759 dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status);
760 if (!int_status)
761 return IRQ_NONE;
762
763 do {
764 writel(int_status, base + CIO2_REG_INT_STS);
765 cio2_irq_handle_once(cio2, int_status);
766 int_status = readl(base + CIO2_REG_INT_STS);
767 if (int_status)
768 dev_dbg(dev, "pending status 0x%x\n", int_status);
769 } while (int_status);
770
771 return IRQ_HANDLED;
772}
773
774/**************** Videobuf2 interface ****************/
775
776static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
777 enum vb2_buffer_state state)
778{
779 unsigned int i;
780
781 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
782 if (q->bufs[i]) {
783 atomic_dec(&q->bufs_queued);
784 vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
dcd80955 785 state);
61e7f892 786 q->bufs[i] = NULL;
787 }
788 }
789}
790
791static int cio2_vb2_queue_setup(struct vb2_queue *vq,
792 unsigned int *num_buffers,
793 unsigned int *num_planes,
794 unsigned int sizes[],
795 struct device *alloc_devs[])
796{
797 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
798 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
799 unsigned int i;
800
801 *num_planes = q->format.num_planes;
802
803 for (i = 0; i < *num_planes; ++i) {
804 sizes[i] = q->format.plane_fmt[i].sizeimage;
805 alloc_devs[i] = &cio2->pci_dev->dev;
806 }
807
808 *num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);
809
810 /* Initialize buffer queue */
811 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
812 q->bufs[i] = NULL;
813 cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
814 }
815 atomic_set(&q->bufs_queued, 0);
816 q->bufs_first = 0;
817 q->bufs_next = 0;
818
819 return 0;
820}
821
822/* Called after each buffer is allocated */
823static int cio2_vb2_buf_init(struct vb2_buffer *vb)
824{
825 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
826 struct device *dev = &cio2->pci_dev->dev;
827 struct cio2_buffer *b =
828 container_of(vb, struct cio2_buffer, vbb.vb2_buf);
9a262c7a 829 unsigned int pages = PFN_UP(vb->planes[0].length);
7b285f41 830 unsigned int lops = DIV_ROUND_UP(pages + 1, CIO2_LOP_ENTRIES);
c2a6a07a 831 struct sg_table *sg;
d901b276 832 struct sg_dma_page_iter sg_iter;
bbacb274 833 unsigned int i, j;
834
835 if (lops <= 0 || lops > CIO2_MAX_LOPS) {
836 dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
837 vb->planes[0].length);
838 return -ENOSPC; /* Should never happen */
839 }
840
841 memset(b->lop, 0, sizeof(b->lop));
842 /* Allocate LOP table */
843 for (i = 0; i < lops; i++) {
e186f932 844 b->lop[i] = dma_alloc_coherent(dev, PAGE_SIZE,
845 &b->lop_bus_addr[i], GFP_KERNEL);
846 if (!b->lop[i])
847 goto fail;
848 }
849
850 /* Fill LOP */
851 sg = vb2_dma_sg_plane_desc(vb, 0);
852 if (!sg)
853 return -ENOMEM;
854
855 if (sg->nents && sg->sgl)
856 b->offset = sg->sgl->offset;
857
858 i = j = 0;
30f573a8 859 for_each_sg_dma_page(sg->sgl, &sg_iter, sg->nents, 0) {
860 if (!pages--)
861 break;
9a262c7a 862 b->lop[i][j] = PFN_DOWN(sg_page_iter_dma_address(&sg_iter));
c2a6a07a 863 j++;
7b285f41 864 if (j == CIO2_LOP_ENTRIES) {
865 i++;
866 j = 0;
867 }
868 }
869
9a262c7a 870 b->lop[i][j] = PFN_DOWN(cio2->dummy_page_bus_addr);
871 return 0;
872fail:
bbacb274 873 while (i--)
e186f932 874 dma_free_coherent(dev, PAGE_SIZE, b->lop[i], b->lop_bus_addr[i]);
875 return -ENOMEM;
876}
877
878/* Transfer buffer ownership to cio2 */
879static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
880{
881 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
882 struct cio2_queue *q =
883 container_of(vb->vb2_queue, struct cio2_queue, vbq);
884 struct cio2_buffer *b =
885 container_of(vb, struct cio2_buffer, vbb.vb2_buf);
886 struct cio2_fbpt_entry *entry;
887 unsigned long flags;
888 unsigned int i, j, next = q->bufs_next;
889 int bufs_queued = atomic_inc_return(&q->bufs_queued);
890 u32 fbpt_rp;
891
892 dev_dbg(&cio2->pci_dev->dev, "queue buffer %d\n", vb->index);
893
894 /*
895 * This code queues the buffer to the CIO2 DMA engine, which starts
896 * running once streaming has started. It is possible that this code
 897 * gets pre-empted due to increased CPU load. When that happens, the driver
898 * does not get an opportunity to queue new buffers to the CIO2 DMA
899 * engine. When the DMA engine encounters an FBPT entry without the
900 * VALID bit set, the DMA engine halts, which requires a restart of
901 * the DMA engine and sensor, to continue streaming.
902 * This is not desired and is highly unlikely given that there are
 903 * 32 FBPT entries that the DMA engine needs to process before it runs
 904 * into an FBPT entry without the VALID bit set. We try to mitigate this
905 * by disabling interrupts for the duration of this queueing.
906 */
907 local_irq_save(flags);
908
909 fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
910 >> CIO2_CDMARI_FBPT_RP_SHIFT)
911 & CIO2_CDMARI_FBPT_RP_MASK;
912
913 /*
914 * fbpt_rp is the fbpt entry that the dma is currently working
915 * on, but since it could jump to next entry at any time,
916 * assume that we might already be there.
917 */
918 fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
919
920 if (bufs_queued <= 1 || fbpt_rp == next)
921 /* Buffers were drained */
922 next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
923
924 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
925 /*
926 * We have allocated CIO2_MAX_BUFFERS circularly for the
 927 * hw, while the user has requested a queue of N buffers. The driver
928 * ensures N <= CIO2_MAX_BUFFERS and guarantees that whenever
929 * user queues a buffer, there necessarily is a free buffer.
930 */
931 if (!q->bufs[next]) {
932 q->bufs[next] = b;
933 entry = &q->fbpt[next * CIO2_MAX_LOPS];
934 cio2_fbpt_entry_init_buf(cio2, b, entry);
935 local_irq_restore(flags);
936 q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
937 for (j = 0; j < vb->num_planes; j++)
938 vb2_set_plane_payload(vb, j,
939 q->format.plane_fmt[j].sizeimage);
940 return;
941 }
942
943 dev_dbg(&cio2->pci_dev->dev, "entry %i was full!\n", next);
944 next = (next + 1) % CIO2_MAX_BUFFERS;
945 }
946
947 local_irq_restore(flags);
948 dev_err(&cio2->pci_dev->dev, "error: all cio2 entries were full!\n");
949 atomic_dec(&q->bufs_queued);
950 vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
951}
952
953/* Called when each buffer is freed */
954static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
955{
956 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
957 struct cio2_buffer *b =
958 container_of(vb, struct cio2_buffer, vbb.vb2_buf);
959 unsigned int i;
960
961 /* Free LOP table */
962 for (i = 0; i < CIO2_MAX_LOPS; i++) {
963 if (b->lop[i])
e186f932 964 dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
965 b->lop[i], b->lop_bus_addr[i]);
966 }
967}
968
969static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
970{
971 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
972 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
973 int r;
974
975 cio2->cur_queue = q;
976 atomic_set(&q->frame_sequence, 0);
977
978 r = pm_runtime_get_sync(&cio2->pci_dev->dev);
979 if (r < 0) {
980 dev_info(&cio2->pci_dev->dev, "failed to set power %d\n", r);
981 pm_runtime_put_noidle(&cio2->pci_dev->dev);
982 return r;
983 }
984
985 r = media_pipeline_start(&q->vdev.entity, &q->pipe);
986 if (r)
987 goto fail_pipeline;
988
989 r = cio2_hw_init(cio2, q);
990 if (r)
991 goto fail_hw;
992
993 /* Start streaming on sensor */
994 r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
995 if (r)
996 goto fail_csi2_subdev;
997
998 cio2->streaming = true;
999
1000 return 0;
1001
1002fail_csi2_subdev:
1003 cio2_hw_exit(cio2, q);
1004fail_hw:
1005 media_pipeline_stop(&q->vdev.entity);
1006fail_pipeline:
1007 dev_dbg(&cio2->pci_dev->dev, "failed to start streaming (%d)\n", r);
dcd80955 1008 cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
1009 pm_runtime_put(&cio2->pci_dev->dev);
1010
1011 return r;
1012}
1013
1014static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
1015{
1016 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
1017 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
1018
1019 if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
1020 dev_err(&cio2->pci_dev->dev,
1021 "failed to stop sensor streaming\n");
1022
1023 cio2_hw_exit(cio2, q);
d69a5a2c 1024 synchronize_irq(cio2->pci_dev->irq);
dcd80955 1025 cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
1026 media_pipeline_stop(&q->vdev.entity);
1027 pm_runtime_put(&cio2->pci_dev->dev);
1028 cio2->streaming = false;
1029}
1030
1031static const struct vb2_ops cio2_vb2_ops = {
1032 .buf_init = cio2_vb2_buf_init,
1033 .buf_queue = cio2_vb2_buf_queue,
1034 .buf_cleanup = cio2_vb2_buf_cleanup,
1035 .queue_setup = cio2_vb2_queue_setup,
1036 .start_streaming = cio2_vb2_start_streaming,
1037 .stop_streaming = cio2_vb2_stop_streaming,
1038 .wait_prepare = vb2_ops_wait_prepare,
1039 .wait_finish = vb2_ops_wait_finish,
1040};
1041
1042/**************** V4L2 interface ****************/
1043
1044static int cio2_v4l2_querycap(struct file *file, void *fh,
1045 struct v4l2_capability *cap)
1046{
1047 struct cio2_device *cio2 = video_drvdata(file);
1048
1049 strscpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
1050 strscpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
1051 snprintf(cap->bus_info, sizeof(cap->bus_info),
1052 "PCI:%s", pci_name(cio2->pci_dev));
1053
1054 return 0;
1055}
1056
1057static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
1058 struct v4l2_fmtdesc *f)
1059{
1060 if (f->index >= ARRAY_SIZE(formats))
1061 return -EINVAL;
1062
1063 f->pixelformat = formats[f->index].fourcc;
1064
1065 return 0;
1066}
1067
1068/* The format is validated in cio2_video_link_validate() */
1069static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
1070{
1071 struct cio2_queue *q = file_to_cio2_queue(file);
1072
1073 f->fmt.pix_mp = q->format;
1074
1075 return 0;
1076}
1077
1078static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
1079{
1080 const struct ipu3_cio2_fmt *fmt;
1081 struct v4l2_pix_format_mplane *mpix = &f->fmt.pix_mp;
1082
1083 fmt = cio2_find_format(&mpix->pixelformat, NULL);
1084 if (!fmt)
1085 fmt = &formats[0];
1086
1087 /* Only supports up to 4224x3136 */
1088 if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
1089 mpix->width = CIO2_IMAGE_MAX_WIDTH;
1090 if (mpix->height > CIO2_IMAGE_MAX_HEIGHT)
1091 mpix->height = CIO2_IMAGE_MAX_HEIGHT;
1092
1093 mpix->num_planes = 1;
1094 mpix->pixelformat = fmt->fourcc;
1095 mpix->colorspace = V4L2_COLORSPACE_RAW;
1096 mpix->field = V4L2_FIELD_NONE;
1097 memset(mpix->reserved, 0, sizeof(mpix->reserved));
1098 mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
1099 mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
1100 mpix->height;
1101 memset(mpix->plane_fmt[0].reserved, 0,
1102 sizeof(mpix->plane_fmt[0].reserved));
1103
1104 /* use default */
1105 mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
1106 mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
1107 mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
1108
1109 return 0;
1110}
1111
1112static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
1113{
1114 struct cio2_queue *q = file_to_cio2_queue(file);
1115
1116 cio2_v4l2_try_fmt(file, fh, f);
1117 q->format = f->fmt.pix_mp;
1118
1119 return 0;
1120}
1121
1122static int
1123cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
1124{
1125 if (input->index > 0)
1126 return -EINVAL;
1127
c0decac1 1128 strscpy(input->name, "camera", sizeof(input->name));
1129 input->type = V4L2_INPUT_TYPE_CAMERA;
1130
1131 return 0;
1132}
1133
1134static int
1135cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
1136{
1137 *input = 0;
1138
1139 return 0;
1140}
1141
1142static int
1143cio2_video_s_input(struct file *file, void *fh, unsigned int input)
1144{
1145 return input == 0 ? 0 : -EINVAL;
1146}
1147
1148static const struct v4l2_file_operations cio2_v4l2_fops = {
1149 .owner = THIS_MODULE,
1150 .unlocked_ioctl = video_ioctl2,
1151 .open = v4l2_fh_open,
1152 .release = vb2_fop_release,
1153 .poll = vb2_fop_poll,
1154 .mmap = vb2_fop_mmap,
1155};
1156
1157static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
1158 .vidioc_querycap = cio2_v4l2_querycap,
7e98b7b5 1159 .vidioc_enum_fmt_vid_cap = cio2_v4l2_enum_fmt,
1160 .vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
1161 .vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
1162 .vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
1163 .vidioc_reqbufs = vb2_ioctl_reqbufs,
1164 .vidioc_create_bufs = vb2_ioctl_create_bufs,
1165 .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
1166 .vidioc_querybuf = vb2_ioctl_querybuf,
1167 .vidioc_qbuf = vb2_ioctl_qbuf,
1168 .vidioc_dqbuf = vb2_ioctl_dqbuf,
1169 .vidioc_streamon = vb2_ioctl_streamon,
1170 .vidioc_streamoff = vb2_ioctl_streamoff,
1171 .vidioc_expbuf = vb2_ioctl_expbuf,
1172 .vidioc_enum_input = cio2_video_enum_input,
1173 .vidioc_g_input = cio2_video_g_input,
1174 .vidioc_s_input = cio2_video_s_input,
1175};
1176
1177static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
1178 struct v4l2_fh *fh,
1179 struct v4l2_event_subscription *sub)
1180{
1181 if (sub->type != V4L2_EVENT_FRAME_SYNC)
1182 return -EINVAL;
1183
 1184 /* Line number. For now, only zero is accepted. */
1185 if (sub->id != 0)
1186 return -EINVAL;
1187
1188 return v4l2_event_subscribe(fh, sub, 0, NULL);
1189}
1190
1191static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
1192{
1193 struct v4l2_mbus_framefmt *format;
1194 const struct v4l2_mbus_framefmt fmt_default = {
1195 .width = 1936,
1196 .height = 1096,
1197 .code = formats[0].mbus_code,
1198 .field = V4L2_FIELD_NONE,
1199 .colorspace = V4L2_COLORSPACE_RAW,
1200 .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
1201 .quantization = V4L2_QUANTIZATION_DEFAULT,
1202 .xfer_func = V4L2_XFER_FUNC_DEFAULT,
1203 };
1204
1205 /* Initialize try_fmt */
1206 format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SINK);
1207 *format = fmt_default;
1208
1209 /* same as sink */
1210 format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SOURCE);
1211 *format = fmt_default;
1212
1213 return 0;
1214}
1215
1216/*
1217 * cio2_subdev_get_fmt - Handle get format by pads subdev method
1218 * @sd : pointer to v4l2 subdev structure
1219 * @cfg: V4L2 subdev pad config
1220 * @fmt: pointer to v4l2 subdev format structure
1221 * return -EINVAL or zero on success
1222 */
1223static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
1224 struct v4l2_subdev_pad_config *cfg,
1225 struct v4l2_subdev_format *fmt)
1226{
1227 struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
c2a6a07a 1228
1229 mutex_lock(&q->subdev_lock);
1230
8160e867 1231 if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
c2a6a07a 1232 fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
1233 else
1234 fmt->format = q->subdev_fmt;
c2a6a07a 1235
1236 mutex_unlock(&q->subdev_lock);
1237
1238 return 0;
1239}
1240
1241/*
1242 * cio2_subdev_set_fmt - Handle set format by pads subdev method
1243 * @sd : pointer to v4l2 subdev structure
1244 * @cfg: V4L2 subdev pad config
1245 * @fmt: pointer to v4l2 subdev format structure
1246 * return -EINVAL or zero on success
1247 */
1248static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
1249 struct v4l2_subdev_pad_config *cfg,
1250 struct v4l2_subdev_format *fmt)
1251{
1252 struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1253 struct v4l2_mbus_framefmt *mbus;
1254 u32 mbus_code = fmt->format.code;
1255 unsigned int i;
1256
1257 /*
1258 * Only allow setting sink pad format;
1259 * source always propagates from sink
1260 */
1261 if (fmt->pad == CIO2_PAD_SOURCE)
1262 return cio2_subdev_get_fmt(sd, cfg, fmt);
1263
1264 if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
1265 mbus = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
1266 else
1267 mbus = &q->subdev_fmt;
55a6c6b2 1268
1269 fmt->format.code = formats[0].mbus_code;
1270
1271 for (i = 0; i < ARRAY_SIZE(formats); i++) {
1272 if (formats[i].mbus_code == fmt->format.code) {
1273 fmt->format.code = mbus_code;
1274 break;
1275 }
1276 }
1277
40f072e1 1278 fmt->format.width = min(fmt->format.width, CIO2_IMAGE_MAX_WIDTH);
b369132f 1279 fmt->format.height = min(fmt->format.height, CIO2_IMAGE_MAX_HEIGHT);
219a8b9c 1280 fmt->format.field = V4L2_FIELD_NONE;
1281
1282 mutex_lock(&q->subdev_lock);
1283 *mbus = fmt->format;
1284 mutex_unlock(&q->subdev_lock);
1285
1286 return 0;
1287}
1288
1289static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
1290 struct v4l2_subdev_pad_config *cfg,
1291 struct v4l2_subdev_mbus_code_enum *code)
1292{
1293 if (code->index >= ARRAY_SIZE(formats))
1294 return -EINVAL;
1295
1296 code->code = formats[code->index].mbus_code;
1297 return 0;
1298}
1299
1300static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
1301 struct v4l2_subdev_format *fmt)
1302{
1303 if (is_media_entity_v4l2_subdev(pad->entity)) {
1304 struct v4l2_subdev *sd =
1305 media_entity_to_v4l2_subdev(pad->entity);
1306
1307 fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
1308 fmt->pad = pad->index;
1309 return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
1310 }
1311
1312 return -EINVAL;
1313}
1314
1315static int cio2_video_link_validate(struct media_link *link)
1316{
1317 struct video_device *vd = container_of(link->sink->entity,
1318 struct video_device, entity);
1319 struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
1320 struct cio2_device *cio2 = video_get_drvdata(vd);
1321 struct v4l2_subdev_format source_fmt;
1322 int ret;
1323
1324 if (!media_entity_remote_pad(link->sink->entity->pads)) {
1325 dev_info(&cio2->pci_dev->dev,
1326 "video node %s pad not connected\n", vd->name);
1327 return -ENOTCONN;
1328 }
1329
1330 ret = cio2_subdev_link_validate_get_format(link->source, &source_fmt);
1331 if (ret < 0)
1332 return 0;
1333
1334 if (source_fmt.format.width != q->format.width ||
1335 source_fmt.format.height != q->format.height) {
1336 dev_err(&cio2->pci_dev->dev,
1337 "Wrong width or height %ux%u (%ux%u expected)\n",
1338 q->format.width, q->format.height,
1339 source_fmt.format.width, source_fmt.format.height);
1340 return -EINVAL;
1341 }
1342
1343 if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code))
1344 return -EINVAL;
1345
1346 return 0;
1347}
1348
1349static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
1350 .subscribe_event = cio2_subdev_subscribe_event,
1351 .unsubscribe_event = v4l2_event_subdev_unsubscribe,
1352};
1353
1354static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
1355 .open = cio2_subdev_open,
1356};
1357
1358static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
1359 .link_validate = v4l2_subdev_link_validate_default,
1360 .get_fmt = cio2_subdev_get_fmt,
1361 .set_fmt = cio2_subdev_set_fmt,
1362 .enum_mbus_code = cio2_subdev_enum_mbus_code,
1363};
1364
1365static const struct v4l2_subdev_ops cio2_subdev_ops = {
1366 .core = &cio2_subdev_core_ops,
1367 .pad = &cio2_subdev_pad_ops,
1368};
1369
1370/******* V4L2 sub-device asynchronous registration callbacks***********/
1371
1372struct sensor_async_subdev {
1373 struct v4l2_async_subdev asd;
1374 struct csi2_bus_info csi2;
1375};
1376
1377/* The .bound() notifier callback when a match is found */
1378static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
1379 struct v4l2_subdev *sd,
1380 struct v4l2_async_subdev *asd)
1381{
1382 struct cio2_device *cio2 = container_of(notifier,
1383 struct cio2_device, notifier);
1384 struct sensor_async_subdev *s_asd = container_of(asd,
1385 struct sensor_async_subdev, asd);
1386 struct cio2_queue *q;
1387
1388 if (cio2->queue[s_asd->csi2.port].sensor)
1389 return -EBUSY;
1390
1391 q = &cio2->queue[s_asd->csi2.port];
1392
1393 q->csi2 = s_asd->csi2;
1394 q->sensor = sd;
1395 q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);
1396
1397 return 0;
1398}
1399
1400/* The .unbind callback */
1401static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
1402 struct v4l2_subdev *sd,
1403 struct v4l2_async_subdev *asd)
1404{
1405 struct cio2_device *cio2 = container_of(notifier,
1406 struct cio2_device, notifier);
1407 struct sensor_async_subdev *s_asd = container_of(asd,
1408 struct sensor_async_subdev, asd);
1409
1410 cio2->queue[s_asd->csi2.port].sensor = NULL;
1411}
1412
1413/* .complete() is called after all subdevices have been located */
1414static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
1415{
1416 struct cio2_device *cio2 = container_of(notifier, struct cio2_device,
1417 notifier);
1418 struct sensor_async_subdev *s_asd;
eae2aed1 1419 struct v4l2_async_subdev *asd;
c2a6a07a 1420 struct cio2_queue *q;
eae2aed1 1421 unsigned int pad;
1422 int ret;
1423
1424 list_for_each_entry(asd, &cio2->notifier.asd_list, asd_list) {
1425 s_asd = container_of(asd, struct sensor_async_subdev, asd);
1426 q = &cio2->queue[s_asd->csi2.port];
1427
1428 for (pad = 0; pad < q->sensor->entity.num_pads; pad++)
1429 if (q->sensor->entity.pads[pad].flags &
1430 MEDIA_PAD_FL_SOURCE)
1431 break;
1432
1433 if (pad == q->sensor->entity.num_pads) {
1434 dev_err(&cio2->pci_dev->dev,
1435 "failed to find src pad for %s\n",
1436 q->sensor->name);
1437 return -ENXIO;
1438 }
1439
1440 ret = media_create_pad_link(
1441 &q->sensor->entity, pad,
1442 &q->subdev.entity, CIO2_PAD_SINK,
1443 0);
1444 if (ret) {
1445 dev_err(&cio2->pci_dev->dev,
1446 "failed to create link for %s\n",
eae2aed1 1447 q->sensor->name);
1448 return ret;
1449 }
1450 }
1451
1452 return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
1453}
1454
1455static const struct v4l2_async_notifier_operations cio2_async_ops = {
1456 .bound = cio2_notifier_bound,
1457 .unbind = cio2_notifier_unbind,
1458 .complete = cio2_notifier_complete,
1459};
1460
2c933466 1461static int cio2_parse_firmware(struct cio2_device *cio2)
c2a6a07a 1462{
1463 unsigned int i;
1464 int ret;
c2a6a07a 1465
1466 for (i = 0; i < CIO2_NUM_PORTS; i++) {
1467 struct v4l2_fwnode_endpoint vep = {
1468 .bus_type = V4L2_MBUS_CSI2_DPHY
1469 };
1470 struct sensor_async_subdev *s_asd = NULL;
1471 struct fwnode_handle *ep;
c2a6a07a 1472
1473 ep = fwnode_graph_get_endpoint_by_id(
1474 dev_fwnode(&cio2->pci_dev->dev), i, 0,
1475 FWNODE_GRAPH_ENDPOINT_NEXT);
c2a6a07a 1476
1477 if (!ep)
1478 continue;
c2a6a07a 1479
1480 ret = v4l2_fwnode_endpoint_parse(ep, &vep);
1481 if (ret)
1482 goto err_parse;
c2a6a07a 1483
1484 s_asd = kzalloc(sizeof(*s_asd), GFP_KERNEL);
1485 if (!s_asd) {
1486 ret = -ENOMEM;
1487 goto err_parse;
1488 }
eae2aed1 1489
1490 s_asd->csi2.port = vep.base.port;
1491 s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes;
1492
1493 ret = v4l2_async_notifier_add_fwnode_remote_subdev(
1494 &cio2->notifier, ep, &s_asd->asd);
1495 if (ret)
1496 goto err_parse;
1497
1498 fwnode_handle_put(ep);
1499
1500 continue;
1501
1502err_parse:
1503 fwnode_handle_put(ep);
1504 kfree(s_asd);
1505 return ret;
1506 }
c2a6a07a 1507
1508 /*
1509 * Proceed even without sensors connected to allow the device to
1510 * suspend.
1511 */
1512 cio2->notifier.ops = &cio2_async_ops;
1513 ret = v4l2_async_notifier_register(&cio2->v4l2_dev, &cio2->notifier);
2c933466 1514 if (ret)
1515 dev_err(&cio2->pci_dev->dev,
1516 "failed to register async notifier : %d\n", ret);
f6a5242b 1517
1518 return ret;
1519}
1520
1521/**************** Queue initialization ****************/
1522static const struct media_entity_operations cio2_media_ops = {
1523 .link_validate = v4l2_subdev_link_validate,
1524};
1525
1526static const struct media_entity_operations cio2_video_entity_ops = {
1527 .link_validate = cio2_video_link_validate,
1528};
1529
1530static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
1531{
1532 static const u32 default_width = 1936;
1533 static const u32 default_height = 1096;
1534 const struct ipu3_cio2_fmt dflt_fmt = formats[0];
1535
1536 struct video_device *vdev = &q->vdev;
1537 struct vb2_queue *vbq = &q->vbq;
1538 struct v4l2_subdev *subdev = &q->subdev;
1539 struct v4l2_mbus_framefmt *fmt;
1540 int r;
1541
1542 /* Initialize miscellaneous variables */
1543 mutex_init(&q->lock);
55a6c6b2 1544 mutex_init(&q->subdev_lock);
1545
1546 /* Initialize formats to default values */
1547 fmt = &q->subdev_fmt;
1548 fmt->width = default_width;
1549 fmt->height = default_height;
1550 fmt->code = dflt_fmt.mbus_code;
1551 fmt->field = V4L2_FIELD_NONE;
1552
1553 q->format.width = default_width;
1554 q->format.height = default_height;
1555 q->format.pixelformat = dflt_fmt.fourcc;
1556 q->format.colorspace = V4L2_COLORSPACE_RAW;
1557 q->format.field = V4L2_FIELD_NONE;
1558 q->format.num_planes = 1;
1559 q->format.plane_fmt[0].bytesperline =
1560 cio2_bytesperline(q->format.width);
1561 q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline *
1562 q->format.height;
1563
1564 /* Initialize fbpt */
1565 r = cio2_fbpt_init(cio2, q);
1566 if (r)
1567 goto fail_fbpt;
1568
1569 /* Initialize media entities */
1570 q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
1571 MEDIA_PAD_FL_MUST_CONNECT;
1572 q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
1573 subdev->entity.ops = &cio2_media_ops;
1574 subdev->internal_ops = &cio2_subdev_internal_ops;
1575 r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
1576 if (r) {
1577 dev_err(&cio2->pci_dev->dev,
1578 "failed initialize subdev media entity (%d)\n", r);
1579 goto fail_subdev_media_entity;
1580 }
1581
1582 q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
1583 vdev->entity.ops = &cio2_video_entity_ops;
1584 r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
1585 if (r) {
1586 dev_err(&cio2->pci_dev->dev,
1587 "failed initialize videodev media entity (%d)\n", r);
1588 goto fail_vdev_media_entity;
1589 }
1590
1591 /* Initialize subdev */
1592 v4l2_subdev_init(subdev, &cio2_subdev_ops);
1593 subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
1594 subdev->owner = THIS_MODULE;
1595 snprintf(subdev->name, sizeof(subdev->name),
1596 CIO2_ENTITY_NAME " %td", q - cio2->queue);
a5c7caa1 1597 subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
1598 v4l2_set_subdevdata(subdev, cio2);
1599 r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
1600 if (r) {
1601 dev_err(&cio2->pci_dev->dev,
1602 "failed initialize subdev (%d)\n", r);
1603 goto fail_subdev;
1604 }
1605
1606 /* Initialize vbq */
1607 vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1608 vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
1609 vbq->ops = &cio2_vb2_ops;
1610 vbq->mem_ops = &vb2_dma_sg_memops;
1611 vbq->buf_struct_size = sizeof(struct cio2_buffer);
1612 vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
1613 vbq->min_buffers_needed = 1;
1614 vbq->drv_priv = cio2;
1615 vbq->lock = &q->lock;
1616 r = vb2_queue_init(vbq);
1617 if (r) {
1618 dev_err(&cio2->pci_dev->dev,
1619 "failed to initialize videobuf2 queue (%d)\n", r);
11788d9b 1620 goto fail_subdev;
1621 }
1622
1623 /* Initialize vdev */
1624 snprintf(vdev->name, sizeof(vdev->name),
1625 "%s %td", CIO2_NAME, q - cio2->queue);
1626 vdev->release = video_device_release_empty;
1627 vdev->fops = &cio2_v4l2_fops;
1628 vdev->ioctl_ops = &cio2_v4l2_ioctl_ops;
1629 vdev->lock = &cio2->lock;
1630 vdev->v4l2_dev = &cio2->v4l2_dev;
1631 vdev->queue = &q->vbq;
1632 vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
1633 video_set_drvdata(vdev, cio2);
3e30a927 1634 r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
1635 if (r) {
1636 dev_err(&cio2->pci_dev->dev,
1637 "failed to register video device (%d)\n", r);
1638 goto fail_vdev;
1639 }
1640
1641 /* Create link from CIO2 subdev to output node */
1642 r = media_create_pad_link(
1643 &subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0,
1644 MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
1645 if (r)
1646 goto fail_link;
1647
1648 return 0;
1649
1650fail_link:
11788d9b 1651 vb2_video_unregister_device(&q->vdev);
c2a6a07a 1652fail_vdev:
1653 v4l2_device_unregister_subdev(subdev);
1654fail_subdev:
1655 media_entity_cleanup(&vdev->entity);
1656fail_vdev_media_entity:
1657 media_entity_cleanup(&subdev->entity);
1658fail_subdev_media_entity:
1659 cio2_fbpt_exit(q, &cio2->pci_dev->dev);
1660fail_fbpt:
55a6c6b2 1661 mutex_destroy(&q->subdev_lock);
1662 mutex_destroy(&q->lock);
1663
1664 return r;
1665}
1666
1667static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
1668{
11788d9b 1669 vb2_video_unregister_device(&q->vdev);
c2a6a07a 1670 media_entity_cleanup(&q->vdev.entity);
1671 v4l2_device_unregister_subdev(&q->subdev);
1672 media_entity_cleanup(&q->subdev.entity);
1673 cio2_fbpt_exit(q, &cio2->pci_dev->dev);
55a6c6b2 1674 mutex_destroy(&q->subdev_lock);
1675 mutex_destroy(&q->lock);
1676}
1677
1678static int cio2_queues_init(struct cio2_device *cio2)
1679{
1680 int i, r;
1681
1682 for (i = 0; i < CIO2_QUEUES; i++) {
1683 r = cio2_queue_init(cio2, &cio2->queue[i]);
1684 if (r)
1685 break;
1686 }
1687
1688 if (i == CIO2_QUEUES)
1689 return 0;
1690
1691 for (i--; i >= 0; i--)
1692 cio2_queue_exit(cio2, &cio2->queue[i]);
1693
1694 return r;
1695}
1696
1697static void cio2_queues_exit(struct cio2_device *cio2)
1698{
1699 unsigned int i;
1700
1701 for (i = 0; i < CIO2_QUEUES; i++)
1702 cio2_queue_exit(cio2, &cio2->queue[i]);
1703}
1704
1705/**************** PCI interface ****************/
1706
1707static int cio2_pci_probe(struct pci_dev *pci_dev,
1708 const struct pci_device_id *id)
1709{
1710 struct cio2_device *cio2;
1711 int r;
1712
1713 cio2 = devm_kzalloc(&pci_dev->dev, sizeof(*cio2), GFP_KERNEL);
1714 if (!cio2)
1715 return -ENOMEM;
1716 cio2->pci_dev = pci_dev;
1717
1718 r = pcim_enable_device(pci_dev);
1719 if (r) {
1720 dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
1721 return r;
1722 }
1723
1724 dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n",
1725 pci_dev->device, pci_dev->revision);
1726
1727 r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
1728 if (r) {
1729 dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r);
1730 return -ENODEV;
1731 }
1732
c1284138 1733 cio2->base = pcim_iomap_table(pci_dev)[CIO2_PCI_BAR];
1734
1735 pci_set_drvdata(pci_dev, cio2);
1736
1737 pci_set_master(pci_dev);
1738
1739 r = pci_set_dma_mask(pci_dev, CIO2_DMA_MASK);
1740 if (r) {
1741 dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r);
1742 return -ENODEV;
1743 }
1744
1745 r = pci_enable_msi(pci_dev);
1746 if (r) {
1747 dev_err(&pci_dev->dev, "failed to enable MSI (%d)\n", r);
1748 return r;
1749 }
1750
1751 r = cio2_fbpt_init_dummy(cio2);
1752 if (r)
1753 return r;
1754
1755 mutex_init(&cio2->lock);
1756
1757 cio2->media_dev.dev = &cio2->pci_dev->dev;
c0decac1 1758 strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
c2a6a07a
YZ
1759 sizeof(cio2->media_dev.model));
1760 snprintf(cio2->media_dev.bus_info, sizeof(cio2->media_dev.bus_info),
1761 "PCI:%s", pci_name(cio2->pci_dev));
1762 cio2->media_dev.hw_revision = 0;
1763
1764 media_device_init(&cio2->media_dev);
1765 r = media_device_register(&cio2->media_dev);
1766 if (r < 0)
1767 goto fail_mutex_destroy;
1768
1769 cio2->v4l2_dev.mdev = &cio2->media_dev;
1770 r = v4l2_device_register(&pci_dev->dev, &cio2->v4l2_dev);
1771 if (r) {
1772 dev_err(&pci_dev->dev,
1773 "failed to register V4L2 device (%d)\n", r);
1774 goto fail_media_device_unregister;
1775 }
1776
1777 r = cio2_queues_init(cio2);
1778 if (r)
1779 goto fail_v4l2_device_unregister;
1780
2c933466
SA
1781 v4l2_async_notifier_init(&cio2->notifier);
1782
c2a6a07a 1783	/* Register notifier for the subdevices we care about */
2c933466 1784 r = cio2_parse_firmware(cio2);
706c0cff 1785 if (r)
2c933466 1786 goto fail_clean_notifier;
c2a6a07a
YZ
1787
1788 r = devm_request_irq(&pci_dev->dev, pci_dev->irq, cio2_irq,
1789 IRQF_SHARED, CIO2_NAME, cio2);
1790 if (r) {
1791 dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
2c933466 1792 goto fail_clean_notifier;
c2a6a07a
YZ
1793 }
1794
1795 pm_runtime_put_noidle(&pci_dev->dev);
1796 pm_runtime_allow(&pci_dev->dev);
1797
1798 return 0;
1799
2c933466
SA
1800fail_clean_notifier:
1801 v4l2_async_notifier_unregister(&cio2->notifier);
1802 v4l2_async_notifier_cleanup(&cio2->notifier);
c2a6a07a
YZ
1803 cio2_queues_exit(cio2);
1804fail_v4l2_device_unregister:
1805 v4l2_device_unregister(&cio2->v4l2_dev);
1806fail_media_device_unregister:
1807 media_device_unregister(&cio2->media_dev);
1808 media_device_cleanup(&cio2->media_dev);
1809fail_mutex_destroy:
1810 mutex_destroy(&cio2->lock);
1811 cio2_fbpt_exit_dummy(cio2);
1812
1813 return r;
1814}
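
/*
 * Editor's note: illustrative user-space sketch only, not part of the driver.
 * It shows the goto-based error unwinding used by cio2_pci_probe() above:
 * each resource acquired after the first gets a label, and a failure jumps to
 * the label that releases everything acquired so far, in reverse order.
 * Resources taken through devres helpers (devm_*/pcim_*) in the real probe
 * are released automatically, which is why they need no labels there. All
 * names below are invented for the example.
 */
#include <errno.h>
#include <stdio.h>

static int acquire(const char *what, int fail)
{
	if (fail)
		return -ENODEV;
	printf("acquired %s\n", what);
	return 0;
}

static void release(const char *what)
{
	printf("released %s\n", what);
}

static int example_probe(void)
{
	int r;

	r = acquire("A", 0);
	if (r)
		return r;

	r = acquire("B", 0);
	if (r)
		goto fail_a;

	r = acquire("C", 1);	/* pretend the last step fails */
	if (r)
		goto fail_b;

	return 0;

fail_b:
	release("B");
fail_a:
	release("A");
	return r;
}

int main(void)
{
	return example_probe() ? 1 : 0;
}
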
1815
1816static void cio2_pci_remove(struct pci_dev *pci_dev)
1817{
1818 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
c2a6a07a 1819
32388d6e 1820 media_device_unregister(&cio2->media_dev);
2c933466
SA
1821 v4l2_async_notifier_unregister(&cio2->notifier);
1822 v4l2_async_notifier_cleanup(&cio2->notifier);
4e26f692 1823 cio2_queues_exit(cio2);
32388d6e 1824 cio2_fbpt_exit_dummy(cio2);
c2a6a07a 1825 v4l2_device_unregister(&cio2->v4l2_dev);
c2a6a07a
YZ
1826 media_device_cleanup(&cio2->media_dev);
1827 mutex_destroy(&cio2->lock);
1828}
1829
5eb8c768 1830static int __maybe_unused cio2_runtime_suspend(struct device *dev)
c2a6a07a
YZ
1831{
1832 struct pci_dev *pci_dev = to_pci_dev(dev);
1833 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1834 void __iomem *const base = cio2->base;
1835 u16 pm;
1836
1837 writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C);
1838 dev_dbg(dev, "cio2 runtime suspend.\n");
1839
1840 pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1841 pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1842 pm |= CIO2_PMCSR_D3;
1843 pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
1844
1845 return 0;
1846}
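
/*
 * Editor's note: illustrative user-space sketch only, not part of the driver.
 * It demonstrates the shift-right-then-left trick used above on the PMCSR
 * value: shifting by the width of the PowerState field clears that field
 * before the new state is ORed in. EXAMPLE_SHIFT and EXAMPLE_D3 are example
 * values chosen for the sketch, not the driver's constants.
 */
#include <assert.h>
#include <stdint.h>

#define EXAMPLE_SHIFT	2	/* assume the PowerState field occupies bits 1:0 */
#define EXAMPLE_D3	0x3

int main(void)
{
	uint16_t pm = 0xABCD;

	pm = (pm >> EXAMPLE_SHIFT) << EXAMPLE_SHIFT;	/* clear bits 1:0 */
	assert(pm == 0xABCC);
	pm |= EXAMPLE_D3;				/* request D3 */
	assert(pm == 0xABCF);
	return 0;
}
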
1847
5eb8c768 1848static int __maybe_unused cio2_runtime_resume(struct device *dev)
c2a6a07a
YZ
1849{
1850 struct pci_dev *pci_dev = to_pci_dev(dev);
1851 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1852 void __iomem *const base = cio2->base;
1853 u16 pm;
1854
1855 writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
1856 dev_dbg(dev, "cio2 runtime resume.\n");
1857
1858 pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1859 pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1860 pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
1861
1862 return 0;
1863}
1864
1865/*
1866 * Helper function to advance all the elements of a circular buffer by "start"
1867 * positions
1868 */
1869static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
1870{
1871 struct {
1872 size_t begin, end;
1873 } arr[2] = {
1874 { 0, start - 1 },
1875 { start, elems - 1 },
1876 };
1877
6afda56a 1878#define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1)
c2a6a07a
YZ
1879
1880 /* Loop as long as we have out-of-place entries */
6afda56a 1881 while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) {
c2a6a07a
YZ
1882 size_t size0, i;
1883
1884 /*
1885 * Find the number of entries that can be arranged on this
1886 * iteration.
1887 */
6afda56a 1888 size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1]));
c2a6a07a
YZ
1889
1890 /* Swap the entries in two parts of the array. */
1891 for (i = 0; i < size0; i++) {
1892 u8 *d = ptr + elem_size * (arr[1].begin + i);
1893 u8 *s = ptr + elem_size * (arr[0].begin + i);
1894 size_t j;
1895
1896 for (j = 0; j < elem_size; j++)
1897 swap(d[j], s[j]);
1898 }
1899
6afda56a 1900 if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) {
c2a6a07a
YZ
1901 /* The end of the first array remains unarranged. */
1902 arr[0].begin += size0;
1903 } else {
1904 /*
1905 * The first array is fully arranged so we proceed
1906 * handling the next one.
1907 */
1908 arr[0].begin = arr[1].begin;
1909 arr[0].end = arr[1].begin + size0 - 1;
1910 arr[1].begin += size0;
1911 }
1912 }
1913}
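
/*
 * Editor's note: illustrative user-space sketch only, not part of the driver.
 * It shows the net effect of arrange() above: a left rotation of a circular
 * buffer by "start" positions, so the element at index "start" becomes the
 * first one. arrange() does this in place with repeated block swaps; for
 * clarity this sketch simply uses a temporary copy.
 */
#include <stdio.h>
#include <string.h>

static void rotate_left(int *buf, size_t elems, size_t start)
{
	int tmp[8];	/* sized for this example (start <= 8) */

	memcpy(tmp, buf, start * sizeof(*buf));
	memmove(buf, buf + start, (elems - start) * sizeof(*buf));
	memcpy(buf + elems - start, tmp, start * sizeof(*buf));
}

int main(void)
{
	int buf[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
	size_t i;

	rotate_left(buf, 8, 3);		/* element 3 moves to the front */
	for (i = 0; i < 8; i++)
		printf("%d ", buf[i]);	/* prints: 3 4 5 6 7 0 1 2 */
	printf("\n");
	return 0;
}
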
1914
1915static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
1916{
1917 unsigned int i, j;
1918
1919 for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS;
1920 i++, j = (j + 1) % CIO2_MAX_BUFFERS)
1921 if (q->bufs[j])
1922 break;
1923
1924 if (i == CIO2_MAX_BUFFERS)
1925 return;
1926
1927 if (j) {
1928 arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS,
1929 CIO2_MAX_BUFFERS, j);
1930 arrange(q->bufs, sizeof(struct cio2_buffer *),
1931 CIO2_MAX_BUFFERS, j);
1932 }
1933
1934 /*
1935 * DMA clears the valid bit when accessing the buffer.
 1936	 * When stopping the stream in the suspend callback, some of the buffers
 1937	 * may be in an invalid state. After resume, when DMA meets an invalid
 1938	 * buffer, it will halt and stop receiving new data.
1939 * To avoid DMA halting, set the valid bit for all buffers in FBPT.
1940 */
1941 for (i = 0; i < CIO2_MAX_BUFFERS; i++)
1942 cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
1943}
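
/*
 * Editor's note: illustrative user-space sketch only, not part of the driver.
 * It shows the wrap-around scan used by cio2_fbpt_rearrange() above to find
 * the first occupied slot starting from an arbitrary position in a circular
 * buffer. The slot contents and NSLOTS are invented for the example.
 */
#include <stddef.h>
#include <stdio.h>

#define NSLOTS 8

int main(void)
{
	const char *slots[NSLOTS] = { NULL, NULL, NULL, "a", "b", NULL, NULL, NULL };
	size_t first = 6;	/* scan starts here and wraps around */
	size_t i, j;

	for (i = 0, j = first; i < NSLOTS; i++, j = (j + 1) % NSLOTS)
		if (slots[j])
			break;

	if (i == NSLOTS)
		printf("no occupied slot\n");
	else
		printf("first occupied slot: %zu\n", j);	/* prints 3 */
	return 0;
}
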
1944
2086dd35 1945static int __maybe_unused cio2_suspend(struct device *dev)
c2a6a07a
YZ
1946{
1947 struct pci_dev *pci_dev = to_pci_dev(dev);
1948 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1949 struct cio2_queue *q = cio2->cur_queue;
1950
1951 dev_dbg(dev, "cio2 suspend\n");
1952 if (!cio2->streaming)
1953 return 0;
1954
1955 /* Stop stream */
1956 cio2_hw_exit(cio2, q);
d69a5a2c 1957 synchronize_irq(pci_dev->irq);
c2a6a07a
YZ
1958
1959 pm_runtime_force_suspend(dev);
1960
1961 /*
 1962	 * Upon resume, hw starts to process the fbpt entries from the beginning,
 1963	 * so relocate the queued buffers to the fbpt head before suspend.
1964 */
1965 cio2_fbpt_rearrange(cio2, q);
1966 q->bufs_first = 0;
1967 q->bufs_next = 0;
1968
1969 return 0;
1970}
1971
2086dd35 1972static int __maybe_unused cio2_resume(struct device *dev)
c2a6a07a 1973{
bfe655d1 1974 struct cio2_device *cio2 = dev_get_drvdata(dev);
c2a6a07a 1975 struct cio2_queue *q = cio2->cur_queue;
39fec547 1976 int r;
c2a6a07a
YZ
1977
1978 dev_dbg(dev, "cio2 resume\n");
1979 if (!cio2->streaming)
1980 return 0;
1981 /* Start stream */
1982 r = pm_runtime_force_resume(&cio2->pci_dev->dev);
1983 if (r < 0) {
1984 dev_err(&cio2->pci_dev->dev,
1985 "failed to set power %d\n", r);
1986 return r;
1987 }
1988
1989 r = cio2_hw_init(cio2, q);
1990 if (r)
 1991		dev_err(dev, "failed to init cio2 hw\n");
1992
1993 return r;
1994}
1995
1996static const struct dev_pm_ops cio2_pm_ops = {
1997 SET_RUNTIME_PM_OPS(&cio2_runtime_suspend, &cio2_runtime_resume, NULL)
1998 SET_SYSTEM_SLEEP_PM_OPS(&cio2_suspend, &cio2_resume)
1999};
2000
2001static const struct pci_device_id cio2_pci_id_table[] = {
2002 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
39fec547 2003 { }
c2a6a07a
YZ
2004};
2005
2006MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);
2007
2008static struct pci_driver cio2_pci_driver = {
2009 .name = CIO2_NAME,
2010 .id_table = cio2_pci_id_table,
2011 .probe = cio2_pci_probe,
2012 .remove = cio2_pci_remove,
2013 .driver = {
2014 .pm = &cio2_pm_ops,
2015 },
2016};
2017
2018module_pci_driver(cio2_pci_driver);
2019
2020MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
2021MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
8f57763e 2022MODULE_AUTHOR("Jian Xu Zheng");
c2a6a07a
YZ
2023MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
2024MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
2025MODULE_LICENSE("GPL v2");
2026MODULE_DESCRIPTION("IPU3 CIO2 driver");