// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017,2020 Intel Corporation
 *
 * Based partially on Intel IPU4 driver written by
 *  Sakari Ailus <sakari.ailus@linux.intel.com>
 *  Samu Onkalo <samu.onkalo@intel.com>
 *  Jouni Högander <jouni.hogander@intel.com>
 *  Jouni Ukkonen <jouni.ukkonen@intel.com>
 *  Antti Laakso <antti.laakso@intel.com>
 */
14 #include <linux/bitops.h>
15 #include <linux/delay.h>
16 #include <linux/interrupt.h>
17 #include <linux/iopoll.h>
19 #include <linux/module.h>
20 #include <linux/pci.h>
21 #include <linux/pfn.h>
22 #include <linux/pm_runtime.h>
23 #include <linux/property.h>
24 #include <linux/vmalloc.h>
26 #include <media/ipu-bridge.h>
27 #include <media/v4l2-ctrls.h>
28 #include <media/v4l2-device.h>
29 #include <media/v4l2-event.h>
30 #include <media/v4l2-fwnode.h>
31 #include <media/v4l2-ioctl.h>
32 #include <media/videobuf2-dma-sg.h>
34 #include "ipu3-cio2.h"
36 struct ipu3_cio2_fmt {
44 * These are raw formats used in Intel's third generation of
45 * Image Processing Unit known as IPU3.
46 * 10bit raw bayer packed, 32 bytes for every 25 pixels,
47 * last LSB 6 bits unused.
49 static const struct ipu3_cio2_fmt formats[] = {
50 { /* put default entry at beginning */
51 .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
52 .fourcc = V4L2_PIX_FMT_IPU3_SGRBG10,
56 .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
57 .fourcc = V4L2_PIX_FMT_IPU3_SGBRG10,
61 .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
62 .fourcc = V4L2_PIX_FMT_IPU3_SBGGR10,
66 .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
67 .fourcc = V4L2_PIX_FMT_IPU3_SRGGB10,
71 .mbus_code = MEDIA_BUS_FMT_Y10_1X10,
72 .fourcc = V4L2_PIX_FMT_IPU3_Y10,
79 * cio2_find_format - lookup color format by fourcc or/and media bus code
80 * @pixelformat: fourcc to match, ignored if null
81 * @mbus_code: media bus code to match, ignored if null
83 static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
88 for (i = 0; i < ARRAY_SIZE(formats); i++) {
89 if (pixelformat && *pixelformat != formats[i].fourcc)
91 if (mbus_code && *mbus_code != formats[i].mbus_code)
100 static inline u32 cio2_bytesperline(const unsigned int width)
103 * 64 bytes for every 50 pixels, the line length
104 * in bytes is multiple of 64 (line end alignment).
106 return DIV_ROUND_UP(width, 50) * 64;
109 /**************** FBPT operations ****************/
111 static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
113 struct device *dev = &cio2->pci_dev->dev;
115 if (cio2->dummy_lop) {
116 dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_lop,
117 cio2->dummy_lop_bus_addr);
118 cio2->dummy_lop = NULL;
120 if (cio2->dummy_page) {
121 dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_page,
122 cio2->dummy_page_bus_addr);
123 cio2->dummy_page = NULL;
127 static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
129 struct device *dev = &cio2->pci_dev->dev;
132 cio2->dummy_page = dma_alloc_coherent(dev, PAGE_SIZE,
133 &cio2->dummy_page_bus_addr,
135 cio2->dummy_lop = dma_alloc_coherent(dev, PAGE_SIZE,
136 &cio2->dummy_lop_bus_addr,
138 if (!cio2->dummy_page || !cio2->dummy_lop) {
139 cio2_fbpt_exit_dummy(cio2);
143 * List of Pointers(LOP) contains 1024x32b pointers to 4KB page each
144 * Initialize each entry to dummy_page bus base address.
146 for (i = 0; i < CIO2_LOP_ENTRIES; i++)
147 cio2->dummy_lop[i] = PFN_DOWN(cio2->dummy_page_bus_addr);
152 static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
153 struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
156 * The CPU first initializes some fields in fbpt, then sets
157 * the VALID bit, this barrier is to ensure that the DMA(device)
158 * does not see the VALID bit enabled before other fields are
159 * initialized; otherwise it could lead to havoc.
164 * Request interrupts for start and completion
165 * Valid bit is applicable only to 1st entry
167 entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
168 CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
171 /* Initialize fpbt entries to point to dummy frame */
172 static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
173 struct cio2_fbpt_entry
174 entry[CIO2_MAX_LOPS])
178 entry[0].first_entry.first_page_offset = 0;
179 entry[1].second_entry.num_of_pages = CIO2_LOP_ENTRIES * CIO2_MAX_LOPS;
180 entry[1].second_entry.last_page_available_bytes = PAGE_SIZE - 1;
182 for (i = 0; i < CIO2_MAX_LOPS; i++)
183 entry[i].lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
185 cio2_fbpt_entry_enable(cio2, entry);
188 /* Initialize fpbt entries to point to a given buffer */
189 static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
190 struct cio2_buffer *b,
191 struct cio2_fbpt_entry
192 entry[CIO2_MAX_LOPS])
194 struct vb2_buffer *vb = &b->vbb.vb2_buf;
195 unsigned int length = vb->planes[0].length;
198 entry[0].first_entry.first_page_offset = b->offset;
199 remaining = length + entry[0].first_entry.first_page_offset;
200 entry[1].second_entry.num_of_pages = PFN_UP(remaining);
202 * last_page_available_bytes has the offset of the last byte in the
203 * last page which is still accessible by DMA. DMA cannot access
204 * beyond this point. Valid range for this is from 0 to 4095.
205 * 0 indicates 1st byte in the page is DMA accessible.
206 * 4095 (PAGE_SIZE - 1) means every single byte in the last page
207 * is available for DMA transfer.
209 remaining = offset_in_page(remaining) ?: PAGE_SIZE;
210 entry[1].second_entry.last_page_available_bytes = remaining - 1;
214 while (remaining > 0) {
215 entry->lop_page_addr = PFN_DOWN(b->lop_bus_addr[i]);
216 remaining -= CIO2_LOP_ENTRIES * PAGE_SIZE;
222 * The first not meaningful FBPT entry should point to a valid LOP
224 entry->lop_page_addr = PFN_DOWN(cio2->dummy_lop_bus_addr);
226 cio2_fbpt_entry_enable(cio2, entry);
229 static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
231 struct device *dev = &cio2->pci_dev->dev;
233 q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
241 static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
243 dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
246 /**************** CSI2 hardware setup ****************/
249 * The CSI2 receiver has several parameters affecting
250 * the receiver timings. These depend on the MIPI bus frequency
251 * F in Hz (sensor transmitter rate) as follows:
252 * register value = (A/1e9 + B * UI) / COUNT_ACC
254 * UI = 1 / (2 * F) in seconds
255 * COUNT_ACC = counter accuracy in seconds
256 * For IPU3 COUNT_ACC = 0.0625
258 * A and B are coefficients from the table below,
259 * depending whether the register minimum or maximum value is
263 * reg_rx_csi_dly_cnt_termen_clane 0 0 38 0
264 * reg_rx_csi_dly_cnt_settle_clane 95 -8 300 -16
266 * reg_rx_csi_dly_cnt_termen_dlane0 0 0 35 4
267 * reg_rx_csi_dly_cnt_settle_dlane0 85 -2 145 -6
268 * reg_rx_csi_dly_cnt_termen_dlane1 0 0 35 4
269 * reg_rx_csi_dly_cnt_settle_dlane1 85 -2 145 -6
270 * reg_rx_csi_dly_cnt_termen_dlane2 0 0 35 4
271 * reg_rx_csi_dly_cnt_settle_dlane2 85 -2 145 -6
272 * reg_rx_csi_dly_cnt_termen_dlane3 0 0 35 4
273 * reg_rx_csi_dly_cnt_settle_dlane3 85 -2 145 -6
275 * We use the minimum values of both A and B.
279 * shift for keeping value range suitable for 32-bit integer arithmetic
281 #define LIMIT_SHIFT 8
283 static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
285 const u32 accinv = 16; /* invert of counter resolution */
286 const u32 uiinv = 500000000; /* 1e9 / 2 */
289 freq >>= LIMIT_SHIFT;
291 if (WARN_ON(freq <= 0 || freq > S32_MAX))
294 * b could be 0, -2 or -8, so |accinv * b| is always
295 * less than (1 << ds) and thus |r| < 500000000.
297 r = accinv * b * (uiinv >> LIMIT_SHIFT);
299 /* max value of a is 95 */
305 /* Calculate the delay value for termination enable of clock lane HS Rx */
306 static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
307 struct cio2_csi2_timing *timing,
308 unsigned int bpp, unsigned int lanes)
310 struct device *dev = &cio2->pci_dev->dev;
316 freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes * 2);
318 dev_err(dev, "error %lld, invalid link_freq\n", freq);
322 timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
323 CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
325 CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
326 timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
327 CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
329 CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
330 timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
331 CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
333 CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
334 timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
335 CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
337 CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
339 dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
340 dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
341 dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
342 dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);
347 static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
349 static const int NUM_VCS = 4;
350 static const int SID; /* Stream id */
351 static const int ENTRY;
352 static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
353 CIO2_FBPT_SUBENTRY_UNIT);
354 const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
355 const struct ipu3_cio2_fmt *fmt;
356 void __iomem *const base = cio2->base;
357 u8 lanes, csi2bus = q->csi2.port;
358 u8 sensor_vc = SENSOR_VIR_CH_DFLT;
359 struct cio2_csi2_timing timing = { 0 };
362 fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
366 lanes = q->csi2.lanes;
368 r = cio2_csi2_calc_timing(cio2, q, &timing, fmt->bpp, lanes);
372 writel(timing.clk_termen, q->csi_rx_base +
373 CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
374 writel(timing.clk_settle, q->csi_rx_base +
375 CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
377 for (i = 0; i < lanes; i++) {
378 writel(timing.dat_termen, q->csi_rx_base +
379 CIO2_REG_CSIRX_DLY_CNT_TERMEN(i));
380 writel(timing.dat_settle, q->csi_rx_base +
381 CIO2_REG_CSIRX_DLY_CNT_SETTLE(i));
384 writel(CIO2_PBM_WMCTRL1_MIN_2CK |
385 CIO2_PBM_WMCTRL1_MID1_2CK |
386 CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1);
387 writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT |
388 CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT |
389 CIO2_PBM_WMCTRL2_OBFFWM_2CK <<
390 CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT |
391 CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT |
392 CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2);
393 writel(CIO2_PBM_ARB_CTRL_LANES_DIV <<
394 CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT |
395 CIO2_PBM_ARB_CTRL_LE_EN |
396 CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN <<
397 CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT |
398 CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP <<
399 CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT,
400 base + CIO2_REG_PBM_ARB_CTRL);
401 writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK,
402 q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
403 writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK,
404 q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);
406 writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
407 writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);
409 /* Configure MIPI backend */
410 for (i = 0; i < NUM_VCS; i++)
411 writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));
413 /* There are 16 short packet LUT entry */
414 for (i = 0; i < 16; i++)
415 writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
416 q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
417 writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
418 q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);
420 writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE);
421 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
422 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
423 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
424 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
425 writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE);
427 writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) |
428 CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN),
429 base + CIO2_REG_INT_EN);
431 writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B)
432 << CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT,
433 base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus));
434 writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT |
435 sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT |
436 fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT,
437 q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
438 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
439 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
440 writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus));
442 writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
443 writel(CIO2_CGC_PRIM_TGE |
447 CIO2_CGC_CSI2_INTERFRAME_TGE |
448 CIO2_CGC_CSI2_PORT_DCGE |
453 CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT |
454 CIO2_CGC_CSI_CLKGATE_HOLDOFF
455 << CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC);
456 writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL);
457 writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT |
458 CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
459 CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT |
460 CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
461 base + CIO2_REG_LTRVAL01);
462 writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT |
463 CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
464 CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT |
465 CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
466 base + CIO2_REG_LTRVAL23);
468 for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
469 writel(0, base + CIO2_REG_CDMABA(i));
470 writel(0, base + CIO2_REG_CDMAC0(i));
471 writel(0, base + CIO2_REG_CDMAC1(i));
475 writel(PFN_DOWN(q->fbpt_bus_addr), base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));
477 writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
478 FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
479 CIO2_CDMAC0_DMA_INTR_ON_FE |
480 CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL |
482 CIO2_CDMAC0_DMA_INTR_ON_FS |
483 CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
485 writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT,
486 base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN));
488 writel(0, base + CIO2_REG_PBM_FOPN_ABORT);
490 writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT |
491 CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR |
492 CIO2_PXM_FRF_CFG_MSK_ECC_RE |
493 CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE,
494 base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));
496 /* Clear interrupts */
497 writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
498 writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
499 writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
500 writel(~0, base + CIO2_REG_INT_STS);
502 /* Enable devices, starting from the last device in the pipe */
503 writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
504 writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
509 static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
511 struct device *dev = &cio2->pci_dev->dev;
512 void __iomem *const base = cio2->base;
517 /* Disable CSI receiver and MIPI backend devices */
518 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
519 writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
520 writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
521 writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
524 writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
525 ret = readl_poll_timeout(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN),
526 value, value & CIO2_CDMAC0_DMA_HALTED,
529 dev_err(dev, "DMA %i can not be halted\n", CIO2_DMA_CHAN);
531 for (i = 0; i < CIO2_NUM_PORTS; i++) {
532 writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
533 CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
534 writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
535 CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
539 static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
541 struct device *dev = &cio2->pci_dev->dev;
542 struct cio2_queue *q = cio2->cur_queue;
543 struct cio2_fbpt_entry *entry;
544 u64 ns = ktime_get_ns();
546 if (dma_chan >= CIO2_QUEUES) {
547 dev_err(dev, "bad DMA channel %i\n", dma_chan);
551 entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
552 if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
553 dev_warn(dev, "no ready buffers found on DMA channel %u\n",
558 /* Find out which buffer(s) are ready */
560 struct cio2_buffer *b;
562 b = q->bufs[q->bufs_first];
564 unsigned int received = entry[1].second_entry.num_of_bytes;
565 unsigned long payload =
566 vb2_get_plane_payload(&b->vbb.vb2_buf, 0);
568 q->bufs[q->bufs_first] = NULL;
569 atomic_dec(&q->bufs_queued);
570 dev_dbg(dev, "buffer %i done\n", b->vbb.vb2_buf.index);
572 b->vbb.vb2_buf.timestamp = ns;
573 b->vbb.field = V4L2_FIELD_NONE;
574 b->vbb.sequence = atomic_read(&q->frame_sequence);
575 if (payload != received)
577 "payload length is %lu, received %u\n",
579 vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
581 atomic_inc(&q->frame_sequence);
582 cio2_fbpt_entry_init_dummy(cio2, entry);
583 q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
584 entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
585 } while (!(entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID));
588 static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
591 * For the user space camera control algorithms it is essential
592 * to know when the reception of a frame has begun. That's often
593 * the best timing information to get from the hardware.
595 struct v4l2_event event = {
596 .type = V4L2_EVENT_FRAME_SYNC,
597 .u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
600 v4l2_event_queue(q->subdev.devnode, &event);
603 static const char *const cio2_irq_errs[] = {
604 "single packet header error corrected",
605 "multiple packet header errors detected",
606 "payload checksum (CRC) error",
608 "reserved short packet data type detected",
609 "reserved long packet data type detected",
610 "incomplete long packet detected",
613 "DPHY start of transmission error",
614 "DPHY synchronization error",
616 "escape mode trigger event",
617 "escape mode ultra-low power state for data lane(s)",
618 "escape mode ultra-low power state exit for clock lane",
619 "inter-frame short packet discarded",
620 "inter-frame long packet discarded",
621 "non-matching Long Packet stalled",
624 static void cio2_irq_log_irq_errs(struct device *dev, u8 port, u32 status)
626 unsigned long csi2_status = status;
629 for_each_set_bit(i, &csi2_status, ARRAY_SIZE(cio2_irq_errs))
630 dev_err(dev, "CSI-2 receiver port %i: %s\n",
631 port, cio2_irq_errs[i]);
633 if (fls_long(csi2_status) >= ARRAY_SIZE(cio2_irq_errs))
634 dev_warn(dev, "unknown CSI2 error 0x%lx on port %i\n",
638 static const char *const cio2_port_errs[] = {
640 "DPHY not recoverable",
641 "ECC not recoverable",
648 static void cio2_irq_log_port_errs(struct device *dev, u8 port, u32 status)
650 unsigned long port_status = status;
653 for_each_set_bit(i, &port_status, ARRAY_SIZE(cio2_port_errs))
654 dev_err(dev, "port %i error %s\n", port, cio2_port_errs[i]);
657 static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
659 struct device *dev = &cio2->pci_dev->dev;
660 void __iomem *const base = cio2->base;
662 if (int_status & CIO2_INT_IOOE) {
664 * Interrupt on Output Error:
665 * 1) SRAM is full and FS received, or
666 * 2) An invalid bit detected by DMA.
668 u32 oe_status, oe_clear;
670 oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE);
671 oe_status = oe_clear;
673 if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) {
674 dev_err(dev, "DMA output error: 0x%x\n",
675 (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK)
676 >> CIO2_INT_EXT_OE_DMAOE_SHIFT);
677 oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK;
679 if (oe_status & CIO2_INT_EXT_OE_OES_MASK) {
680 dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n",
681 (oe_status & CIO2_INT_EXT_OE_OES_MASK)
682 >> CIO2_INT_EXT_OE_OES_SHIFT);
683 oe_status &= ~CIO2_INT_EXT_OE_OES_MASK;
685 writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE);
687 dev_warn(dev, "unknown interrupt 0x%x on OE\n",
689 int_status &= ~CIO2_INT_IOOE;
692 if (int_status & CIO2_INT_IOC_MASK) {
693 /* DMA IO done -- frame ready */
697 for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
698 if (int_status & CIO2_INT_IOC(d)) {
699 clr |= CIO2_INT_IOC(d);
700 cio2_buffer_done(cio2, d);
705 if (int_status & CIO2_INT_IOS_IOLN_MASK) {
706 /* DMA IO starts or reached specified line */
710 for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
711 if (int_status & CIO2_INT_IOS_IOLN(d)) {
712 clr |= CIO2_INT_IOS_IOLN(d);
713 if (d == CIO2_DMA_CHAN)
714 cio2_queue_event_sof(cio2,
720 if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
721 /* CSI2 receiver (error) interrupt */
725 ie_status = readl(base + CIO2_REG_INT_STS_EXT_IE);
727 for (port = 0; port < CIO2_NUM_PORTS; port++) {
728 u32 port_status = (ie_status >> (port * 8)) & 0xff;
730 cio2_irq_log_port_errs(dev, port, port_status);
732 if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
733 void __iomem *csi_rx_base =
734 base + CIO2_REG_PIPE_BASE(port);
737 csi2_status = readl(csi_rx_base +
738 CIO2_REG_IRQCTRL_STATUS);
740 cio2_irq_log_irq_errs(dev, port, csi2_status);
743 csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
747 writel(ie_status, base + CIO2_REG_INT_STS_EXT_IE);
749 int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
753 dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status);
756 static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
758 struct cio2_device *cio2 = cio2_ptr;
759 void __iomem *const base = cio2->base;
760 struct device *dev = &cio2->pci_dev->dev;
763 int_status = readl(base + CIO2_REG_INT_STS);
764 dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status);
769 writel(int_status, base + CIO2_REG_INT_STS);
770 cio2_irq_handle_once(cio2, int_status);
771 int_status = readl(base + CIO2_REG_INT_STS);
773 dev_dbg(dev, "pending status 0x%x\n", int_status);
774 } while (int_status);
779 /**************** Videobuf2 interface ****************/
781 static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
782 enum vb2_buffer_state state)
786 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
788 atomic_dec(&q->bufs_queued);
789 vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
796 static int cio2_vb2_queue_setup(struct vb2_queue *vq,
797 unsigned int *num_buffers,
798 unsigned int *num_planes,
799 unsigned int sizes[],
800 struct device *alloc_devs[])
802 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
803 struct device *dev = &cio2->pci_dev->dev;
804 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
807 if (*num_planes && *num_planes < q->format.num_planes)
810 for (i = 0; i < q->format.num_planes; ++i) {
811 if (*num_planes && sizes[i] < q->format.plane_fmt[i].sizeimage)
813 sizes[i] = q->format.plane_fmt[i].sizeimage;
817 *num_planes = q->format.num_planes;
818 *num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);
820 /* Initialize buffer queue */
821 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
823 cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
825 atomic_set(&q->bufs_queued, 0);
832 /* Called after each buffer is allocated */
833 static int cio2_vb2_buf_init(struct vb2_buffer *vb)
835 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
836 struct device *dev = &cio2->pci_dev->dev;
837 struct cio2_buffer *b = to_cio2_buffer(vb);
838 unsigned int pages = PFN_UP(vb->planes[0].length);
839 unsigned int lops = DIV_ROUND_UP(pages + 1, CIO2_LOP_ENTRIES);
841 struct sg_dma_page_iter sg_iter;
844 if (lops <= 0 || lops > CIO2_MAX_LOPS) {
845 dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
846 vb->planes[0].length);
847 return -ENOSPC; /* Should never happen */
850 memset(b->lop, 0, sizeof(b->lop));
851 /* Allocate LOP table */
852 for (i = 0; i < lops; i++) {
853 b->lop[i] = dma_alloc_coherent(dev, PAGE_SIZE,
854 &b->lop_bus_addr[i], GFP_KERNEL);
860 sg = vb2_dma_sg_plane_desc(vb, 0);
864 if (sg->nents && sg->sgl)
865 b->offset = sg->sgl->offset;
868 for_each_sg_dma_page(sg->sgl, &sg_iter, sg->nents, 0) {
871 b->lop[i][j] = PFN_DOWN(sg_page_iter_dma_address(&sg_iter));
873 if (j == CIO2_LOP_ENTRIES) {
879 b->lop[i][j] = PFN_DOWN(cio2->dummy_page_bus_addr);
883 dma_free_coherent(dev, PAGE_SIZE, b->lop[i], b->lop_bus_addr[i]);
887 /* Transfer buffer ownership to cio2 */
888 static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
890 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
891 struct device *dev = &cio2->pci_dev->dev;
892 struct cio2_queue *q =
893 container_of(vb->vb2_queue, struct cio2_queue, vbq);
894 struct cio2_buffer *b = to_cio2_buffer(vb);
895 struct cio2_fbpt_entry *entry;
897 unsigned int i, j, next = q->bufs_next;
898 int bufs_queued = atomic_inc_return(&q->bufs_queued);
901 dev_dbg(dev, "queue buffer %d\n", vb->index);
904 * This code queues the buffer to the CIO2 DMA engine, which starts
905 * running once streaming has started. It is possible that this code
906 * gets pre-empted due to increased CPU load. Upon this, the driver
907 * does not get an opportunity to queue new buffers to the CIO2 DMA
908 * engine. When the DMA engine encounters an FBPT entry without the
909 * VALID bit set, the DMA engine halts, which requires a restart of
910 * the DMA engine and sensor, to continue streaming.
911 * This is not desired and is highly unlikely given that there are
912 * 32 FBPT entries that the DMA engine needs to process, to run into
913 * an FBPT entry, without the VALID bit set. We try to mitigate this
914 * by disabling interrupts for the duration of this queueing.
916 local_irq_save(flags);
918 fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
919 >> CIO2_CDMARI_FBPT_RP_SHIFT)
920 & CIO2_CDMARI_FBPT_RP_MASK;
923 * fbpt_rp is the fbpt entry that the dma is currently working
924 * on, but since it could jump to next entry at any time,
925 * assume that we might already be there.
927 fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
929 if (bufs_queued <= 1 || fbpt_rp == next)
930 /* Buffers were drained */
931 next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;
933 for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
935 * We have allocated CIO2_MAX_BUFFERS circularly for the
936 * hw, the user has requested N buffer queue. The driver
937 * ensures N <= CIO2_MAX_BUFFERS and guarantees that whenever
938 * user queues a buffer, there necessarily is a free buffer.
940 if (!q->bufs[next]) {
942 entry = &q->fbpt[next * CIO2_MAX_LOPS];
943 cio2_fbpt_entry_init_buf(cio2, b, entry);
944 local_irq_restore(flags);
945 q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
946 for (j = 0; j < vb->num_planes; j++)
947 vb2_set_plane_payload(vb, j,
948 q->format.plane_fmt[j].sizeimage);
952 dev_dbg(dev, "entry %i was full!\n", next);
953 next = (next + 1) % CIO2_MAX_BUFFERS;
956 local_irq_restore(flags);
957 dev_err(dev, "error: all cio2 entries were full!\n");
958 atomic_dec(&q->bufs_queued);
959 vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
962 /* Called when each buffer is freed */
963 static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
965 struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
966 struct device *dev = &cio2->pci_dev->dev;
967 struct cio2_buffer *b = to_cio2_buffer(vb);
971 for (i = 0; i < CIO2_MAX_LOPS; i++) {
973 dma_free_coherent(dev, PAGE_SIZE,
974 b->lop[i], b->lop_bus_addr[i]);
978 static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
980 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
981 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
982 struct device *dev = &cio2->pci_dev->dev;
986 atomic_set(&q->frame_sequence, 0);
988 r = pm_runtime_resume_and_get(dev);
990 dev_info(dev, "failed to set power %d\n", r);
994 r = video_device_pipeline_start(&q->vdev, &q->pipe);
998 r = cio2_hw_init(cio2, q);
1002 /* Start streaming on sensor */
1003 r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
1005 goto fail_csi2_subdev;
1007 cio2->streaming = true;
1012 cio2_hw_exit(cio2, q);
1014 video_device_pipeline_stop(&q->vdev);
1016 dev_dbg(dev, "failed to start streaming (%d)\n", r);
1017 cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
1018 pm_runtime_put(dev);
1023 static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
1025 struct cio2_queue *q = vb2q_to_cio2_queue(vq);
1026 struct cio2_device *cio2 = vb2_get_drv_priv(vq);
1027 struct device *dev = &cio2->pci_dev->dev;
1029 if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
1030 dev_err(dev, "failed to stop sensor streaming\n");
1032 cio2_hw_exit(cio2, q);
1033 synchronize_irq(cio2->pci_dev->irq);
1034 cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
1035 video_device_pipeline_stop(&q->vdev);
1036 pm_runtime_put(dev);
1037 cio2->streaming = false;
1040 static const struct vb2_ops cio2_vb2_ops = {
1041 .buf_init = cio2_vb2_buf_init,
1042 .buf_queue = cio2_vb2_buf_queue,
1043 .buf_cleanup = cio2_vb2_buf_cleanup,
1044 .queue_setup = cio2_vb2_queue_setup,
1045 .start_streaming = cio2_vb2_start_streaming,
1046 .stop_streaming = cio2_vb2_stop_streaming,
1047 .wait_prepare = vb2_ops_wait_prepare,
1048 .wait_finish = vb2_ops_wait_finish,
1051 /**************** V4L2 interface ****************/
1053 static int cio2_v4l2_querycap(struct file *file, void *fh,
1054 struct v4l2_capability *cap)
1056 strscpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
1057 strscpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
1062 static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
1063 struct v4l2_fmtdesc *f)
1065 if (f->index >= ARRAY_SIZE(formats))
1068 f->pixelformat = formats[f->index].fourcc;
1073 /* The format is validated in cio2_video_link_validate() */
1074 static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
1076 struct cio2_queue *q = file_to_cio2_queue(file);
1078 f->fmt.pix_mp = q->format;
1083 static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
1085 const struct ipu3_cio2_fmt *fmt;
1086 struct v4l2_pix_format_mplane *mpix = &f->fmt.pix_mp;
1088 fmt = cio2_find_format(&mpix->pixelformat, NULL);
1092 /* Only supports up to 4224x3136 */
1093 if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
1094 mpix->width = CIO2_IMAGE_MAX_WIDTH;
1095 if (mpix->height > CIO2_IMAGE_MAX_HEIGHT)
1096 mpix->height = CIO2_IMAGE_MAX_HEIGHT;
1098 mpix->num_planes = 1;
1099 mpix->pixelformat = fmt->fourcc;
1100 mpix->colorspace = V4L2_COLORSPACE_RAW;
1101 mpix->field = V4L2_FIELD_NONE;
1102 mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
1103 mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
1107 mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
1108 mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
1109 mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
1114 static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
1116 struct cio2_queue *q = file_to_cio2_queue(file);
1118 cio2_v4l2_try_fmt(file, fh, f);
1119 q->format = f->fmt.pix_mp;
1125 cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
1127 if (input->index > 0)
1130 strscpy(input->name, "camera", sizeof(input->name));
1131 input->type = V4L2_INPUT_TYPE_CAMERA;
1137 cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
1145 cio2_video_s_input(struct file *file, void *fh, unsigned int input)
1147 return input == 0 ? 0 : -EINVAL;
1150 static const struct v4l2_file_operations cio2_v4l2_fops = {
1151 .owner = THIS_MODULE,
1152 .unlocked_ioctl = video_ioctl2,
1153 .open = v4l2_fh_open,
1154 .release = vb2_fop_release,
1155 .poll = vb2_fop_poll,
1156 .mmap = vb2_fop_mmap,
1159 static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
1160 .vidioc_querycap = cio2_v4l2_querycap,
1161 .vidioc_enum_fmt_vid_cap = cio2_v4l2_enum_fmt,
1162 .vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
1163 .vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
1164 .vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
1165 .vidioc_reqbufs = vb2_ioctl_reqbufs,
1166 .vidioc_create_bufs = vb2_ioctl_create_bufs,
1167 .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
1168 .vidioc_querybuf = vb2_ioctl_querybuf,
1169 .vidioc_qbuf = vb2_ioctl_qbuf,
1170 .vidioc_dqbuf = vb2_ioctl_dqbuf,
1171 .vidioc_streamon = vb2_ioctl_streamon,
1172 .vidioc_streamoff = vb2_ioctl_streamoff,
1173 .vidioc_expbuf = vb2_ioctl_expbuf,
1174 .vidioc_enum_input = cio2_video_enum_input,
1175 .vidioc_g_input = cio2_video_g_input,
1176 .vidioc_s_input = cio2_video_s_input,
1179 static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
1181 struct v4l2_event_subscription *sub)
1183 if (sub->type != V4L2_EVENT_FRAME_SYNC)
1186 /* Line number. For now only zero accepted. */
1190 return v4l2_event_subscribe(fh, sub, 0, NULL);
1193 static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
1195 struct v4l2_mbus_framefmt *format;
1196 const struct v4l2_mbus_framefmt fmt_default = {
1199 .code = formats[0].mbus_code,
1200 .field = V4L2_FIELD_NONE,
1201 .colorspace = V4L2_COLORSPACE_RAW,
1202 .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
1203 .quantization = V4L2_QUANTIZATION_DEFAULT,
1204 .xfer_func = V4L2_XFER_FUNC_DEFAULT,
1207 /* Initialize try_fmt */
1208 format = v4l2_subdev_get_try_format(sd, fh->state, CIO2_PAD_SINK);
1209 *format = fmt_default;
1212 format = v4l2_subdev_get_try_format(sd, fh->state, CIO2_PAD_SOURCE);
1213 *format = fmt_default;
1219 * cio2_subdev_get_fmt - Handle get format by pads subdev method
1220 * @sd : pointer to v4l2 subdev structure
 * @sd_state: V4L2 subdev state
1222 * @fmt: pointer to v4l2 subdev format structure
1223 * return -EINVAL or zero on success
1225 static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
1226 struct v4l2_subdev_state *sd_state,
1227 struct v4l2_subdev_format *fmt)
1229 struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1231 mutex_lock(&q->subdev_lock);
1233 if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
1234 fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
1237 fmt->format = q->subdev_fmt;
1239 mutex_unlock(&q->subdev_lock);
1245 * cio2_subdev_set_fmt - Handle set format by pads subdev method
1246 * @sd : pointer to v4l2 subdev structure
 * @sd_state: V4L2 subdev state
1248 * @fmt: pointer to v4l2 subdev format structure
1249 * return -EINVAL or zero on success
1251 static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
1252 struct v4l2_subdev_state *sd_state,
1253 struct v4l2_subdev_format *fmt)
1255 struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
1256 struct v4l2_mbus_framefmt *mbus;
1257 u32 mbus_code = fmt->format.code;
1261 * Only allow setting sink pad format;
1262 * source always propagates from sink
1264 if (fmt->pad == CIO2_PAD_SOURCE)
1265 return cio2_subdev_get_fmt(sd, sd_state, fmt);
1267 if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
1268 mbus = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
1270 mbus = &q->subdev_fmt;
1272 fmt->format.code = formats[0].mbus_code;
1274 for (i = 0; i < ARRAY_SIZE(formats); i++) {
1275 if (formats[i].mbus_code == mbus_code) {
1276 fmt->format.code = mbus_code;
1281 fmt->format.width = min(fmt->format.width, CIO2_IMAGE_MAX_WIDTH);
1282 fmt->format.height = min(fmt->format.height, CIO2_IMAGE_MAX_HEIGHT);
1283 fmt->format.field = V4L2_FIELD_NONE;
1285 mutex_lock(&q->subdev_lock);
1286 *mbus = fmt->format;
1287 mutex_unlock(&q->subdev_lock);
1292 static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
1293 struct v4l2_subdev_state *sd_state,
1294 struct v4l2_subdev_mbus_code_enum *code)
1296 if (code->index >= ARRAY_SIZE(formats))
1299 code->code = formats[code->index].mbus_code;
1303 static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
1304 struct v4l2_subdev_format *fmt)
1306 if (is_media_entity_v4l2_subdev(pad->entity)) {
1307 struct v4l2_subdev *sd =
1308 media_entity_to_v4l2_subdev(pad->entity);
1310 memset(fmt, 0, sizeof(*fmt));
1311 fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
1312 fmt->pad = pad->index;
1313 return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
1319 static int cio2_video_link_validate(struct media_link *link)
1321 struct media_entity *entity = link->sink->entity;
1322 struct video_device *vd = media_entity_to_video_device(entity);
1323 struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
1324 struct cio2_device *cio2 = video_get_drvdata(vd);
1325 struct device *dev = &cio2->pci_dev->dev;
1326 struct v4l2_subdev_format source_fmt;
1329 if (!media_pad_remote_pad_first(entity->pads)) {
1330 dev_info(dev, "video node %s pad not connected\n", vd->name);
1334 ret = cio2_subdev_link_validate_get_format(link->source, &source_fmt);
1338 if (source_fmt.format.width != q->format.width ||
1339 source_fmt.format.height != q->format.height) {
1340 dev_err(dev, "Wrong width or height %ux%u (%ux%u expected)\n",
1341 q->format.width, q->format.height,
1342 source_fmt.format.width, source_fmt.format.height);
1346 if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code))
1352 static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
1353 .subscribe_event = cio2_subdev_subscribe_event,
1354 .unsubscribe_event = v4l2_event_subdev_unsubscribe,
1357 static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
1358 .open = cio2_subdev_open,
1361 static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
1362 .link_validate = v4l2_subdev_link_validate_default,
1363 .get_fmt = cio2_subdev_get_fmt,
1364 .set_fmt = cio2_subdev_set_fmt,
1365 .enum_mbus_code = cio2_subdev_enum_mbus_code,
1368 static const struct v4l2_subdev_ops cio2_subdev_ops = {
1369 .core = &cio2_subdev_core_ops,
1370 .pad = &cio2_subdev_pad_ops,
1373 /******* V4L2 sub-device asynchronous registration callbacks***********/
1375 struct sensor_async_subdev {
1376 struct v4l2_async_connection asd;
1377 struct csi2_bus_info csi2;
1380 #define to_sensor_asd(__asd) \
1381 container_of_const(__asd, struct sensor_async_subdev, asd)
1383 /* The .bound() notifier callback when a match is found */
1384 static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
1385 struct v4l2_subdev *sd,
1386 struct v4l2_async_connection *asd)
1388 struct cio2_device *cio2 = to_cio2_device(notifier);
1389 struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
1390 struct cio2_queue *q;
1393 if (cio2->queue[s_asd->csi2.port].sensor)
1396 ret = ipu_bridge_instantiate_vcm(sd->dev);
1400 q = &cio2->queue[s_asd->csi2.port];
1402 q->csi2 = s_asd->csi2;
1404 q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);
1409 /* The .unbind callback */
1410 static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
1411 struct v4l2_subdev *sd,
1412 struct v4l2_async_connection *asd)
1414 struct cio2_device *cio2 = to_cio2_device(notifier);
1415 struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
1417 cio2->queue[s_asd->csi2.port].sensor = NULL;
1420 /* .complete() is called after all subdevices have been located */
1421 static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
1423 struct cio2_device *cio2 = to_cio2_device(notifier);
1424 struct device *dev = &cio2->pci_dev->dev;
1425 struct sensor_async_subdev *s_asd;
1426 struct v4l2_async_connection *asd;
1427 struct cio2_queue *q;
1430 list_for_each_entry(asd, &cio2->notifier.done_list, asc_entry) {
1431 s_asd = to_sensor_asd(asd);
1432 q = &cio2->queue[s_asd->csi2.port];
1434 ret = media_entity_get_fwnode_pad(&q->sensor->entity,
1435 s_asd->asd.match.fwnode,
1436 MEDIA_PAD_FL_SOURCE);
1438 dev_err(dev, "no pad for endpoint %pfw (%d)\n",
1439 s_asd->asd.match.fwnode, ret);
1443 ret = media_create_pad_link(&q->sensor->entity, ret,
1444 &q->subdev.entity, CIO2_PAD_SINK,
1447 dev_err(dev, "failed to create link for %s (endpoint %pfw, error %d)\n",
1448 q->sensor->name, s_asd->asd.match.fwnode, ret);
1453 return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
1456 static const struct v4l2_async_notifier_operations cio2_async_ops = {
1457 .bound = cio2_notifier_bound,
1458 .unbind = cio2_notifier_unbind,
1459 .complete = cio2_notifier_complete,
1462 static int cio2_parse_firmware(struct cio2_device *cio2)
1464 struct device *dev = &cio2->pci_dev->dev;
1468 for (i = 0; i < CIO2_NUM_PORTS; i++) {
1469 struct v4l2_fwnode_endpoint vep = {
1470 .bus_type = V4L2_MBUS_CSI2_DPHY
1472 struct sensor_async_subdev *s_asd;
1473 struct fwnode_handle *ep;
1475 ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0,
1476 FWNODE_GRAPH_ENDPOINT_NEXT);
1480 ret = v4l2_fwnode_endpoint_parse(ep, &vep);
1484 s_asd = v4l2_async_nf_add_fwnode_remote(&cio2->notifier, ep,
1486 sensor_async_subdev);
1487 if (IS_ERR(s_asd)) {
1488 ret = PTR_ERR(s_asd);
1492 s_asd->csi2.port = vep.base.port;
1493 s_asd->csi2.lanes = vep.bus.mipi_csi2.num_data_lanes;
1495 fwnode_handle_put(ep);
1500 fwnode_handle_put(ep);
1505 * Proceed even without sensors connected to allow the device to
1508 cio2->notifier.ops = &cio2_async_ops;
1509 ret = v4l2_async_nf_register(&cio2->notifier);
1511 dev_err(dev, "failed to register async notifier : %d\n", ret);
1516 /**************** Queue initialization ****************/
1517 static const struct media_entity_operations cio2_media_ops = {
1518 .link_validate = v4l2_subdev_link_validate,
1521 static const struct media_entity_operations cio2_video_entity_ops = {
1522 .link_validate = cio2_video_link_validate,
/*
 * cio2_queue_init() - set up one capture queue: default formats, FBPT,
 * media entities, the CIO2 subdev, the vb2 queue and the video node, and
 * finally the immutable subdev->video link. Teardown labels run in reverse
 * order of construction.
 */
static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
	static const u32 default_width = 1936;
	static const u32 default_height = 1096;
	const struct ipu3_cio2_fmt dflt_fmt = formats[0];
	struct device *dev = &cio2->pci_dev->dev;
	struct video_device *vdev = &q->vdev;
	struct vb2_queue *vbq = &q->vbq;
	struct v4l2_subdev *subdev = &q->subdev;
	struct v4l2_mbus_framefmt *fmt;
	/* Initialize miscellaneous variables */
	mutex_init(&q->lock);
	mutex_init(&q->subdev_lock);
	/* Initialize formats to default values */
	fmt = &q->subdev_fmt;
	fmt->width = default_width;
	fmt->height = default_height;
	fmt->code = dflt_fmt.mbus_code;
	fmt->field = V4L2_FIELD_NONE;
	/* The video node format mirrors the subdev default. */
	q->format.width = default_width;
	q->format.height = default_height;
	q->format.pixelformat = dflt_fmt.fourcc;
	q->format.colorspace = V4L2_COLORSPACE_RAW;
	q->format.field = V4L2_FIELD_NONE;
	q->format.num_planes = 1;
	q->format.plane_fmt[0].bytesperline =
		cio2_bytesperline(q->format.width);
	q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline *
	/* Initialize fbpt */
	r = cio2_fbpt_init(cio2, q);
	/* Initialize media entities */
	q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
		MEDIA_PAD_FL_MUST_CONNECT;
	q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
	subdev->entity.ops = &cio2_media_ops;
	subdev->internal_ops = &cio2_subdev_internal_ops;
	r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
		dev_err(dev, "failed initialize subdev media entity (%d)\n", r);
		goto fail_subdev_media_entity;
	q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
	vdev->entity.ops = &cio2_video_entity_ops;
	r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
		dev_err(dev, "failed initialize videodev media entity (%d)\n",
		goto fail_vdev_media_entity;
	/* Initialize subdev */
	v4l2_subdev_init(subdev, &cio2_subdev_ops);
	subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
	subdev->owner = THIS_MODULE;
	/* Name the subdev after its queue index, e.g. "<entity> 0". */
	snprintf(subdev->name, sizeof(subdev->name),
		 CIO2_ENTITY_NAME " %td", q - cio2->queue);
	subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
	v4l2_set_subdevdata(subdev, cio2);
	r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
		dev_err(dev, "failed initialize subdev (%d)\n", r);
	/* Initialize vbq */
	vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
	vbq->ops = &cio2_vb2_ops;
	vbq->mem_ops = &vb2_dma_sg_memops;
	vbq->buf_struct_size = sizeof(struct cio2_buffer);
	vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	vbq->min_buffers_needed = 1;
	vbq->drv_priv = cio2;
	vbq->lock = &q->lock;
	r = vb2_queue_init(vbq);
		dev_err(dev, "failed to initialize videobuf2 queue (%d)\n", r);
	/* Initialize vdev */
	snprintf(vdev->name, sizeof(vdev->name),
		 "%s %td", CIO2_NAME, q - cio2->queue);
	vdev->release = video_device_release_empty;
	vdev->fops = &cio2_v4l2_fops;
	vdev->ioctl_ops = &cio2_v4l2_ioctl_ops;
	vdev->lock = &cio2->lock;
	vdev->v4l2_dev = &cio2->v4l2_dev;
	vdev->queue = &q->vbq;
	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
	video_set_drvdata(vdev, cio2);
	r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
		dev_err(dev, "failed to register video device (%d)\n", r);
	/* Create link from CIO2 subdev to output node */
	r = media_create_pad_link(
		&subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0,
		MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
	/* Error unwinding, in reverse order of construction. */
	vb2_video_unregister_device(&q->vdev);
	v4l2_device_unregister_subdev(subdev);
	media_entity_cleanup(&vdev->entity);
fail_vdev_media_entity:
	media_entity_cleanup(&subdev->entity);
fail_subdev_media_entity:
	cio2_fbpt_exit(q, dev);
	mutex_destroy(&q->subdev_lock);
	mutex_destroy(&q->lock);
1658 static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
1660 vb2_video_unregister_device(&q->vdev);
1661 media_entity_cleanup(&q->vdev.entity);
1662 v4l2_device_unregister_subdev(&q->subdev);
1663 media_entity_cleanup(&q->subdev.entity);
1664 cio2_fbpt_exit(q, &cio2->pci_dev->dev);
1665 mutex_destroy(&q->subdev_lock);
1666 mutex_destroy(&q->lock);
1669 static int cio2_queues_init(struct cio2_device *cio2)
1673 for (i = 0; i < CIO2_QUEUES; i++) {
1674 r = cio2_queue_init(cio2, &cio2->queue[i]);
1679 if (i == CIO2_QUEUES)
1682 for (i--; i >= 0; i--)
1683 cio2_queue_exit(cio2, &cio2->queue[i]);
1688 static void cio2_queues_exit(struct cio2_device *cio2)
1692 for (i = 0; i < CIO2_QUEUES; i++)
1693 cio2_queue_exit(cio2, &cio2->queue[i]);
1696 static int cio2_check_fwnode_graph(struct fwnode_handle *fwnode)
1698 struct fwnode_handle *endpoint;
1700 if (IS_ERR_OR_NULL(fwnode))
1703 endpoint = fwnode_graph_get_next_endpoint(fwnode, NULL);
1705 fwnode_handle_put(endpoint);
1709 return cio2_check_fwnode_graph(fwnode->secondary);
1712 /**************** PCI interface ****************/
/*
 * Probe: enable and map the CIO2 PCI device, then build the media device,
 * V4L2 device, per-port queues and the async sensor notifier on top of it.
 * Error labels unwind in reverse order of construction.
 */
static int cio2_pci_probe(struct pci_dev *pci_dev,
			  const struct pci_device_id *id)
	struct device *dev = &pci_dev->dev;
	struct fwnode_handle *fwnode = dev_fwnode(dev);
	struct cio2_device *cio2;
	/*
	 * On some platforms no connections to sensors are defined in firmware,
	 * if the device has no endpoints then we can try to build those as
	 * software_nodes parsed from SSDB.
	 */
	r = cio2_check_fwnode_graph(fwnode);
	/* A secondary fwnode exists yet there are still no endpoints: fail. */
	if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) {
		dev_err(dev, "fwnode graph has no endpoints connected\n");
	/* Build software nodes from ACPI SSDB data via the IPU bridge. */
	r = ipu_bridge_init(dev, ipu_bridge_parse_ssdb);
	/* Managed allocation: released automatically on driver detach. */
	cio2 = devm_kzalloc(dev, sizeof(*cio2), GFP_KERNEL);
	cio2->pci_dev = pci_dev;
	r = pcim_enable_device(pci_dev);
		dev_err(dev, "failed to enable device (%d)\n", r);
	dev_info(dev, "device 0x%x (rev: 0x%x)\n",
		 pci_dev->device, pci_dev->revision);
	/* Map the single BAR used by CIO2. */
	r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
		dev_err(dev, "failed to remap I/O memory (%d)\n", r);
	cio2->base = pcim_iomap_table(pci_dev)[CIO2_PCI_BAR];
	pci_set_drvdata(pci_dev, cio2);
	pci_set_master(pci_dev);
	r = dma_set_mask(&pci_dev->dev, CIO2_DMA_MASK);
		dev_err(dev, "failed to set DMA mask (%d)\n", r);
	r = pci_enable_msi(pci_dev);
		dev_err(dev, "failed to enable MSI (%d)\n", r);
	/* Dummy FBPT keeps the DMA engine fed when no buffer is queued. */
	r = cio2_fbpt_init_dummy(cio2);
	mutex_init(&cio2->lock);
	cio2->media_dev.dev = dev;
	strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
		sizeof(cio2->media_dev.model));
	cio2->media_dev.hw_revision = 0;
	media_device_init(&cio2->media_dev);
	r = media_device_register(&cio2->media_dev);
		goto fail_mutex_destroy;
	cio2->v4l2_dev.mdev = &cio2->media_dev;
	r = v4l2_device_register(dev, &cio2->v4l2_dev);
		dev_err(dev, "failed to register V4L2 device (%d)\n", r);
		goto fail_media_device_unregister;
	r = cio2_queues_init(cio2);
		goto fail_v4l2_device_unregister;
	v4l2_async_nf_init(&cio2->notifier, &cio2->v4l2_dev);
	/* Register notifier for subdevices we care */
	r = cio2_parse_firmware(cio2);
		goto fail_clean_notifier;
	/* Managed IRQ: freed automatically on driver detach. */
	r = devm_request_irq(dev, pci_dev->irq, cio2_irq, IRQF_SHARED,
		dev_err(dev, "failed to request IRQ (%d)\n", r);
		goto fail_clean_notifier;
	/* Device comes up powered; drop the count and permit runtime PM. */
	pm_runtime_put_noidle(dev);
	pm_runtime_allow(dev);
	/* Error unwinding, in reverse order of construction. */
fail_clean_notifier:
	v4l2_async_nf_unregister(&cio2->notifier);
	v4l2_async_nf_cleanup(&cio2->notifier);
	cio2_queues_exit(cio2);
fail_v4l2_device_unregister:
	v4l2_device_unregister(&cio2->v4l2_dev);
fail_media_device_unregister:
	media_device_unregister(&cio2->media_dev);
	media_device_cleanup(&cio2->media_dev);
	mutex_destroy(&cio2->lock);
	cio2_fbpt_exit_dummy(cio2);
1839 static void cio2_pci_remove(struct pci_dev *pci_dev)
1841 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1843 media_device_unregister(&cio2->media_dev);
1844 v4l2_async_nf_unregister(&cio2->notifier);
1845 v4l2_async_nf_cleanup(&cio2->notifier);
1846 cio2_queues_exit(cio2);
1847 cio2_fbpt_exit_dummy(cio2);
1848 v4l2_device_unregister(&cio2->v4l2_dev);
1849 media_device_cleanup(&cio2->media_dev);
1850 mutex_destroy(&cio2->lock);
1852 pm_runtime_forbid(&pci_dev->dev);
1853 pm_runtime_get_noresume(&pci_dev->dev);
1856 static int __maybe_unused cio2_runtime_suspend(struct device *dev)
1858 struct pci_dev *pci_dev = to_pci_dev(dev);
1859 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1860 void __iomem *const base = cio2->base;
1863 writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C);
1864 dev_dbg(dev, "cio2 runtime suspend.\n");
1866 pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1867 pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1868 pm |= CIO2_PMCSR_D3;
1869 pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
1874 static int __maybe_unused cio2_runtime_resume(struct device *dev)
1876 struct pci_dev *pci_dev = to_pci_dev(dev);
1877 struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
1878 void __iomem *const base = cio2->base;
1881 writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
1882 dev_dbg(dev, "cio2 runtime resume.\n");
1884 pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
1885 pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
1886 pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);
/*
 * Helper function to advance all the elements of a circular buffer by "start"
 * positions, in place. Works as a block-swap rotation: two chunks (before and
 * from "start") are repeatedly swapped element-wise until both are in place.
 */
static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
		{ start, elems - 1 },
#define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1)
	/* Loop as long as we have out-of-place entries */
	while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) {
		/*
		 * Find the number of entries that can be arranged on this
		 * iteration: limited by the smaller of the two chunks.
		 */
		size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1]));
		/* Swap the entries in two parts of the array. */
		for (i = 0; i < size0; i++) {
			u8 *d = ptr + elem_size * (arr[1].begin + i);
			u8 *s = ptr + elem_size * (arr[0].begin + i);
			/* Byte-wise swap: elem_size is not known statically. */
			for (j = 0; j < elem_size; j++)
				swap(d[j], s[j]);
		if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) {
			/* The end of the first array remains unarranged. */
			arr[0].begin += size0;
			/*
			 * The first array is fully arranged so we proceed
			 * handling the next one.
			 */
			arr[0].begin = arr[1].begin;
			arr[0].end = arr[1].begin + size0 - 1;
			arr[1].begin += size0;
/*
 * Rotate the FBPT and the bufs[] bookkeeping so that the first queued buffer
 * sits at index 0 — after resume the hardware restarts from the head of the
 * FBPT — and then re-arm the valid bit on every entry.
 */
static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
	/* Find the first slot that actually holds a queued buffer. */
	for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS;
	     i++, j = (j + 1) % CIO2_MAX_BUFFERS)
	/* No queued buffers at all: nothing to rotate. */
	if (i == CIO2_MAX_BUFFERS)
	/* Rotate FBPT entries and the parallel buffer-pointer array alike. */
	arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS,
		CIO2_MAX_BUFFERS, j);
	arrange(q->bufs, sizeof(struct cio2_buffer *),
		CIO2_MAX_BUFFERS, j);
	/*
	 * DMA clears the valid bit when accessing the buffer.
	 * When stopping stream in suspend callback, some of the buffers
	 * may be in invalid state. After resume, when DMA meets the invalid
	 * buffer, it will halt and stop receiving new data.
	 * To avoid DMA halting, set the valid bit for all buffers in FBPT.
	 */
	for (i = 0; i < CIO2_MAX_BUFFERS; i++)
		cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
/*
 * System suspend: stop the sensor and the CIO2 hardware if streaming, then
 * force runtime suspend and rotate queued buffers to the FBPT head so
 * capture can restart from entry 0 after resume.
 */
static int __maybe_unused cio2_suspend(struct device *dev)
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	struct cio2_queue *q = cio2->cur_queue;
	dev_dbg(dev, "cio2 suspend\n");
	/* Nothing to quiesce when not streaming. */
	if (!cio2->streaming)
	/* Stop the upstream sensor before halting the receiver. */
	r = v4l2_subdev_call(q->sensor, video, s_stream, 0);
		dev_err(dev, "failed to stop sensor streaming\n");
	cio2_hw_exit(cio2, q);
	/* Make sure no IRQ handler still runs against the stopped hardware. */
	synchronize_irq(pci_dev->irq);
	pm_runtime_force_suspend(dev);
	/*
	 * Upon resume, hw starts to process the fbpt entries from beginning,
	 * so relocate the queued buffs to the fbpt head before suspend.
	 */
	cio2_fbpt_rearrange(cio2, q);
/*
 * System resume: if we were streaming at suspend time, power the device back
 * up, re-initialize the CIO2 hardware and restart the sensor stream.
 */
static int __maybe_unused cio2_resume(struct device *dev)
	struct cio2_device *cio2 = dev_get_drvdata(dev);
	struct cio2_queue *q = cio2->cur_queue;
	dev_dbg(dev, "cio2 resume\n");
	/* No stream was active: nothing to restore. */
	if (!cio2->streaming)
	/* Start IO power */
	r = pm_runtime_force_resume(dev);
		dev_err(dev, "failed to set power %d\n", r);
	r = cio2_hw_init(cio2, q);
		dev_err(dev, "fail to init cio2 hw\n");
	/* Restart the sensor last; undo hw init if it fails. */
	r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
		dev_err(dev, "fail to start sensor streaming\n");
		cio2_hw_exit(cio2, q);
2036 static const struct dev_pm_ops cio2_pm_ops = {
2037 SET_RUNTIME_PM_OPS(&cio2_runtime_suspend, &cio2_runtime_resume, NULL)
2038 SET_SYSTEM_SLEEP_PM_OPS(&cio2_suspend, &cio2_resume)
2041 static const struct pci_device_id cio2_pci_id_table[] = {
2042 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
2046 MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);
2048 static struct pci_driver cio2_pci_driver = {
2050 .id_table = cio2_pci_id_table,
2051 .probe = cio2_pci_probe,
2052 .remove = cio2_pci_remove,
2058 module_pci_driver(cio2_pci_driver);
/* Module authorship, licensing and namespace metadata. */
MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
MODULE_AUTHOR("Jian Xu Zheng");
MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IPU3 CIO2 driver");
MODULE_IMPORT_NS(INTEL_IPU_BRIDGE);