4 * Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module
6 * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
7 * Copyright (C) 2015-2017 Linaro Ltd.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 and
11 * only version 2 as published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 #include <linux/clk.h>
19 #include <linux/completion.h>
20 #include <linux/interrupt.h>
21 #include <linux/iommu.h>
22 #include <linux/iopoll.h>
23 #include <linux/mutex.h>
25 #include <linux/platform_device.h>
26 #include <linux/spinlock_types.h>
27 #include <linux/spinlock.h>
28 #include <media/media-entity.h>
29 #include <media/v4l2-device.h>
30 #include <media/v4l2-subdev.h>
32 #include "camss-vfe.h"
35 #define MSM_VFE_NAME "msm_vfe"
37 #define vfe_line_array(ptr_line) \
38 ((const struct vfe_line (*)[]) &(ptr_line[-(ptr_line->id)]))
40 #define to_vfe(ptr_line) \
41 container_of(vfe_line_array(ptr_line), struct vfe_device, ptr_line)
43 #define VFE_0_HW_VERSION 0x000
45 #define VFE_0_GLOBAL_RESET_CMD 0x00c
46 #define VFE_0_GLOBAL_RESET_CMD_CORE (1 << 0)
47 #define VFE_0_GLOBAL_RESET_CMD_CAMIF (1 << 1)
48 #define VFE_0_GLOBAL_RESET_CMD_BUS (1 << 2)
49 #define VFE_0_GLOBAL_RESET_CMD_BUS_BDG (1 << 3)
50 #define VFE_0_GLOBAL_RESET_CMD_REGISTER (1 << 4)
51 #define VFE_0_GLOBAL_RESET_CMD_TIMER (1 << 5)
52 #define VFE_0_GLOBAL_RESET_CMD_PM (1 << 6)
53 #define VFE_0_GLOBAL_RESET_CMD_BUS_MISR (1 << 7)
54 #define VFE_0_GLOBAL_RESET_CMD_TESTGEN (1 << 8)
56 #define VFE_0_MODULE_CFG 0x018
57 #define VFE_0_MODULE_CFG_DEMUX (1 << 2)
58 #define VFE_0_MODULE_CFG_CHROMA_UPSAMPLE (1 << 3)
59 #define VFE_0_MODULE_CFG_SCALE_ENC (1 << 23)
60 #define VFE_0_MODULE_CFG_CROP_ENC (1 << 27)
62 #define VFE_0_CORE_CFG 0x01c
63 #define VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR 0x4
64 #define VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB 0x5
65 #define VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY 0x6
66 #define VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY 0x7
68 #define VFE_0_IRQ_CMD 0x024
69 #define VFE_0_IRQ_CMD_GLOBAL_CLEAR (1 << 0)
71 #define VFE_0_IRQ_MASK_0 0x028
72 #define VFE_0_IRQ_MASK_0_CAMIF_SOF (1 << 0)
73 #define VFE_0_IRQ_MASK_0_CAMIF_EOF (1 << 1)
74 #define VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n) (1 << ((n) + 5))
75 #define VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(n) \
76 ((n) == VFE_LINE_PIX ? (1 << 4) : VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n))
77 #define VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(n) (1 << ((n) + 8))
78 #define VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(n) (1 << ((n) + 25))
79 #define VFE_0_IRQ_MASK_0_RESET_ACK (1 << 31)
80 #define VFE_0_IRQ_MASK_1 0x02c
81 #define VFE_0_IRQ_MASK_1_CAMIF_ERROR (1 << 0)
82 #define VFE_0_IRQ_MASK_1_VIOLATION (1 << 7)
83 #define VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK (1 << 8)
84 #define VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(n) (1 << ((n) + 9))
85 #define VFE_0_IRQ_MASK_1_RDIn_SOF(n) (1 << ((n) + 29))
87 #define VFE_0_IRQ_CLEAR_0 0x030
88 #define VFE_0_IRQ_CLEAR_1 0x034
90 #define VFE_0_IRQ_STATUS_0 0x038
91 #define VFE_0_IRQ_STATUS_0_CAMIF_SOF (1 << 0)
92 #define VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n) (1 << ((n) + 5))
93 #define VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(n) \
94 ((n) == VFE_LINE_PIX ? (1 << 4) : VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n))
95 #define VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(n) (1 << ((n) + 8))
96 #define VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(n) (1 << ((n) + 25))
97 #define VFE_0_IRQ_STATUS_0_RESET_ACK (1 << 31)
98 #define VFE_0_IRQ_STATUS_1 0x03c
99 #define VFE_0_IRQ_STATUS_1_VIOLATION (1 << 7)
100 #define VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK (1 << 8)
101 #define VFE_0_IRQ_STATUS_1_RDIn_SOF(n) (1 << ((n) + 29))
103 #define VFE_0_IRQ_COMPOSITE_MASK_0 0x40
104 #define VFE_0_VIOLATION_STATUS 0x48
106 #define VFE_0_BUS_CMD 0x4c
107 #define VFE_0_BUS_CMD_Mx_RLD_CMD(x) (1 << (x))
109 #define VFE_0_BUS_CFG 0x050
111 #define VFE_0_BUS_XBAR_CFG_x(x) (0x58 + 0x4 * ((x) / 2))
112 #define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN (1 << 1)
113 #define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA (0x3 << 4)
114 #define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT 8
115 #define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA 0
116 #define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 5
117 #define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 6
118 #define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 7
120 #define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(n) (0x06c + 0x24 * (n))
121 #define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT 0
122 #define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT 1
123 #define VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(n) (0x070 + 0x24 * (n))
124 #define VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(n) (0x074 + 0x24 * (n))
125 #define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(n) (0x078 + 0x24 * (n))
126 #define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT 2
127 #define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK (0x1F << 2)
129 #define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(n) (0x07c + 0x24 * (n))
130 #define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT 16
131 #define VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(n) (0x080 + 0x24 * (n))
132 #define VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(n) (0x084 + 0x24 * (n))
133 #define VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(n) \
135 #define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(n) \
137 #define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF 0xffffffff
139 #define VFE_0_BUS_PING_PONG_STATUS 0x268
141 #define VFE_0_BUS_BDG_CMD 0x2c0
142 #define VFE_0_BUS_BDG_CMD_HALT_REQ 1
144 #define VFE_0_BUS_BDG_QOS_CFG_0 0x2c4
145 #define VFE_0_BUS_BDG_QOS_CFG_0_CFG 0xaaa5aaa5
146 #define VFE_0_BUS_BDG_QOS_CFG_1 0x2c8
147 #define VFE_0_BUS_BDG_QOS_CFG_2 0x2cc
148 #define VFE_0_BUS_BDG_QOS_CFG_3 0x2d0
149 #define VFE_0_BUS_BDG_QOS_CFG_4 0x2d4
150 #define VFE_0_BUS_BDG_QOS_CFG_5 0x2d8
151 #define VFE_0_BUS_BDG_QOS_CFG_6 0x2dc
152 #define VFE_0_BUS_BDG_QOS_CFG_7 0x2e0
153 #define VFE_0_BUS_BDG_QOS_CFG_7_CFG 0x0001aaa5
155 #define VFE_0_RDI_CFG_x(x) (0x2e8 + (0x4 * (x)))
156 #define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT 28
157 #define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK (0xf << 28)
158 #define VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT 4
159 #define VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK (0xf << 4)
160 #define VFE_0_RDI_CFG_x_RDI_EN_BIT (1 << 2)
161 #define VFE_0_RDI_CFG_x_MIPI_EN_BITS 0x3
162 #define VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(r) (1 << (16 + (r)))
164 #define VFE_0_CAMIF_CMD 0x2f4
165 #define VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY 0
166 #define VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY 1
167 #define VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS (1 << 2)
168 #define VFE_0_CAMIF_CFG 0x2f8
169 #define VFE_0_CAMIF_CFG_VFE_OUTPUT_EN (1 << 6)
170 #define VFE_0_CAMIF_FRAME_CFG 0x300
171 #define VFE_0_CAMIF_WINDOW_WIDTH_CFG 0x304
172 #define VFE_0_CAMIF_WINDOW_HEIGHT_CFG 0x308
173 #define VFE_0_CAMIF_SUBSAMPLE_CFG_0 0x30c
174 #define VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN 0x314
175 #define VFE_0_CAMIF_STATUS 0x31c
176 #define VFE_0_CAMIF_STATUS_HALT (1 << 31)
178 #define VFE_0_REG_UPDATE 0x378
179 #define VFE_0_REG_UPDATE_RDIn(n) (1 << (1 + (n)))
180 #define VFE_0_REG_UPDATE_line_n(n) \
181 ((n) == VFE_LINE_PIX ? 1 : VFE_0_REG_UPDATE_RDIn(n))
183 #define VFE_0_DEMUX_CFG 0x424
184 #define VFE_0_DEMUX_CFG_PERIOD 0x3
185 #define VFE_0_DEMUX_GAIN_0 0x428
186 #define VFE_0_DEMUX_GAIN_0_CH0_EVEN (0x80 << 0)
187 #define VFE_0_DEMUX_GAIN_0_CH0_ODD (0x80 << 16)
188 #define VFE_0_DEMUX_GAIN_1 0x42c
189 #define VFE_0_DEMUX_GAIN_1_CH1 (0x80 << 0)
190 #define VFE_0_DEMUX_GAIN_1_CH2 (0x80 << 16)
191 #define VFE_0_DEMUX_EVEN_CFG 0x438
192 #define VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV 0x9cac
193 #define VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU 0xac9c
194 #define VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY 0xc9ca
195 #define VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY 0xcac9
196 #define VFE_0_DEMUX_ODD_CFG 0x43c
197 #define VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV 0x9cac
198 #define VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU 0xac9c
199 #define VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY 0xc9ca
200 #define VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY 0xcac9
202 #define VFE_0_SCALE_ENC_Y_CFG 0x75c
203 #define VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE 0x760
204 #define VFE_0_SCALE_ENC_Y_H_PHASE 0x764
205 #define VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE 0x76c
206 #define VFE_0_SCALE_ENC_Y_V_PHASE 0x770
207 #define VFE_0_SCALE_ENC_CBCR_CFG 0x778
208 #define VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE 0x77c
209 #define VFE_0_SCALE_ENC_CBCR_H_PHASE 0x780
210 #define VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE 0x790
211 #define VFE_0_SCALE_ENC_CBCR_V_PHASE 0x794
213 #define VFE_0_CROP_ENC_Y_WIDTH 0x854
214 #define VFE_0_CROP_ENC_Y_HEIGHT 0x858
215 #define VFE_0_CROP_ENC_CBCR_WIDTH 0x85c
216 #define VFE_0_CROP_ENC_CBCR_HEIGHT 0x860
218 #define VFE_0_CLAMP_ENC_MAX_CFG 0x874
219 #define VFE_0_CLAMP_ENC_MAX_CFG_CH0 (0xff << 0)
220 #define VFE_0_CLAMP_ENC_MAX_CFG_CH1 (0xff << 8)
221 #define VFE_0_CLAMP_ENC_MAX_CFG_CH2 (0xff << 16)
222 #define VFE_0_CLAMP_ENC_MIN_CFG 0x878
223 #define VFE_0_CLAMP_ENC_MIN_CFG_CH0 (0x0 << 0)
224 #define VFE_0_CLAMP_ENC_MIN_CFG_CH1 (0x0 << 8)
225 #define VFE_0_CLAMP_ENC_MIN_CFG_CH2 (0x0 << 16)
227 #define VFE_0_CGC_OVERRIDE_1 0x974
228 #define VFE_0_CGC_OVERRIDE_1_IMAGE_Mx_CGC_OVERRIDE(x) (1 << (x))
230 /* VFE reset timeout */
231 #define VFE_RESET_TIMEOUT_MS 50
232 /* VFE halt timeout */
233 #define VFE_HALT_TIMEOUT_MS 100
234 /* Max number of frame drop updates per frame */
235 #define VFE_FRAME_DROP_UPDATES 5
236 /* Frame drop value. NOTE: VAL + UPDATES should not exceed 31 */
237 #define VFE_FRAME_DROP_VAL 20
239 #define VFE_NEXT_SOF_MS 500
241 #define CAMIF_TIMEOUT_SLEEP_US 1000
242 #define CAMIF_TIMEOUT_ALL_US 1000000
244 #define SCALER_RATIO_MAX 16
246 static const struct {
251 MEDIA_BUS_FMT_UYVY8_2X8,
255 MEDIA_BUS_FMT_VYUY8_2X8,
259 MEDIA_BUS_FMT_YUYV8_2X8,
263 MEDIA_BUS_FMT_YVYU8_2X8,
267 MEDIA_BUS_FMT_SBGGR8_1X8,
271 MEDIA_BUS_FMT_SGBRG8_1X8,
275 MEDIA_BUS_FMT_SGRBG8_1X8,
279 MEDIA_BUS_FMT_SRGGB8_1X8,
283 MEDIA_BUS_FMT_SBGGR10_1X10,
287 MEDIA_BUS_FMT_SGBRG10_1X10,
291 MEDIA_BUS_FMT_SGRBG10_1X10,
295 MEDIA_BUS_FMT_SRGGB10_1X10,
299 MEDIA_BUS_FMT_SBGGR12_1X12,
303 MEDIA_BUS_FMT_SGBRG12_1X12,
307 MEDIA_BUS_FMT_SGRBG12_1X12,
311 MEDIA_BUS_FMT_SRGGB12_1X12,
317 * vfe_get_bpp - map media bus format to bits per pixel
318 * @code: media bus format code
320 * Return number of bits per pixel
322 static u8 vfe_get_bpp(u32 code)
326 for (i = 0; i < ARRAY_SIZE(vfe_formats); i++)
327 if (code == vfe_formats[i].code)
328 return vfe_formats[i].bpp;
330 WARN(1, "Unknown format\n");
332 return vfe_formats[0].bpp;
335 static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits)
337 u32 bits = readl_relaxed(vfe->base + reg);
339 writel_relaxed(bits & ~clr_bits, vfe->base + reg);
342 static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits)
344 u32 bits = readl_relaxed(vfe->base + reg);
346 writel_relaxed(bits | set_bits, vfe->base + reg);
349 static void vfe_global_reset(struct vfe_device *vfe)
351 u32 reset_bits = VFE_0_GLOBAL_RESET_CMD_TESTGEN |
352 VFE_0_GLOBAL_RESET_CMD_BUS_MISR |
353 VFE_0_GLOBAL_RESET_CMD_PM |
354 VFE_0_GLOBAL_RESET_CMD_TIMER |
355 VFE_0_GLOBAL_RESET_CMD_REGISTER |
356 VFE_0_GLOBAL_RESET_CMD_BUS_BDG |
357 VFE_0_GLOBAL_RESET_CMD_BUS |
358 VFE_0_GLOBAL_RESET_CMD_CAMIF |
359 VFE_0_GLOBAL_RESET_CMD_CORE;
361 writel_relaxed(reset_bits, vfe->base + VFE_0_GLOBAL_RESET_CMD);
364 static void vfe_wm_enable(struct vfe_device *vfe, u8 wm, u8 enable)
367 vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
368 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT);
370 vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
371 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT);
374 static void vfe_wm_frame_based(struct vfe_device *vfe, u8 wm, u8 enable)
377 vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
378 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT);
380 vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
381 1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT);
/* Words needed for @width units at M/N words per unit, rounded up. */
#define CALC_WORD(width, M, N)	(((width) * (M) + (N) - 1) / (N))

/*
 * Number of 64-bit bus words per line for @format at @pixel_per_line.
 * Planar NV formats carry 1 byte/pixel per plane; packed YUV 4:2:2
 * formats carry 2 bytes/pixel. Unknown formats yield 0.
 */
static int vfe_word_per_line(uint32_t format, uint32_t pixel_per_line)
{
	int val = 0;

	switch (format) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		val = CALC_WORD(pixel_per_line, 1, 8);
		break;
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_YVYU:
	case V4L2_PIX_FMT_UYVY:
	case V4L2_PIX_FMT_VYUY:
		val = CALC_WORD(pixel_per_line, 2, 8);
		break;
	}

	return val;
}
408 static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane,
409 u16 *width, u16 *height, u16 *bytesperline)
411 switch (pix->pixelformat) {
412 case V4L2_PIX_FMT_NV12:
413 case V4L2_PIX_FMT_NV21:
415 *height = pix->height;
416 *bytesperline = pix->plane_fmt[0].bytesperline;
420 case V4L2_PIX_FMT_NV16:
421 case V4L2_PIX_FMT_NV61:
423 *height = pix->height;
424 *bytesperline = pix->plane_fmt[0].bytesperline;
429 static void vfe_wm_line_based(struct vfe_device *vfe, u32 wm,
430 struct v4l2_pix_format_mplane *pix,
431 u8 plane, u32 enable)
436 u16 width = 0, height = 0, bytesperline = 0, wpl;
438 vfe_get_wm_sizes(pix, plane, &width, &height, &bytesperline);
440 wpl = vfe_word_per_line(pix->pixelformat, width);
443 reg |= ((wpl + 1) / 2 - 1) << 16;
445 writel_relaxed(reg, vfe->base +
446 VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
448 wpl = vfe_word_per_line(pix->pixelformat, bytesperline);
451 reg |= (height - 1) << 4;
454 writel_relaxed(reg, vfe->base +
455 VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
457 writel_relaxed(0, vfe->base +
458 VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
459 writel_relaxed(0, vfe->base +
460 VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
464 static void vfe_wm_set_framedrop_period(struct vfe_device *vfe, u8 wm, u8 per)
468 reg = readl_relaxed(vfe->base +
469 VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
471 reg &= ~(VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK);
473 reg |= (per << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT)
474 & VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK;
477 vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
480 static void vfe_wm_set_framedrop_pattern(struct vfe_device *vfe, u8 wm,
483 writel_relaxed(pattern,
484 vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(wm));
487 static void vfe_wm_set_ub_cfg(struct vfe_device *vfe, u8 wm, u16 offset,
492 reg = (offset << VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT) |
494 writel_relaxed(reg, vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(wm));
497 static void vfe_bus_reload_wm(struct vfe_device *vfe, u8 wm)
500 writel_relaxed(VFE_0_BUS_CMD_Mx_RLD_CMD(wm), vfe->base + VFE_0_BUS_CMD);
504 static void vfe_wm_set_ping_addr(struct vfe_device *vfe, u8 wm, u32 addr)
507 vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(wm));
510 static void vfe_wm_set_pong_addr(struct vfe_device *vfe, u8 wm, u32 addr)
513 vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(wm));
516 static int vfe_wm_get_ping_pong_status(struct vfe_device *vfe, u8 wm)
520 reg = readl_relaxed(vfe->base + VFE_0_BUS_PING_PONG_STATUS);
522 return (reg >> wm) & 0x1;
525 static void vfe_bus_enable_wr_if(struct vfe_device *vfe, u8 enable)
528 writel_relaxed(0x10000009, vfe->base + VFE_0_BUS_CFG);
530 writel_relaxed(0, vfe->base + VFE_0_BUS_CFG);
533 static void vfe_bus_connect_wm_to_rdi(struct vfe_device *vfe, u8 wm,
538 reg = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
539 reg |= VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(id);
540 vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), reg);
542 reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
543 reg |= ((3 * id) << VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT) &
544 VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK;
545 vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id), reg);
550 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
551 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
554 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
555 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
558 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
559 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
566 vfe_reg_set(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
569 static void vfe_wm_set_subsample(struct vfe_device *vfe, u8 wm)
571 writel_relaxed(VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF,
573 VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(wm));
576 static void vfe_bus_disconnect_wm_from_rdi(struct vfe_device *vfe, u8 wm,
581 reg = VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(id);
582 vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(0), reg);
584 reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
585 vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id), reg);
590 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
591 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
594 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
595 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
598 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
599 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
606 vfe_reg_clr(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
609 static void vfe_set_xbar_cfg(struct vfe_device *vfe, struct vfe_output *output,
612 struct vfe_line *line = container_of(output, struct vfe_line, output);
613 u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
617 for (i = 0; i < output->wm_num; i++) {
619 reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA <<
620 VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
622 reg = VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN;
623 if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16)
624 reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA;
627 if (output->wm_idx[i] % 2 == 1)
632 VFE_0_BUS_XBAR_CFG_x(output->wm_idx[i]),
636 VFE_0_BUS_XBAR_CFG_x(output->wm_idx[i]),
641 static void vfe_set_rdi_cid(struct vfe_device *vfe, enum vfe_line_id id, u8 cid)
643 vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id),
644 VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK);
646 vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id),
647 cid << VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT);
650 static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
652 vfe->reg_update |= VFE_0_REG_UPDATE_line_n(line_id);
654 writel_relaxed(vfe->reg_update, vfe->base + VFE_0_REG_UPDATE);
658 static void vfe_enable_irq_wm_line(struct vfe_device *vfe, u8 wm,
659 enum vfe_line_id line_id, u8 enable)
661 u32 irq_en0 = VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(wm) |
662 VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
663 u32 irq_en1 = VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(wm) |
664 VFE_0_IRQ_MASK_1_RDIn_SOF(line_id);
667 vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
668 vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
670 vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
671 vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
675 static void vfe_enable_irq_pix_line(struct vfe_device *vfe, u8 comp,
676 enum vfe_line_id line_id, u8 enable)
678 struct vfe_output *output = &vfe->line[line_id].output;
684 irq_en0 = VFE_0_IRQ_MASK_0_CAMIF_SOF;
685 irq_en0 |= VFE_0_IRQ_MASK_0_CAMIF_EOF;
686 irq_en0 |= VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(comp);
687 irq_en0 |= VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
688 irq_en1 = VFE_0_IRQ_MASK_1_CAMIF_ERROR;
689 for (i = 0; i < output->wm_num; i++) {
690 irq_en1 |= VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(
692 comp_mask |= (1 << output->wm_idx[i]) << comp * 8;
696 vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
697 vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
698 vfe_reg_set(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
700 vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
701 vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
702 vfe_reg_clr(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
706 static void vfe_enable_irq_common(struct vfe_device *vfe)
708 u32 irq_en0 = VFE_0_IRQ_MASK_0_RESET_ACK;
709 u32 irq_en1 = VFE_0_IRQ_MASK_1_VIOLATION |
710 VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK;
712 vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
713 vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
716 static void vfe_set_demux_cfg(struct vfe_device *vfe, struct vfe_line *line)
718 u32 val, even_cfg, odd_cfg;
720 writel_relaxed(VFE_0_DEMUX_CFG_PERIOD, vfe->base + VFE_0_DEMUX_CFG);
722 val = VFE_0_DEMUX_GAIN_0_CH0_EVEN | VFE_0_DEMUX_GAIN_0_CH0_ODD;
723 writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_0);
725 val = VFE_0_DEMUX_GAIN_1_CH1 | VFE_0_DEMUX_GAIN_1_CH2;
726 writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_1);
728 switch (line->fmt[MSM_VFE_PAD_SINK].code) {
729 case MEDIA_BUS_FMT_YUYV8_2X8:
730 even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV;
731 odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV;
733 case MEDIA_BUS_FMT_YVYU8_2X8:
734 even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU;
735 odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU;
737 case MEDIA_BUS_FMT_UYVY8_2X8:
739 even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY;
740 odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY;
742 case MEDIA_BUS_FMT_VYUY8_2X8:
743 even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY;
744 odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY;
748 writel_relaxed(even_cfg, vfe->base + VFE_0_DEMUX_EVEN_CFG);
749 writel_relaxed(odd_cfg, vfe->base + VFE_0_DEMUX_ODD_CFG);
/*
 * Pick the scaler interpolation resolution for a given downscale ratio:
 * 0 for >= 16x, 1 for >= 8x, 2 for >= 4x, 3 otherwise.
 */
static inline u8 vfe_calc_interp_reso(u16 input, u16 output)
{
	/* guard against a zero output size (would divide by zero) */
	if (!output)
		return 3;

	if (input / output >= 16)
		return 0;

	if (input / output >= 8)
		return 1;

	if (input / output >= 4)
		return 2;

	return 3;
}
766 static void vfe_set_scale_cfg(struct vfe_device *vfe, struct vfe_line *line)
768 u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
774 writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_Y_CFG);
776 input = line->fmt[MSM_VFE_PAD_SINK].width;
777 output = line->compose.width;
778 reg = (output << 16) | input;
779 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE);
781 interp_reso = vfe_calc_interp_reso(input, output);
782 phase_mult = input * (1 << (13 + interp_reso)) / output;
783 reg = (interp_reso << 20) | phase_mult;
784 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_PHASE);
786 input = line->fmt[MSM_VFE_PAD_SINK].height;
787 output = line->compose.height;
788 reg = (output << 16) | input;
789 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE);
791 interp_reso = vfe_calc_interp_reso(input, output);
792 phase_mult = input * (1 << (13 + interp_reso)) / output;
793 reg = (interp_reso << 20) | phase_mult;
794 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_PHASE);
796 writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_CBCR_CFG);
798 input = line->fmt[MSM_VFE_PAD_SINK].width;
799 output = line->compose.width / 2;
800 reg = (output << 16) | input;
801 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE);
803 interp_reso = vfe_calc_interp_reso(input, output);
804 phase_mult = input * (1 << (13 + interp_reso)) / output;
805 reg = (interp_reso << 20) | phase_mult;
806 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_PHASE);
808 input = line->fmt[MSM_VFE_PAD_SINK].height;
809 output = line->compose.height;
810 if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21)
811 output = line->compose.height / 2;
812 reg = (output << 16) | input;
813 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE);
815 interp_reso = vfe_calc_interp_reso(input, output);
816 phase_mult = input * (1 << (13 + interp_reso)) / output;
817 reg = (interp_reso << 20) | phase_mult;
818 writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_PHASE);
821 static void vfe_set_crop_cfg(struct vfe_device *vfe, struct vfe_line *line)
823 u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
827 first = line->crop.left;
828 last = line->crop.left + line->crop.width - 1;
829 reg = (first << 16) | last;
830 writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_WIDTH);
832 first = line->crop.top;
833 last = line->crop.top + line->crop.height - 1;
834 reg = (first << 16) | last;
835 writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_HEIGHT);
837 first = line->crop.left / 2;
838 last = line->crop.left / 2 + line->crop.width / 2 - 1;
839 reg = (first << 16) | last;
840 writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_WIDTH);
842 first = line->crop.top;
843 last = line->crop.top + line->crop.height - 1;
844 if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) {
845 first = line->crop.top / 2;
846 last = line->crop.top / 2 + line->crop.height / 2 - 1;
848 reg = (first << 16) | last;
849 writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_HEIGHT);
852 static void vfe_set_clamp_cfg(struct vfe_device *vfe)
854 u32 val = VFE_0_CLAMP_ENC_MAX_CFG_CH0 |
855 VFE_0_CLAMP_ENC_MAX_CFG_CH1 |
856 VFE_0_CLAMP_ENC_MAX_CFG_CH2;
858 writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MAX_CFG);
860 val = VFE_0_CLAMP_ENC_MIN_CFG_CH0 |
861 VFE_0_CLAMP_ENC_MIN_CFG_CH1 |
862 VFE_0_CLAMP_ENC_MIN_CFG_CH2;
864 writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MIN_CFG);
868 * vfe_reset - Trigger reset on VFE module and wait to complete
871 * Return 0 on success or a negative error code otherwise
873 static int vfe_reset(struct vfe_device *vfe)
877 reinit_completion(&vfe->reset_complete);
879 vfe_global_reset(vfe);
881 time = wait_for_completion_timeout(&vfe->reset_complete,
882 msecs_to_jiffies(VFE_RESET_TIMEOUT_MS));
884 dev_err(to_device(vfe), "VFE reset timeout\n");
892 * vfe_halt - Trigger halt on VFE module and wait to complete
895 * Return 0 on success or a negative error code otherwise
897 static int vfe_halt(struct vfe_device *vfe)
901 reinit_completion(&vfe->halt_complete);
903 writel_relaxed(VFE_0_BUS_BDG_CMD_HALT_REQ,
904 vfe->base + VFE_0_BUS_BDG_CMD);
906 time = wait_for_completion_timeout(&vfe->halt_complete,
907 msecs_to_jiffies(VFE_HALT_TIMEOUT_MS));
909 dev_err(to_device(vfe), "VFE halt timeout\n");
916 static void vfe_init_outputs(struct vfe_device *vfe)
920 for (i = 0; i < ARRAY_SIZE(vfe->line); i++) {
921 struct vfe_output *output = &vfe->line[i].output;
923 output->state = VFE_OUTPUT_OFF;
924 output->buf[0] = NULL;
925 output->buf[1] = NULL;
926 INIT_LIST_HEAD(&output->pending_bufs);
929 if (vfe->line[i].id == VFE_LINE_PIX)
934 static void vfe_reset_output_maps(struct vfe_device *vfe)
938 for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++)
939 vfe->wm_output_map[i] = VFE_LINE_NONE;
942 static void vfe_set_qos(struct vfe_device *vfe)
944 u32 val = VFE_0_BUS_BDG_QOS_CFG_0_CFG;
945 u32 val7 = VFE_0_BUS_BDG_QOS_CFG_7_CFG;
947 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_0);
948 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_1);
949 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_2);
950 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_3);
951 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_4);
952 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_5);
953 writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_6);
954 writel_relaxed(val7, vfe->base + VFE_0_BUS_BDG_QOS_CFG_7);
957 static void vfe_set_cgc_override(struct vfe_device *vfe, u8 wm, u8 enable)
959 u32 val = VFE_0_CGC_OVERRIDE_1_IMAGE_Mx_CGC_OVERRIDE(wm);
962 vfe_reg_set(vfe, VFE_0_CGC_OVERRIDE_1, val);
964 vfe_reg_clr(vfe, VFE_0_CGC_OVERRIDE_1, val);
969 static void vfe_set_module_cfg(struct vfe_device *vfe, u8 enable)
971 u32 val = VFE_0_MODULE_CFG_DEMUX |
972 VFE_0_MODULE_CFG_CHROMA_UPSAMPLE |
973 VFE_0_MODULE_CFG_SCALE_ENC |
974 VFE_0_MODULE_CFG_CROP_ENC;
977 writel_relaxed(val, vfe->base + VFE_0_MODULE_CFG);
979 writel_relaxed(0x0, vfe->base + VFE_0_MODULE_CFG);
982 static void vfe_set_camif_cfg(struct vfe_device *vfe, struct vfe_line *line)
986 switch (line->fmt[MSM_VFE_PAD_SINK].code) {
987 case MEDIA_BUS_FMT_YUYV8_2X8:
988 val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR;
990 case MEDIA_BUS_FMT_YVYU8_2X8:
991 val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB;
993 case MEDIA_BUS_FMT_UYVY8_2X8:
995 val = VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY;
997 case MEDIA_BUS_FMT_VYUY8_2X8:
998 val = VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY;
1002 writel_relaxed(val, vfe->base + VFE_0_CORE_CFG);
1004 val = line->fmt[MSM_VFE_PAD_SINK].width * 2;
1005 val |= line->fmt[MSM_VFE_PAD_SINK].height << 16;
1006 writel_relaxed(val, vfe->base + VFE_0_CAMIF_FRAME_CFG);
1008 val = line->fmt[MSM_VFE_PAD_SINK].width * 2 - 1;
1009 writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_WIDTH_CFG);
1011 val = line->fmt[MSM_VFE_PAD_SINK].height - 1;
1012 writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_HEIGHT_CFG);
1015 writel_relaxed(val, vfe->base + VFE_0_CAMIF_SUBSAMPLE_CFG_0);
1018 writel_relaxed(val, vfe->base + VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN);
1020 val = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
1021 vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), val);
1023 val = VFE_0_CAMIF_CFG_VFE_OUTPUT_EN;
1024 writel_relaxed(val, vfe->base + VFE_0_CAMIF_CFG);
1027 static void vfe_set_camif_cmd(struct vfe_device *vfe, u32 cmd)
1029 writel_relaxed(VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS,
1030 vfe->base + VFE_0_CAMIF_CMD);
1032 writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD);
1035 static int vfe_camif_wait_for_stop(struct vfe_device *vfe)
1040 ret = readl_poll_timeout(vfe->base + VFE_0_CAMIF_STATUS,
1042 (val & VFE_0_CAMIF_STATUS_HALT),
1043 CAMIF_TIMEOUT_SLEEP_US,
1044 CAMIF_TIMEOUT_ALL_US);
1046 dev_err(to_device(vfe), "%s: camif stop timeout\n", __func__);
1051 static void vfe_output_init_addrs(struct vfe_device *vfe,
1052 struct vfe_output *output, u8 sync)
1058 output->active_buf = 0;
1060 for (i = 0; i < output->wm_num; i++) {
1062 ping_addr = output->buf[0]->addr[i];
1067 pong_addr = output->buf[1]->addr[i];
1069 pong_addr = ping_addr;
1071 vfe_wm_set_ping_addr(vfe, output->wm_idx[i], ping_addr);
1072 vfe_wm_set_pong_addr(vfe, output->wm_idx[i], pong_addr);
1074 vfe_bus_reload_wm(vfe, output->wm_idx[i]);
1078 static void vfe_output_update_ping_addr(struct vfe_device *vfe,
1079 struct vfe_output *output, u8 sync)
1084 for (i = 0; i < output->wm_num; i++) {
1086 addr = output->buf[0]->addr[i];
1090 vfe_wm_set_ping_addr(vfe, output->wm_idx[i], addr);
1092 vfe_bus_reload_wm(vfe, output->wm_idx[i]);
1096 static void vfe_output_update_pong_addr(struct vfe_device *vfe,
1097 struct vfe_output *output, u8 sync)
1102 for (i = 0; i < output->wm_num; i++) {
1104 addr = output->buf[1]->addr[i];
1108 vfe_wm_set_pong_addr(vfe, output->wm_idx[i], addr);
1110 vfe_bus_reload_wm(vfe, output->wm_idx[i]);
1115 static int vfe_reserve_wm(struct vfe_device *vfe, enum vfe_line_id line_id)
1120 for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++) {
1121 if (vfe->wm_output_map[i] == VFE_LINE_NONE) {
1122 vfe->wm_output_map[i] = line_id;
1131 static int vfe_release_wm(struct vfe_device *vfe, u8 wm)
1133 if (wm >= ARRAY_SIZE(vfe->wm_output_map))
1136 vfe->wm_output_map[wm] = VFE_LINE_NONE;
1141 static void vfe_output_frame_drop(struct vfe_device *vfe,
1142 struct vfe_output *output,
1148 /* We need to toggle update period to be valid on next frame */
1149 output->drop_update_idx++;
1150 output->drop_update_idx %= VFE_FRAME_DROP_UPDATES;
1151 drop_period = VFE_FRAME_DROP_VAL + output->drop_update_idx;
1153 for (i = 0; i < output->wm_num; i++) {
1154 vfe_wm_set_framedrop_period(vfe, output->wm_idx[i],
1156 vfe_wm_set_framedrop_pattern(vfe, output->wm_idx[i],
1159 vfe_reg_update(vfe, container_of(output, struct vfe_line, output)->id);
1162 static struct camss_buffer *vfe_buf_get_pending(struct vfe_output *output)
1164 struct camss_buffer *buffer = NULL;
1166 if (!list_empty(&output->pending_bufs)) {
1167 buffer = list_first_entry(&output->pending_bufs,
1168 struct camss_buffer,
1170 list_del(&buffer->queue);
1177 * vfe_buf_add_pending - Add output buffer to list of pending
1178 * @output: VFE output
1179 * @buffer: Video buffer
1181 static void vfe_buf_add_pending(struct vfe_output *output,
1182 struct camss_buffer *buffer)
1184 INIT_LIST_HEAD(&buffer->queue);
1185 list_add_tail(&buffer->queue, &output->pending_bufs);
1189 * vfe_buf_flush_pending - Flush all pending buffers.
1190 * @output: VFE output
1191 * @state: vb2 buffer state
1193 static void vfe_buf_flush_pending(struct vfe_output *output,
1194 enum vb2_buffer_state state)
1196 struct camss_buffer *buf;
1197 struct camss_buffer *t;
1199 list_for_each_entry_safe(buf, t, &output->pending_bufs, queue) {
1200 vb2_buffer_done(&buf->vb.vb2_buf, state);
1201 list_del(&buf->queue);
/*
 * vfe_buf_update_wm_on_next - React to a next buffer becoming available
 *
 * In CONTINUOUS state, re-arm the frame-drop pattern (period 3). A next
 * buffer in SINGLE (or any other) state is unexpected and is logged
 * rate-limited rather than acted upon.
 */
1205 static void vfe_buf_update_wm_on_next(struct vfe_device *vfe,
1206 struct vfe_output *output)
1208 switch (output->state) {
1209 case VFE_OUTPUT_CONTINUOUS:
1210 vfe_output_frame_drop(vfe, output, 3);
1212 case VFE_OUTPUT_SINGLE:
1214 dev_err_ratelimited(to_device(vfe),
1215 "Next buf in wrong state! %d\n",
/*
 * vfe_buf_update_wm_on_last - React to the buffer queue running dry
 *
 * State walk-down: CONTINUOUS -> SINGLE (one buffer left, drop pattern 1),
 * SINGLE -> STOPPING (nothing left, drop everything with pattern 0).
 * Any other state is an error, logged rate-limited.
 */
1221 static void vfe_buf_update_wm_on_last(struct vfe_device *vfe,
1222 struct vfe_output *output)
1224 switch (output->state) {
1225 case VFE_OUTPUT_CONTINUOUS:
1226 output->state = VFE_OUTPUT_SINGLE;
1227 vfe_output_frame_drop(vfe, output, 1);
1229 case VFE_OUTPUT_SINGLE:
1230 output->state = VFE_OUTPUT_STOPPING;
1231 vfe_output_frame_drop(vfe, output, 0);
1234 dev_err_ratelimited(to_device(vfe),
1235 "Last buff in wrong state! %d\n",
/*
 * vfe_buf_update_wm_on_new - Place a newly queued buffer
 *
 * SINGLE: install @new_buf in the inactive ping/pong slot, update the
 * corresponding address registers and move to CONTINUOUS; if the slot is
 * unexpectedly busy, park the buffer on the pending list.
 * IDLE: install in slot 0, init addresses and move to SINGLE; same
 * fallback if slot 0 is occupied.
 * CONTINUOUS (and the remaining states visible here): queue as pending.
 */
1241 static void vfe_buf_update_wm_on_new(struct vfe_device *vfe,
1242 struct vfe_output *output,
1243 struct camss_buffer *new_buf)
1247 switch (output->state) {
1248 case VFE_OUTPUT_SINGLE:
/* The slot hardware is NOT currently writing to */
1249 inactive_idx = !output->active_buf;
1251 if (!output->buf[inactive_idx]) {
1252 output->buf[inactive_idx] = new_buf;
1255 vfe_output_update_pong_addr(vfe, output, 0);
1257 vfe_output_update_ping_addr(vfe, output, 0);
1259 vfe_output_frame_drop(vfe, output, 3);
1260 output->state = VFE_OUTPUT_CONTINUOUS;
1262 vfe_buf_add_pending(output, new_buf);
1263 dev_err_ratelimited(to_device(vfe),
1264 "Inactive buffer is busy\n");
1268 case VFE_OUTPUT_IDLE:
1269 if (!output->buf[0]) {
1270 output->buf[0] = new_buf;
1272 vfe_output_init_addrs(vfe, output, 1);
1274 vfe_output_frame_drop(vfe, output, 1);
1275 output->state = VFE_OUTPUT_SINGLE;
1277 vfe_buf_add_pending(output, new_buf);
1278 dev_err_ratelimited(to_device(vfe),
1279 "Output idle with buffer set!\n");
1283 case VFE_OUTPUT_CONTINUOUS:
1285 vfe_buf_add_pending(output, new_buf);
/*
 * vfe_get_output - Reserve the output and its write masters for a line
 *
 * Under output_lock: refuse if the output is already running, mark it
 * RESERVED, then reserve one WM per wm_num. On WM reservation failure the
 * tail (after the early unlock) unwinds already-reserved WMs and resets
 * the state to OFF.
 */
1290 static int vfe_get_output(struct vfe_line *line)
1292 struct vfe_device *vfe = to_vfe(line);
1293 struct vfe_output *output;
1294 unsigned long flags;
1298 spin_lock_irqsave(&vfe->output_lock, flags);
1300 output = &line->output;
1301 if (output->state != VFE_OUTPUT_OFF) {
1302 dev_err(to_device(vfe), "Output is running\n");
1305 output->state = VFE_OUTPUT_RESERVED;
1307 output->active_buf = 0;
1309 for (i = 0; i < output->wm_num; i++) {
1310 wm_idx = vfe_reserve_wm(vfe, line->id);
1312 dev_err(to_device(vfe), "Can not reserve wm\n");
1315 output->wm_idx[i] = wm_idx;
1318 output->drop_update_idx = 0;
1320 spin_unlock_irqrestore(&vfe->output_lock, flags);
/* Error path: release WMs reserved so far, in reverse order */
1325 for (i--; i >= 0; i--)
1326 vfe_release_wm(vfe, output->wm_idx[i]);
1327 output->state = VFE_OUTPUT_OFF;
1329 spin_unlock_irqrestore(&vfe->output_lock, flags);
/*
 * vfe_put_output - Release all write masters of a line and mark the
 * output OFF. Counterpart of vfe_get_output(); runs under output_lock.
 */
1334 static int vfe_put_output(struct vfe_line *line)
1336 struct vfe_device *vfe = to_vfe(line);
1337 struct vfe_output *output = &line->output;
1338 unsigned long flags;
1341 spin_lock_irqsave(&vfe->output_lock, flags);
1343 for (i = 0; i < output->wm_num; i++)
1344 vfe_release_wm(vfe, output->wm_idx[i]);
1346 output->state = VFE_OUTPUT_OFF;
1348 spin_unlock_irqrestore(&vfe->output_lock, flags);
/*
 * vfe_enable_output - Program and start the output for a line
 *
 * Picks the UB (ungathered/unified bus buffer) size per VFE instance,
 * then under output_lock: validates the RESERVED state, pulls up to two
 * pending buffers into the ping/pong slots, derives SINGLE/CONTINUOUS
 * state and the matching frame-drop setting, resets sequence/completion
 * bookkeeping, and programs the hardware. RDI lines use one frame-based
 * WM; the PIX line splits the UB between its line-based WMs and also
 * configures the full ISP pipe (camif, xbar, demux, scaler, crop, clamp)
 * before enabling the frame boundary. Finally a reg update commits it.
 */
1352 static int vfe_enable_output(struct vfe_line *line)
1354 struct vfe_device *vfe = to_vfe(line);
1355 struct vfe_output *output = &line->output;
1356 unsigned long flags;
/* Per-hardware-instance UB size (VFE0 vs VFE1) */
1362 ub_size = MSM_VFE_VFE0_UB_SIZE_RDI;
1365 ub_size = MSM_VFE_VFE1_UB_SIZE_RDI;
1371 spin_lock_irqsave(&vfe->output_lock, flags);
1373 vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line->id);
1375 if (output->state != VFE_OUTPUT_RESERVED) {
1376 dev_err(to_device(vfe), "Output is not in reserved state %d\n",
1378 spin_unlock_irqrestore(&vfe->output_lock, flags);
1381 output->state = VFE_OUTPUT_IDLE;
/* Prime ping/pong slots from the pending queue; keep slot 0 filled */
1383 output->buf[0] = vfe_buf_get_pending(output);
1384 output->buf[1] = vfe_buf_get_pending(output);
1386 if (!output->buf[0] && output->buf[1]) {
1387 output->buf[0] = output->buf[1];
1388 output->buf[1] = NULL;
1392 output->state = VFE_OUTPUT_SINGLE;
1395 output->state = VFE_OUTPUT_CONTINUOUS;
1397 switch (output->state) {
1398 case VFE_OUTPUT_SINGLE:
1399 vfe_output_frame_drop(vfe, output, 1);
1401 case VFE_OUTPUT_CONTINUOUS:
1402 vfe_output_frame_drop(vfe, output, 3);
1405 vfe_output_frame_drop(vfe, output, 0);
1409 output->sequence = 0;
1410 output->wait_sof = 0;
1411 output->wait_reg_update = 0;
1412 reinit_completion(&output->sof);
1413 reinit_completion(&output->reg_update);
1415 vfe_output_init_addrs(vfe, output, 0);
/* RDI lines: single frame-based WM wired straight to the RDI */
1417 if (line->id != VFE_LINE_PIX) {
1418 vfe_set_cgc_override(vfe, output->wm_idx[0], 1);
1419 vfe_enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 1);
1420 vfe_bus_connect_wm_to_rdi(vfe, output->wm_idx[0], line->id);
1421 vfe_wm_set_subsample(vfe, output->wm_idx[0]);
1422 vfe_set_rdi_cid(vfe, line->id, 0);
1423 vfe_wm_set_ub_cfg(vfe, output->wm_idx[0],
1424 (ub_size + 1) * output->wm_idx[0], ub_size);
1425 vfe_wm_frame_based(vfe, output->wm_idx[0], 1);
1426 vfe_wm_enable(vfe, output->wm_idx[0], 1);
1427 vfe_bus_reload_wm(vfe, output->wm_idx[0]);
/* PIX line: share the UB between the line-based WMs of this output */
1429 ub_size /= output->wm_num;
1430 for (i = 0; i < output->wm_num; i++) {
1431 vfe_set_cgc_override(vfe, output->wm_idx[i], 1);
1432 vfe_wm_set_subsample(vfe, output->wm_idx[i]);
1433 vfe_wm_set_ub_cfg(vfe, output->wm_idx[i],
1434 (ub_size + 1) * output->wm_idx[i],
1436 vfe_wm_line_based(vfe, output->wm_idx[i],
1437 &line->video_out.active_fmt.fmt.pix_mp,
1439 vfe_wm_enable(vfe, output->wm_idx[i], 1);
1440 vfe_bus_reload_wm(vfe, output->wm_idx[i]);
1442 vfe_enable_irq_pix_line(vfe, 0, line->id, 1);
1443 vfe_set_module_cfg(vfe, 1);
1444 vfe_set_camif_cfg(vfe, line);
1445 vfe_set_xbar_cfg(vfe, output, 1);
1446 vfe_set_demux_cfg(vfe, line);
1447 vfe_set_scale_cfg(vfe, line);
1448 vfe_set_crop_cfg(vfe, line);
1449 vfe_set_clamp_cfg(vfe);
1450 vfe_set_camif_cmd(vfe, VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY);
/* Commit the programmed configuration at the next frame boundary */
1453 vfe_reg_update(vfe, line->id);
1455 spin_unlock_irqrestore(&vfe->output_lock, flags);
/*
 * vfe_disable_output - Stop the output of a line in sync with the sensor
 *
 * Two-stage shutdown: (1) arm wait_sof and wait for the next start-of-
 * frame so WMs are disabled on a frame boundary; (2) issue a reg update,
 * arm wait_reg_update and wait for its completion so the disable has
 * actually latched. Both waits are bounded by VFE_NEXT_SOF_MS and only
 * log on timeout. Afterwards the per-line teardown mirrors the setup in
 * vfe_enable_output (RDI vs PIX paths), and for PIX the function waits
 * for CAMIF to stop.
 */
1460 static int vfe_disable_output(struct vfe_line *line)
1462 struct vfe_device *vfe = to_vfe(line);
1463 struct vfe_output *output = &line->output;
1464 unsigned long flags;
1468 spin_lock_irqsave(&vfe->output_lock, flags);
1470 output->wait_sof = 1;
1471 spin_unlock_irqrestore(&vfe->output_lock, flags);
1473 time = wait_for_completion_timeout(&output->sof,
1474 msecs_to_jiffies(VFE_NEXT_SOF_MS));
1476 dev_err(to_device(vfe), "VFE sof timeout\n");
1478 spin_lock_irqsave(&vfe->output_lock, flags);
1479 for (i = 0; i < output->wm_num; i++)
1480 vfe_wm_enable(vfe, output->wm_idx[i], 0);
1482 vfe_reg_update(vfe, line->id);
1483 output->wait_reg_update = 1;
1484 spin_unlock_irqrestore(&vfe->output_lock, flags);
1486 time = wait_for_completion_timeout(&output->reg_update,
1487 msecs_to_jiffies(VFE_NEXT_SOF_MS));
1489 dev_err(to_device(vfe), "VFE reg update timeout\n");
1491 spin_lock_irqsave(&vfe->output_lock, flags);
1493 if (line->id != VFE_LINE_PIX) {
1494 vfe_wm_frame_based(vfe, output->wm_idx[0], 0);
1495 vfe_bus_disconnect_wm_from_rdi(vfe, output->wm_idx[0], line->id);
1496 vfe_enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 0);
1497 vfe_set_cgc_override(vfe, output->wm_idx[0], 0);
1498 spin_unlock_irqrestore(&vfe->output_lock, flags);
1500 for (i = 0; i < output->wm_num; i++) {
1501 vfe_wm_line_based(vfe, output->wm_idx[i], NULL, i, 0);
1502 vfe_set_cgc_override(vfe, output->wm_idx[i], 0);
1505 vfe_enable_irq_pix_line(vfe, 0, line->id, 0);
1506 vfe_set_module_cfg(vfe, 0);
1507 vfe_set_xbar_cfg(vfe, output, 0);
1509 vfe_set_camif_cmd(vfe, VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY);
1510 spin_unlock_irqrestore(&vfe->output_lock, flags);
/* PIX path: block until CAMIF reports it has halted */
1512 vfe_camif_wait_for_stop(vfe);
1519 * vfe_enable - Enable streaming on VFE line
1522 * Return 0 on success or a negative error code otherwise
/* First streamer on this VFE enables common IRQs and the bus write
 * interface; subsequent lines just bump stream_count. On failure the
 * tail labels undo the output reservation and the stream count. */
1524 static int vfe_enable(struct vfe_line *line)
1526 struct vfe_device *vfe = to_vfe(line);
1529 mutex_lock(&vfe->stream_lock);
1531 if (!vfe->stream_count) {
1532 vfe_enable_irq_common(vfe);
1534 vfe_bus_enable_wr_if(vfe, 1);
1539 vfe->stream_count++;
1541 mutex_unlock(&vfe->stream_lock);
1543 ret = vfe_get_output(line);
1545 goto error_get_output;
1547 ret = vfe_enable_output(line);
1549 goto error_enable_output;
/* Remembered so vfe_put() knows a halt/reset is needed on power-down */
1551 vfe->was_streaming = 1;
1556 error_enable_output:
1557 vfe_put_output(line);
1560 mutex_lock(&vfe->stream_lock);
1562 if (vfe->stream_count == 1)
1563 vfe_bus_enable_wr_if(vfe, 0);
1565 vfe->stream_count--;
1567 mutex_unlock(&vfe->stream_lock);
1573 * vfe_disable - Disable streaming on VFE line
1576 * Return 0 on success or a negative error code otherwise
/* Stops the line's output, releases its WMs, and disables the bus write
 * interface when this was the last streaming line. */
1578 static int vfe_disable(struct vfe_line *line)
1580 struct vfe_device *vfe = to_vfe(line);
1582 vfe_disable_output(line);
1584 vfe_put_output(line);
1586 mutex_lock(&vfe->stream_lock);
1588 if (vfe->stream_count == 1)
1589 vfe_bus_enable_wr_if(vfe, 0);
1591 vfe->stream_count--;
1593 mutex_unlock(&vfe->stream_lock);
1599 * vfe_isr_sof - Process start of frame interrupt
1601 * @line_id: VFE line
/* Completes the output's SOF completion only if a waiter armed wait_sof
 * (see vfe_disable_output); otherwise the interrupt is ignored. */
1603 static void vfe_isr_sof(struct vfe_device *vfe, enum vfe_line_id line_id)
1605 struct vfe_output *output;
1606 unsigned long flags;
1608 spin_lock_irqsave(&vfe->output_lock, flags);
1609 output = &vfe->line[line_id].output;
1610 if (output->wait_sof) {
1611 output->wait_sof = 0;
1612 complete(&output->sof);
1614 spin_unlock_irqrestore(&vfe->output_lock, flags);
1618 * vfe_isr_reg_update - Process reg update interrupt
1620 * @line_id: VFE line
/* Two roles: (a) when a waiter armed wait_reg_update (shutdown path),
 * just complete it and return early; (b) when the output is STOPPING,
 * return the held last buffer to vb2, go IDLE, and — if buffers were
 * queued meanwhile — immediately restart capture from the pending list,
 * mirroring the state/frame-drop setup of vfe_enable_output. */
1622 static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
1624 struct vfe_output *output;
1625 unsigned long flags;
1627 spin_lock_irqsave(&vfe->output_lock, flags);
1628 vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line_id);
1630 output = &vfe->line[line_id].output;
1632 if (output->wait_reg_update) {
1633 output->wait_reg_update = 0;
1634 complete(&output->reg_update);
1635 spin_unlock_irqrestore(&vfe->output_lock, flags);
1639 if (output->state == VFE_OUTPUT_STOPPING) {
1640 /* Release last buffer when hw is idle */
1641 if (output->last_buffer) {
1642 vb2_buffer_done(&output->last_buffer->vb.vb2_buf,
1643 VB2_BUF_STATE_DONE);
1644 output->last_buffer = NULL;
1646 output->state = VFE_OUTPUT_IDLE;
1648 /* Buffers received in stopping state are queued in */
1649 /* dma pending queue, start next capture here */
1651 output->buf[0] = vfe_buf_get_pending(output);
1652 output->buf[1] = vfe_buf_get_pending(output);
/* Keep slot 0 occupied when only one buffer is available */
1654 if (!output->buf[0] && output->buf[1]) {
1655 output->buf[0] = output->buf[1];
1656 output->buf[1] = NULL;
1660 output->state = VFE_OUTPUT_SINGLE;
1663 output->state = VFE_OUTPUT_CONTINUOUS;
1665 switch (output->state) {
1666 case VFE_OUTPUT_SINGLE:
1667 vfe_output_frame_drop(vfe, output, 2);
1669 case VFE_OUTPUT_CONTINUOUS:
1670 vfe_output_frame_drop(vfe, output, 3);
1673 vfe_output_frame_drop(vfe, output, 0);
1677 vfe_output_init_addrs(vfe, output, 1);
1680 spin_unlock_irqrestore(&vfe->output_lock, flags);
1684 * vfe_isr_wm_done - Process write master done interrupt
1686 * @wm: Write master id
/* Completes the buffer the WM just finished writing (the slot opposite
 * the hardware's current ping/pong side), stamps timestamp/sequence,
 * refills the slot from the pending queue — falling back to re-using the
 * finished buffer's address when the queue is empty — and reprograms the
 * ping or pong address registers accordingly. The finished buffer is
 * either handed to vb2 or, in STOPPING state, parked as last_buffer so
 * vfe_isr_reg_update can release it once the hardware is idle. */
1688 static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
1690 struct camss_buffer *ready_buf;
1691 struct vfe_output *output;
1692 dma_addr_t *new_addr;
1693 unsigned long flags;
1695 u64 ts = ktime_get_ns();
1698 active_index = vfe_wm_get_ping_pong_status(vfe, wm);
1700 spin_lock_irqsave(&vfe->output_lock, flags);
1702 if (vfe->wm_output_map[wm] == VFE_LINE_NONE) {
1703 dev_err_ratelimited(to_device(vfe),
1704 "Received wm done for unmapped index\n");
1707 output = &vfe->line[vfe->wm_output_map[wm]].output;
/* Hardware says it is writing the side we thought was active — the
 * bookkeeping and hardware have diverged */
1709 if (output->active_buf == active_index) {
1710 dev_err_ratelimited(to_device(vfe),
1711 "Active buffer mismatch!\n");
1714 output->active_buf = active_index;
1716 ready_buf = output->buf[!active_index];
1718 dev_err_ratelimited(to_device(vfe),
1719 "Missing ready buf %d %d!\n",
1720 !active_index, output->state);
1724 ready_buf->vb.vb2_buf.timestamp = ts;
1725 ready_buf->vb.sequence = output->sequence++;
1727 /* Get next buffer */
1728 output->buf[!active_index] = vfe_buf_get_pending(output);
1729 if (!output->buf[!active_index]) {
1730 /* No next buffer - set same address */
1731 new_addr = ready_buf->addr;
1732 vfe_buf_update_wm_on_last(vfe, output);
1734 new_addr = output->buf[!active_index]->addr;
1735 vfe_buf_update_wm_on_next(vfe, output);
1739 for (i = 0; i < output->wm_num; i++)
1740 vfe_wm_set_ping_addr(vfe, output->wm_idx[i],
1743 for (i = 0; i < output->wm_num; i++)
1744 vfe_wm_set_pong_addr(vfe, output->wm_idx[i],
1747 spin_unlock_irqrestore(&vfe->output_lock, flags);
1749 if (output->state == VFE_OUTPUT_STOPPING)
1750 output->last_buffer = ready_buf;
1752 vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
/* Shared error-path unlock (labels not visible in this extract) */
1757 spin_unlock_irqrestore(&vfe->output_lock, flags);
1761 * vfe_isr_comp_done - Process composite image done interrupt
1763 * @comp: Composite image id
/* Fans the composite-done event out as a WM-done for every write master
 * currently mapped to the PIX line. */
1765 static void vfe_isr_comp_done(struct vfe_device *vfe, u8 comp)
1769 for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++)
1770 if (vfe->wm_output_map[i] == VFE_LINE_PIX) {
1771 vfe_isr_wm_done(vfe, i);
1777 * vfe_isr - ISPIF module interrupt handler
1778 * @irq: Interrupt line
1781 * Return IRQ_HANDLED on success
/* Reads and acknowledges both IRQ status registers, then dispatches in
 * order: reset ack, bus violation (diagnostic only), bus bridge halt
 * ack, per-line reg updates, CAMIF/RDI SOFs, composite done, and finally
 * per-WM ping-pong done. After a composite-done for PIX, the matching
 * per-WM bits are masked out of value0 so those WMs are not processed a
 * second time by the individual WM loop. */
1783 static irqreturn_t vfe_isr(int irq, void *dev)
1785 struct vfe_device *vfe = dev;
1790 value0 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_0);
1791 value1 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_1);
1793 writel_relaxed(value0, vfe->base + VFE_0_IRQ_CLEAR_0);
1794 writel_relaxed(value1, vfe->base + VFE_0_IRQ_CLEAR_1);
1797 writel_relaxed(VFE_0_IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_0_IRQ_CMD);
1799 if (value0 & VFE_0_IRQ_STATUS_0_RESET_ACK)
1800 complete(&vfe->reset_complete);
1802 if (value1 & VFE_0_IRQ_STATUS_1_VIOLATION) {
1803 violation = readl_relaxed(vfe->base + VFE_0_VIOLATION_STATUS);
1804 dev_err_ratelimited(to_device(vfe),
1805 "VFE: violation = 0x%08x\n", violation);
1808 if (value1 & VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK) {
1809 complete(&vfe->halt_complete);
1810 writel_relaxed(0x0, vfe->base + VFE_0_BUS_BDG_CMD);
1813 for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++)
1814 if (value0 & VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(i))
1815 vfe_isr_reg_update(vfe, i);
1817 if (value0 & VFE_0_IRQ_STATUS_0_CAMIF_SOF)
1818 vfe_isr_sof(vfe, VFE_LINE_PIX)
1820 for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++)
1821 if (value1 & VFE_0_IRQ_STATUS_1_RDIn_SOF(i))
1822 vfe_isr_sof(vfe, i);
1824 for (i = 0; i < MSM_VFE_COMPOSITE_IRQ_NUM; i++)
1825 if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(i)) {
1826 vfe_isr_comp_done(vfe, i);
1827 for (j = 0; j < ARRAY_SIZE(vfe->wm_output_map); j++)
1828 if (vfe->wm_output_map[j] == VFE_LINE_PIX)
1829 value0 &= ~VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(j);
1832 for (i = 0; i < MSM_VFE_IMAGE_MASTERS_NUM; i++)
1833 if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(i))
1834 vfe_isr_wm_done(vfe, i);
1840 * vfe_set_clock_rates - Calculate and set clock rates on VFE module
1843 * Return 0 on success or a negative error code otherwise
/* Collects the pixel clock of every line from upstream entities, then
 * for the "camss_vfe_vfe" clock derives a minimum rate (PIX line uses
 * the pixel clock directly; RDI lines scale by bits-per-pixel / 64),
 * adds a safety margin, picks the lowest listed frequency that fits (or
 * the highest available when no sensor clock is known), rounds it via
 * clk_round_rate() and programs it with clk_set_rate(). */
1845 static int vfe_set_clock_rates(struct vfe_device *vfe)
1847 struct device *dev = to_device(vfe);
1848 u32 pixel_clock[MSM_VFE_LINE_NUM];
1852 for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) {
1853 ret = camss_get_pixel_clock(&vfe->line[i].subdev.entity,
1859 for (i = 0; i < vfe->nclocks; i++) {
1860 struct camss_clock *clock = &vfe->clock[i];
1862 if (!strcmp(clock->name, "camss_vfe_vfe")) {
1866 for (j = VFE_LINE_RDI0; j <= VFE_LINE_PIX; j++) {
1870 if (j == VFE_LINE_PIX) {
1871 tmp = pixel_clock[j];
1873 bpp = vfe_get_bpp(vfe->line[j].
1874 fmt[MSM_VFE_PAD_SINK].code);
1875 tmp = pixel_clock[j] * bpp / 64;
1882 camss_add_clock_margin(&min_rate);
1884 for (j = 0; j < clock->nfreqs; j++)
1885 if (min_rate < clock->freq[j])
1888 if (j == clock->nfreqs) {
1890 "Pixel clock is too high for VFE");
1894 /* if sensor pixel clock is not available */
1895 /* set highest possible VFE clock rate */
1897 j = clock->nfreqs - 1;
1899 rate = clk_round_rate(clock->clk, clock->freq[j]);
1901 dev_err(dev, "clk round rate failed: %ld\n",
1906 ret = clk_set_rate(clock->clk, rate);
1908 dev_err(dev, "clk set rate failed: %d\n", ret);
1918 * vfe_check_clock_rates - Check current clock rates on VFE module
1921 * Return 0 if current clock rates are suitable for a new pipeline
1922 * or a negative error code otherwise
/* Read-only companion of vfe_set_clock_rates(): computes the same
 * margin-adjusted minimum rate and compares it with the currently
 * programmed clk_get_rate() instead of reprogramming the clock. */
1924 static int vfe_check_clock_rates(struct vfe_device *vfe)
1926 u32 pixel_clock[MSM_VFE_LINE_NUM];
1930 for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) {
1931 ret = camss_get_pixel_clock(&vfe->line[i].subdev.entity,
1937 for (i = 0; i < vfe->nclocks; i++) {
1938 struct camss_clock *clock = &vfe->clock[i];
1940 if (!strcmp(clock->name, "camss_vfe_vfe")) {
1944 for (j = VFE_LINE_RDI0; j <= VFE_LINE_PIX; j++) {
1948 if (j == VFE_LINE_PIX) {
1949 tmp = pixel_clock[j];
1951 bpp = vfe_get_bpp(vfe->line[j].
1952 fmt[MSM_VFE_PAD_SINK].code);
1953 tmp = pixel_clock[j] * bpp / 64;
1960 camss_add_clock_margin(&min_rate);
1962 rate = clk_get_rate(clock->clk);
1963 if (rate < min_rate)
1972 * vfe_get - Power up and reset VFE module
1975 * Return 0 on success or a negative error code otherwise
/* Reference-counted power-up under power_lock: only the first user sets
 * clock rates, enables clocks, resets the hardware and initializes the
 * output maps; later users only validate that the running clock rates
 * still satisfy their pipeline. Error paths disable clocks again. */
1977 static int vfe_get(struct vfe_device *vfe)
1981 mutex_lock(&vfe->power_lock);
1983 if (vfe->power_count == 0) {
1984 ret = vfe_set_clock_rates(vfe);
1988 ret = camss_enable_clocks(vfe->nclocks, vfe->clock,
1993 ret = vfe_reset(vfe);
1997 vfe_reset_output_maps(vfe);
1999 vfe_init_outputs(vfe);
2001 ret = vfe_check_clock_rates(vfe);
2007 mutex_unlock(&vfe->power_lock);
2012 camss_disable_clocks(vfe->nclocks, vfe->clock);
2015 mutex_unlock(&vfe->power_lock);
2021 * vfe_put - Power down VFE module
/* Reference-counted power-down: underflow is logged and ignored; the
 * last user clears the was_streaming flag (extra teardown for a
 * previously streaming VFE is not visible here) and disables clocks. */
2024 static void vfe_put(struct vfe_device *vfe)
2026 mutex_lock(&vfe->power_lock);
2028 if (vfe->power_count == 0) {
2029 dev_err(to_device(vfe), "vfe power off on power_count == 0\n");
2031 } else if (vfe->power_count == 1) {
2032 if (vfe->was_streaming) {
2033 vfe->was_streaming = 0;
2036 camss_disable_clocks(vfe->nclocks, vfe->clock);
2042 mutex_unlock(&vfe->power_lock);
2046 * vfe_video_pad_to_line - Get pointer to VFE line by media pad
2049 * Return pointer to vfe line structure
/* Follows the media link from the video node pad to the connected VFE
 * subdev pad; the NULL-remote case returns early (return value not
 * visible in this extract, presumably NULL). */
2051 static struct vfe_line *vfe_video_pad_to_line(struct media_pad *pad)
2053 struct media_pad *vfe_pad;
2054 struct v4l2_subdev *subdev;
2056 vfe_pad = media_entity_remote_pad(pad);
2057 if (vfe_pad == NULL)
2060 subdev = media_entity_to_v4l2_subdev(vfe_pad->entity);
2062 return container_of(subdev, struct vfe_line, subdev);
2066 * vfe_queue_buffer - Add empty buffer
2067 * @vid: Video device structure
2068 * @buf: Buffer to be enqueued
2070 * Add an empty buffer - depending on the current number of buffers it will be
2071 * put in pending buffer queue or directly given to the hardware to be filled.
2073 * Return 0 on success or a negative error code otherwise
2075 static int vfe_queue_buffer(struct camss_video *vid,
2076 struct camss_buffer *buf)
2078 struct vfe_device *vfe = &vid->camss->vfe;
2079 struct vfe_line *line;
2080 struct vfe_output *output;
2081 unsigned long flags;
2083 line = vfe_video_pad_to_line(&vid->pad);
2085 dev_err(to_device(vfe), "Can not queue buffer\n");
2088 output = &line->output;
/* Placement decision is made atomically with respect to the ISRs */
2090 spin_lock_irqsave(&vfe->output_lock, flags);
2092 vfe_buf_update_wm_on_new(vfe, output, buf);
2094 spin_unlock_irqrestore(&vfe->output_lock, flags);
2100 * vfe_flush_buffers - Return all vb2 buffers
2101 * @vid: Video device structure
2102 * @state: vb2 buffer state of the returned buffers
2104 * Return all buffers to vb2. This includes queued pending buffers (still
2105 * unused) and any buffers given to the hardware but again still not used.
2107 * Return 0 on success or a negative error code otherwise
2109 static int vfe_flush_buffers(struct camss_video *vid,
2110 enum vb2_buffer_state state)
2112 struct vfe_device *vfe = &vid->camss->vfe;
2113 struct vfe_line *line;
2114 struct vfe_output *output;
2115 unsigned long flags;
2117 line = vfe_video_pad_to_line(&vid->pad);
2119 dev_err(to_device(vfe), "Can not flush buffers\n");
2122 output = &line->output;
2124 spin_lock_irqsave(&vfe->output_lock, flags);
/* Pending queue first, then the ping/pong slots and the parked last
 * buffer — everything goes back to vb2 with @state */
2126 vfe_buf_flush_pending(output, state);
2129 vb2_buffer_done(&output->buf[0]->vb.vb2_buf, state);
2132 vb2_buffer_done(&output->buf[1]->vb.vb2_buf, state);
2134 if (output->last_buffer) {
2135 vb2_buffer_done(&output->last_buffer->vb.vb2_buf, state);
2136 output->last_buffer = NULL;
2139 spin_unlock_irqrestore(&vfe->output_lock, flags);
2145 * vfe_set_power - Power on/off VFE module
2146 * @sd: VFE V4L2 subdevice
2147 * @on: Requested power state
2149 * Return 0 on success or a negative error code otherwise
2151 static int vfe_set_power(struct v4l2_subdev *sd, int on)
2153 struct vfe_line *line = v4l2_get_subdevdata(sd);
2154 struct vfe_device *vfe = to_vfe(line);
/* Power-on path (branch structure not fully visible): after vfe_get()
 * succeeds the hardware version register is read as a liveness check */
2164 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
2165 dev_dbg(to_device(vfe),
2166 "VFE HW Version = 0x%08x\n", hw_version);
2175 * vfe_set_stream - Enable/disable streaming on VFE module
2176 * @sd: VFE V4L2 subdevice
2177 * @enable: Requested streaming state
2179 * Main configuration of VFE module is triggered here.
2181 * Return 0 on success or a negative error code otherwise
2183 static int vfe_set_stream(struct v4l2_subdev *sd, int enable)
2185 struct vfe_line *line = v4l2_get_subdevdata(sd);
2186 struct vfe_device *vfe = to_vfe(line);
/* Delegates to vfe_enable()/vfe_disable() per @enable, logging failures */
2190 ret = vfe_enable(line);
2192 dev_err(to_device(vfe),
2193 "Failed to enable vfe outputs\n");
2195 ret = vfe_disable(line);
2197 dev_err(to_device(vfe),
2198 "Failed to disable vfe outputs\n");
2205 * __vfe_get_format - Get pointer to format structure
2207 * @cfg: V4L2 subdev pad configuration
2208 * @pad: pad from which format is requested
2209 * @which: TRY or ACTIVE format
2211 * Return pointer to TRY or ACTIVE format structure
2213 static struct v4l2_mbus_framefmt *
2214 __vfe_get_format(struct vfe_line *line,
2215 struct v4l2_subdev_pad_config *cfg,
2217 enum v4l2_subdev_format_whence which)
/* TRY formats live in the pad config; ACTIVE formats in the line itself */
2219 if (which == V4L2_SUBDEV_FORMAT_TRY)
2220 return v4l2_subdev_get_try_format(&line->subdev, cfg, pad);
2222 return &line->fmt[pad];
2226 * __vfe_get_compose - Get pointer to compose selection structure
2228 * @cfg: V4L2 subdev pad configuration
2229 * @which: TRY or ACTIVE format
2231 * Return pointer to TRY or ACTIVE compose rectangle structure
2233 static struct v4l2_rect *
2234 __vfe_get_compose(struct vfe_line *line,
2235 struct v4l2_subdev_pad_config *cfg,
2236 enum v4l2_subdev_format_whence which)
/* Mirrors __vfe_get_format for the sink-pad compose rectangle */
2238 if (which == V4L2_SUBDEV_FORMAT_TRY)
2239 return v4l2_subdev_get_try_compose(&line->subdev, cfg,
2242 return &line->compose;
2246 * __vfe_get_crop - Get pointer to crop selection structure
2248 * @cfg: V4L2 subdev pad configuration
2249 * @which: TRY or ACTIVE format
2251 * Return pointer to TRY or ACTIVE crop rectangle structure
2253 static struct v4l2_rect *
2254 __vfe_get_crop(struct vfe_line *line,
2255 struct v4l2_subdev_pad_config *cfg,
2256 enum v4l2_subdev_format_whence which)
/* Mirrors __vfe_get_format for the source-pad crop rectangle; the
 * ACTIVE return statement is not visible in this extract */
2258 if (which == V4L2_SUBDEV_FORMAT_TRY)
2259 return v4l2_subdev_get_try_crop(&line->subdev, cfg,
2266 * vfe_try_format - Handle try format by pad subdev method
2268 * @cfg: V4L2 subdev pad configuration
2269 * @pad: pad on which format is requested
2270 * @fmt: pointer to v4l2 format structure
2271 * @which: wanted subdev format
/* Sink pad: snap the mbus code to the supported vfe_formats table
 * (default UYVY), clamp dimensions to 1..8191 and force progressive
 * sRGB. Source pad: start from the sink format; on the PIX line the
 * size comes from the crop rectangle and a requested 1_5X8 (packed
 * 4:2:0) variant of the sink's 2X8 YUV code is honored, otherwise the
 * 2X8 code is kept. */
2273 static void vfe_try_format(struct vfe_line *line,
2274 struct v4l2_subdev_pad_config *cfg,
2276 struct v4l2_mbus_framefmt *fmt,
2277 enum v4l2_subdev_format_whence which)
2283 case MSM_VFE_PAD_SINK:
2284 /* Set format on sink pad */
2286 for (i = 0; i < ARRAY_SIZE(vfe_formats); i++)
2287 if (fmt->code == vfe_formats[i].code)
2290 /* If not found, use UYVY as default */
2291 if (i >= ARRAY_SIZE(vfe_formats))
2292 fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
2294 fmt->width = clamp_t(u32, fmt->width, 1, 8191);
2295 fmt->height = clamp_t(u32, fmt->height, 1, 8191);
2297 fmt->field = V4L2_FIELD_NONE;
2298 fmt->colorspace = V4L2_COLORSPACE_SRGB;
2302 case MSM_VFE_PAD_SRC:
2303 /* Set and return a format same as sink pad */
2307 *fmt = *__vfe_get_format(line, cfg, MSM_VFE_PAD_SINK,
2310 if (line->id == VFE_LINE_PIX) {
2311 struct v4l2_rect *rect;
2313 rect = __vfe_get_crop(line, cfg, which);
2315 fmt->width = rect->width;
2316 fmt->height = rect->height;
2318 switch (fmt->code) {
2319 case MEDIA_BUS_FMT_YUYV8_2X8:
2320 if (code == MEDIA_BUS_FMT_YUYV8_1_5X8)
2321 fmt->code = MEDIA_BUS_FMT_YUYV8_1_5X8;
2323 fmt->code = MEDIA_BUS_FMT_YUYV8_2X8;
2325 case MEDIA_BUS_FMT_YVYU8_2X8:
2326 if (code == MEDIA_BUS_FMT_YVYU8_1_5X8)
2327 fmt->code = MEDIA_BUS_FMT_YVYU8_1_5X8;
2329 fmt->code = MEDIA_BUS_FMT_YVYU8_2X8;
2331 case MEDIA_BUS_FMT_UYVY8_2X8:
2333 if (code == MEDIA_BUS_FMT_UYVY8_1_5X8)
2334 fmt->code = MEDIA_BUS_FMT_UYVY8_1_5X8;
2336 fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
2338 case MEDIA_BUS_FMT_VYUY8_2X8:
2339 if (code == MEDIA_BUS_FMT_VYUY8_1_5X8)
2340 fmt->code = MEDIA_BUS_FMT_VYUY8_1_5X8;
2342 fmt->code = MEDIA_BUS_FMT_VYUY8_2X8;
2350 fmt->colorspace = V4L2_COLORSPACE_SRGB;
2354 * vfe_try_compose - Handle try compose selection by pad subdev method
2356 * @cfg: V4L2 subdev pad configuration
2357 * @rect: pointer to v4l2 rect structure
2358 * @which: wanted subdev format
/* Clamps the requested compose rectangle against the sink format: never
 * larger than the input, never smaller than input/SCALER_RATIO_MAX
 * (rounded up), width forced even, and floors of 16x4 applied. */
2360 static void vfe_try_compose(struct vfe_line *line,
2361 struct v4l2_subdev_pad_config *cfg,
2362 struct v4l2_rect *rect,
2363 enum v4l2_subdev_format_whence which)
2365 struct v4l2_mbus_framefmt *fmt;
2367 fmt = __vfe_get_format(line, cfg, MSM_VFE_PAD_SINK, which);
2369 if (rect->width > fmt->width)
2370 rect->width = fmt->width;
2372 if (rect->height > fmt->height)
2373 rect->height = fmt->height;
/* Downscale is bounded by SCALER_RATIO_MAX in each dimension */
2375 if (fmt->width > rect->width * SCALER_RATIO_MAX)
2376 rect->width = (fmt->width + SCALER_RATIO_MAX - 1) /
2379 rect->width &= ~0x1;
2381 if (fmt->height > rect->height * SCALER_RATIO_MAX)
2382 rect->height = (fmt->height + SCALER_RATIO_MAX - 1) /
2385 if (rect->width < 16)
2388 if (rect->height < 4)
2393 * vfe_try_crop - Handle try crop selection by pad subdev method
2395 * @cfg: V4L2 subdev pad configuration
2396 * @rect: pointer to v4l2 rect structure
2397 * @which: wanted subdev format
/* Clamps the crop rectangle inside the compose rectangle (size and
 * offset), then aligns width down to a multiple of 16 — the write
 * masters require it — recentring via the left offset, with 16x4
 * minimums enforced at the end. */
2399 static void vfe_try_crop(struct vfe_line *line,
2400 struct v4l2_subdev_pad_config *cfg,
2401 struct v4l2_rect *rect,
2402 enum v4l2_subdev_format_whence which)
2404 struct v4l2_rect *compose;
2406 compose = __vfe_get_compose(line, cfg, which);
2408 if (rect->width > compose->width)
2409 rect->width = compose->width;
2411 if (rect->width + rect->left > compose->width)
2412 rect->left = compose->width - rect->width;
2414 if (rect->height > compose->height)
2415 rect->height = compose->height;
2417 if (rect->height + rect->top > compose->height)
2418 rect->top = compose->height - rect->height;
2420 /* wm in line based mode writes multiple of 16 horizontally */
2421 rect->left += (rect->width & 0xf) >> 1;
2422 rect->width &= ~0xf;
2424 if (rect->width < 16) {
2429 if (rect->height < 4) {
2436 * vfe_enum_mbus_code - Handle pixel format enumeration
2437 * @sd: VFE V4L2 subdevice
2438 * @cfg: V4L2 subdev pad configuration
2439 * @code: pointer to v4l2_subdev_mbus_code_enum structure
2441 * return -EINVAL or zero on success
2443 static int vfe_enum_mbus_code(struct v4l2_subdev *sd,
2444 struct v4l2_subdev_pad_config *cfg,
2445 struct v4l2_subdev_mbus_code_enum *code)
2447 struct vfe_line *line = v4l2_get_subdevdata(sd);
2448 struct v4l2_mbus_framefmt *format;
/* Sink pad enumerates the static vfe_formats table; the source pad
 * exposes exactly one code — whatever the sink is currently set to */
2450 if (code->pad == MSM_VFE_PAD_SINK) {
2451 if (code->index >= ARRAY_SIZE(vfe_formats))
2454 code->code = vfe_formats[code->index].code;
2456 if (code->index > 0)
2459 format = __vfe_get_format(line, cfg, MSM_VFE_PAD_SINK,
2462 code->code = format->code;
2469 * vfe_enum_frame_size - Handle frame size enumeration
2470 * @sd: VFE V4L2 subdevice
2471 * @cfg: V4L2 subdev pad configuration
2472 * @fse: pointer to v4l2_subdev_frame_size_enum structure
2474 * Return -EINVAL or zero on success
/* Standard min/max probing trick: run vfe_try_format twice (the sizes
 * fed in between the two calls are not visible in this extract —
 * presumably 1x1 then MAX) and report the clamped results. */
2476 static int vfe_enum_frame_size(struct v4l2_subdev *sd,
2477 struct v4l2_subdev_pad_config *cfg,
2478 struct v4l2_subdev_frame_size_enum *fse)
2480 struct vfe_line *line = v4l2_get_subdevdata(sd);
2481 struct v4l2_mbus_framefmt format;
2483 if (fse->index != 0)
2486 format.code = fse->code;
2489 vfe_try_format(line, cfg, fse->pad, &format, fse->which);
2490 fse->min_width = format.width;
2491 fse->min_height = format.height;
/* If try_format changed the code, the requested code is unsupported */
2493 if (format.code != fse->code)
2496 format.code = fse->code;
2499 vfe_try_format(line, cfg, fse->pad, &format, fse->which);
2500 fse->max_width = format.width;
2501 fse->max_height = format.height;
2507 * vfe_get_format - Handle get format by pads subdev method
2508 * @sd: VFE V4L2 subdevice
2509 * @cfg: V4L2 subdev pad configuration
2510 * @fmt: pointer to v4l2 subdev format structure
2512 * Return -EINVAL or zero on success
2514 static int vfe_get_format(struct v4l2_subdev *sd,
2515 struct v4l2_subdev_pad_config *cfg,
2516 struct v4l2_subdev_format *fmt)
2518 struct vfe_line *line = v4l2_get_subdevdata(sd);
2519 struct v4l2_mbus_framefmt *format;
2521 format = __vfe_get_format(line, cfg, fmt->pad, fmt->which);
/* NULL check elided in this extract; copy out on success */
2525 fmt->format = *format;
/* Forward declaration: vfe_set_format() below resets the sink compose
 * selection via vfe_set_selection(), which is defined later in the file.
 * NOTE(review): the later definition omits the `static` qualifier —
 * linkage should match this declaration. */
2530 static int vfe_set_selection(struct v4l2_subdev *sd,
2531 struct v4l2_subdev_pad_config *cfg,
2532 struct v4l2_subdev_selection *sel);
2535 * vfe_set_format - Handle set format by pads subdev method
2536 * @sd: VFE V4L2 subdevice
2537 * @cfg: V4L2 subdev pad configuration
2538 * @fmt: pointer to v4l2 subdev format structure
2540 * Return -EINVAL or zero on success
2542 static int vfe_set_format(struct v4l2_subdev *sd,
2543 struct v4l2_subdev_pad_config *cfg,
2544 struct v4l2_subdev_format *fmt)
2546 struct vfe_line *line = v4l2_get_subdevdata(sd);
2547 struct v4l2_mbus_framefmt *format;
2549 format = __vfe_get_format(line, cfg, fmt->pad, fmt->which);
2553 vfe_try_format(line, cfg, fmt->pad, &fmt->format, fmt->which);
2554 *format = fmt->format;
2556 if (fmt->pad == MSM_VFE_PAD_SINK) {
2557 struct v4l2_subdev_selection sel = { 0 };
2560 /* Propagate the format from sink to source */
2561 format = __vfe_get_format(line, cfg, MSM_VFE_PAD_SRC,
2564 *format = fmt->format;
2565 vfe_try_format(line, cfg, MSM_VFE_PAD_SRC, format,
/* Compose/crop selections only exist on the PIX line */
2568 if (line->id != VFE_LINE_PIX)
2571 /* Reset sink pad compose selection */
2572 sel.which = fmt->which;
2573 sel.pad = MSM_VFE_PAD_SINK;
2574 sel.target = V4L2_SEL_TGT_COMPOSE;
2575 sel.r.width = fmt->format.width;
2576 sel.r.height = fmt->format.height;
2577 ret = vfe_set_selection(sd, cfg, &sel);
2586 * vfe_get_selection - Handle get selection by pads subdev method
2587 * @sd: VFE V4L2 subdevice
2588 * @cfg: V4L2 subdev pad configuration
2589 * @sel: pointer to v4l2 subdev selection structure
2591 * Return -EINVAL or zero on success
/* PIX line only. Sink pad serves COMPOSE_BOUNDS (the sink format size)
 * and COMPOSE (the stored compose rectangle); source pad serves
 * CROP_BOUNDS (the compose rectangle, since crop happens after scaling)
 * and CROP (the stored crop rectangle). */
2593 static int vfe_get_selection(struct v4l2_subdev *sd,
2594 struct v4l2_subdev_pad_config *cfg,
2595 struct v4l2_subdev_selection *sel)
2597 struct vfe_line *line = v4l2_get_subdevdata(sd);
2598 struct v4l2_subdev_format fmt = { 0 };
2599 struct v4l2_rect *rect;
2602 if (line->id != VFE_LINE_PIX)
2605 if (sel->pad == MSM_VFE_PAD_SINK)
2606 switch (sel->target) {
2607 case V4L2_SEL_TGT_COMPOSE_BOUNDS:
2609 fmt.which = sel->which;
2610 ret = vfe_get_format(sd, cfg, &fmt);
2616 sel->r.width = fmt.format.width;
2617 sel->r.height = fmt.format.height;
2619 case V4L2_SEL_TGT_COMPOSE:
2620 rect = __vfe_get_compose(line, cfg, sel->which);
2629 else if (sel->pad == MSM_VFE_PAD_SRC)
2630 switch (sel->target) {
2631 case V4L2_SEL_TGT_CROP_BOUNDS:
2632 rect = __vfe_get_compose(line, cfg, sel->which);
2636 sel->r.left = rect->left;
2637 sel->r.top = rect->top;
2638 sel->r.width = rect->width;
2639 sel->r.height = rect->height;
2641 case V4L2_SEL_TGT_CROP:
2642 rect = __vfe_get_crop(line, cfg, sel->which);
2656 * vfe_set_selection - Handle set selection by pads subdev method
2657 * @sd: VFE V4L2 subdevice
2658 * @cfg: V4L2 subdev pad configuration
2659 * @sel: pointer to v4l2 subdev selection structure
2661 * Return -EINVAL or zero on success
2663 int vfe_set_selection(struct v4l2_subdev *sd,
2664 struct v4l2_subdev_pad_config *cfg,
2665 struct v4l2_subdev_selection *sel)
2667 struct vfe_line *line = v4l2_get_subdevdata(sd);
2668 struct v4l2_rect *rect;
/* Selection rectangles are only implemented on the PIX line. */
2671 if (line->id != VFE_LINE_PIX)
/*
 * Sink COMPOSE: clamp the requested rectangle via vfe_try_compose(),
 * then propagate downstream by resetting the source crop (which in
 * turn resets the source format — see the CROP branch below).
 */
2674 if (sel->target == V4L2_SEL_TGT_COMPOSE &&
2675 sel->pad == MSM_VFE_PAD_SINK) {
2676 struct v4l2_subdev_selection crop = { 0 };
2678 rect = __vfe_get_compose(line, cfg, sel->which);
2682 vfe_try_compose(line, cfg, &sel->r, sel->which);
2685 /* Reset source crop selection */
2686 crop.which = sel->which;
2687 crop.pad = MSM_VFE_PAD_SRC;
2688 crop.target = V4L2_SEL_TGT_CROP;
/* Recursive call: takes the CROP/SRC branch below. */
2690 ret = vfe_set_selection(sd, cfg, &crop);
/*
 * Source CROP: clamp the rectangle via vfe_try_crop(), then make the
 * source pad format track the new crop width/height.
 */
2691 } else if (sel->target == V4L2_SEL_TGT_CROP &&
2692 sel->pad == MSM_VFE_PAD_SRC) {
2693 struct v4l2_subdev_format fmt = { 0 };
2695 rect = __vfe_get_crop(line, cfg, sel->which);
2699 vfe_try_crop(line, cfg, &sel->r, sel->which);
2702 /* Reset source pad format width and height */
2703 fmt.which = sel->which;
2704 fmt.pad = MSM_VFE_PAD_SRC;
2705 ret = vfe_get_format(sd, cfg, &fmt);
2709 fmt.format.width = rect->width;
2710 fmt.format.height = rect->height;
2711 ret = vfe_set_format(sd, cfg, &fmt);
2720 * vfe_init_formats - Initialize formats on all pads
2721 * @sd: VFE V4L2 subdevice
2722 * @fh: V4L2 subdev file handle
2724 * Initialize all pad formats with default values.
2726 * Return 0 on success or a negative error code otherwise
2728 static int vfe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
2730 struct v4l2_subdev_format format = {
2731 .pad = MSM_VFE_PAD_SINK,
/*
 * TRY formats when invoked from an open file handle (subdev open),
 * ACTIVE formats when called with fh == NULL at registration time.
 */
2732 .which = fh ? V4L2_SUBDEV_FORMAT_TRY :
2733 V4L2_SUBDEV_FORMAT_ACTIVE,
2735 .code = MEDIA_BUS_FMT_UYVY8_2X8,
/*
 * Set the sink format; presumably vfe_set_format() also propagates a
 * matching default to the source pad — confirm in vfe_set_format().
 */
2741 return vfe_set_format(sd, fh ? fh->pad : NULL, &format);
2745 * msm_vfe_subdev_init - Initialize VFE device structure and resources
2747 * @res: VFE module resources table
2749 * Return 0 on success or a negative error code otherwise
2751 int msm_vfe_subdev_init(struct vfe_device *vfe, const struct resources *res)
2753 struct device *dev = to_device(vfe);
2754 struct platform_device *pdev = to_platform_device(dev);
2756 struct camss *camss = to_camss(vfe);
/* Map the VFE register block named by the resources table. */
2762 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res->reg[0]);
2763 vfe->base = devm_ioremap_resource(dev, r);
2764 if (IS_ERR(vfe->base)) {
2765 dev_err(dev, "could not map memory\n");
2766 return PTR_ERR(vfe->base);
/* Look up and request the VFE interrupt (rising-edge triggered). */
2771 r = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
2774 dev_err(dev, "missing IRQ\n");
2778 vfe->irq = r->start;
2779 snprintf(vfe->irq_name, sizeof(vfe->irq_name), "%s_%s%d",
2780 dev_name(dev), MSM_VFE_NAME, vfe->id);
2781 ret = devm_request_irq(dev, vfe->irq, vfe_isr,
2782 IRQF_TRIGGER_RISING, vfe->irq_name, vfe);
2784 dev_err(dev, "request_irq failed: %d\n", ret);
/* res->clock is a NULL-terminated array of clock names: count them. */
2791 while (res->clock[vfe->nclocks])
2794 vfe->clock = devm_kzalloc(dev, vfe->nclocks * sizeof(*vfe->clock),
/* Acquire each clock and record its supported frequency table. */
2799 for (i = 0; i < vfe->nclocks; i++) {
2800 struct camss_clock *clock = &vfe->clock[i];
2802 clock->clk = devm_clk_get(dev, res->clock[i]);
2803 if (IS_ERR(clock->clk))
2804 return PTR_ERR(clock->clk);
2806 clock->name = res->clock[i];
/* res->clock_rate[i] is zero-terminated: count the rates. */
2809 while (res->clock_rate[i][clock->nfreqs])
2812 if (!clock->nfreqs) {
2817 clock->freq = devm_kzalloc(dev, clock->nfreqs *
2818 sizeof(*clock->freq), GFP_KERNEL);
2822 for (j = 0; j < clock->nfreqs; j++)
2823 clock->freq[j] = res->clock_rate[i][j];
/* Serializes power up/down against the power reference count. */
2826 mutex_init(&vfe->power_lock);
2827 vfe->power_count = 0;
/* Serializes stream on/off against the stream reference count. */
2829 mutex_init(&vfe->stream_lock);
2830 vfe->stream_count = 0;
/* Protects the per-line output state touched from the ISR. */
2832 spin_lock_init(&vfe->output_lock);
2835 vfe->reg_update = 0;
/* Initialize every line (RDI0..PIX) with its video node and id. */
2837 for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++) {
2838 vfe->line[i].video_out.type =
2839 V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
2840 vfe->line[i].video_out.camss = camss;
2841 vfe->line[i].id = i;
2842 init_completion(&vfe->line[i].output.sof);
2843 init_completion(&vfe->line[i].output.reg_update);
/* Completed by the ISR when a global reset / bus halt finishes. */
2846 init_completion(&vfe->reset_complete);
2847 init_completion(&vfe->halt_complete);
2853 * msm_vfe_get_vfe_id - Get VFE HW module id
2854 * @entity: Pointer to VFE media entity structure
2855 * @id: Return VFE HW module id here
2857 void msm_vfe_get_vfe_id(struct media_entity *entity, u8 *id)
2859 struct v4l2_subdev *sd;
2860 struct vfe_line *line;
2861 struct vfe_device *vfe;
2863 sd = media_entity_to_v4l2_subdev(entity);
2864 line = v4l2_get_subdevdata(sd);
/*
 * NOTE(review): the to_vfe() lookup and the *id assignment are elided
 * from this listing.
 */
2871 * msm_vfe_get_vfe_line_id - Get VFE line id by media entity
2872 * @entity: Pointer to VFE media entity structure
2873 * @id: Return VFE line id here
2875 void msm_vfe_get_vfe_line_id(struct media_entity *entity, enum vfe_line_id *id)
2877 struct v4l2_subdev *sd;
2878 struct vfe_line *line;
/* The line is the subdev's drvdata; its id field is the answer. */
2880 sd = media_entity_to_v4l2_subdev(entity);
2881 line = v4l2_get_subdevdata(sd);
/* NOTE(review): the *id = line->id assignment is elided from this listing. */
2887 * vfe_link_setup - Setup VFE connections
2888 * @entity: Pointer to media entity structure
2889 * @local: Pointer to local pad
2890 * @remote: Pointer to remote pad
2891 * @flags: Link flags
2893 * Return 0 on success
2895 static int vfe_link_setup(struct media_entity *entity,
2896 const struct media_pad *local,
2897 const struct media_pad *remote, u32 flags)
/*
 * Only one enabled link per pad: reject enabling this link if the
 * local pad already has a remote connection.
 */
2899 if (flags & MEDIA_LNK_FL_ENABLED)
2900 if (media_entity_remote_pad(local))
/* V4L2 subdev core operations: power management only. */
2906 static const struct v4l2_subdev_core_ops vfe_core_ops = {
2907 .s_power = vfe_set_power,
/* V4L2 subdev video operations: stream on/off. */
2910 static const struct v4l2_subdev_video_ops vfe_video_ops = {
2911 .s_stream = vfe_set_stream,
/* V4L2 subdev pad operations: format and selection negotiation. */
2914 static const struct v4l2_subdev_pad_ops vfe_pad_ops = {
2915 .enum_mbus_code = vfe_enum_mbus_code,
2916 .enum_frame_size = vfe_enum_frame_size,
2917 .get_fmt = vfe_get_format,
2918 .set_fmt = vfe_set_format,
2919 .get_selection = vfe_get_selection,
2920 .set_selection = vfe_set_selection,
/* Aggregate subdev ops table registered with v4l2_subdev_init(). */
2923 static const struct v4l2_subdev_ops vfe_v4l2_ops = {
2924 .core = &vfe_core_ops,
2925 .video = &vfe_video_ops,
2926 .pad = &vfe_pad_ops,
/* .open initializes TRY formats whenever the subdev node is opened. */
2929 static const struct v4l2_subdev_internal_ops vfe_v4l2_internal_ops = {
2930 .open = vfe_init_formats,
/* Media entity operations: link setup plus standard link validation. */
2933 static const struct media_entity_operations vfe_media_ops = {
2934 .link_setup = vfe_link_setup,
2935 .link_validate = v4l2_subdev_link_validate,
/* Callbacks used by the camss video node layer for buffer handling. */
2938 static const struct camss_video_ops camss_vfe_video_ops = {
2939 .queue_buffer = vfe_queue_buffer,
2940 .flush_buffers = vfe_flush_buffers,
/*
 * msm_vfe_stop_streaming - Stop streaming on all video nodes of this VFE
 * @vfe: VFE device
 */
2943 void msm_vfe_stop_streaming(struct vfe_device *vfe)
2947 for (i = 0; i < ARRAY_SIZE(vfe->line); i++)
2948 msm_video_stop_streaming(&vfe->line[i].video_out);
2952 * msm_vfe_register_entities - Register subdev node for VFE module
2954 * @v4l2_dev: V4L2 device
2956 * Initialize and register a subdev node for the VFE module. Then
2957 * call msm_video_register() to register the video device node which
2958 * will be connected to this subdev node. Then actually create the
2959 * media link between them.
2961 * Return 0 on success or a negative error code otherwise
2963 int msm_vfe_register_entities(struct vfe_device *vfe,
2964 struct v4l2_device *v4l2_dev)
2966 struct device *dev = to_device(vfe);
2967 struct v4l2_subdev *sd;
2968 struct media_pad *pads;
2969 struct camss_video *video_out;
/* One subdev + one video node per VFE line (RDI0..2 and PIX). */
2973 for (i = 0; i < ARRAY_SIZE(vfe->line); i++) {
2976 sd = &vfe->line[i].subdev;
2977 pads = vfe->line[i].pads;
2978 video_out = &vfe->line[i].video_out;
2980 v4l2_subdev_init(sd, &vfe_v4l2_ops);
2981 sd->internal_ops = &vfe_v4l2_internal_ops;
2982 sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
/* Name: "msm_vfe<id>_pix" for the PIX line, "msm_vfe<id>_rdi<i>" otherwise. */
2983 if (i == VFE_LINE_PIX)
2984 snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d_%s",
2985 MSM_VFE_NAME, vfe->id, "pix");
2987 snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d_%s%d",
2988 MSM_VFE_NAME, vfe->id, "rdi", i);
2990 v4l2_set_subdevdata(sd, &vfe->line[i]);
/* fh == NULL: set ACTIVE (not TRY) default formats. */
2992 ret = vfe_init_formats(sd, NULL);
2994 dev_err(dev, "Failed to init format: %d\n", ret);
2998 pads[MSM_VFE_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
2999 pads[MSM_VFE_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
3001 sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
3002 sd->entity.ops = &vfe_media_ops;
3003 ret = media_entity_pads_init(&sd->entity, MSM_VFE_PADS_NUM,
3006 dev_err(dev, "Failed to init media entity: %d\n", ret);
3010 ret = v4l2_device_register_subdev(v4l2_dev, sd);
3012 dev_err(dev, "Failed to register subdev: %d\n", ret);
3013 goto error_reg_subdev;
/*
 * PIX line output is line based with stricter (16-byte) bytesperline
 * alignment; RDI lines use 8-byte alignment and are not line based.
 */
3016 video_out->ops = &camss_vfe_video_ops;
3017 video_out->bpl_alignment = 8;
3018 video_out->line_based = 0;
3019 if (i == VFE_LINE_PIX) {
3020 video_out->bpl_alignment = 16;
3021 video_out->line_based = 1;
3023 snprintf(name, ARRAY_SIZE(name), "%s%d_%s%d",
3024 MSM_VFE_NAME, vfe->id, "video", i);
3025 ret = msm_video_register(video_out, v4l2_dev, name,
3026 i == VFE_LINE_PIX ? 1 : 0);
3028 dev_err(dev, "Failed to register video node: %d\n",
3030 goto error_reg_video;
/* Immutable, always-enabled link: subdev source pad -> video node. */
3033 ret = media_create_pad_link(
3034 &sd->entity, MSM_VFE_PAD_SRC,
3035 &video_out->vdev.entity, 0,
3036 MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
3038 dev_err(dev, "Failed to link %s->%s entities: %d\n",
3039 sd->entity.name, video_out->vdev.entity.name,
/*
 * Error unwinding: undo the current iteration's partial setup, then
 * tear down all previously completed iterations in reverse order.
 * NOTE(review): the goto labels themselves are elided from this listing.
 */
3048 msm_video_unregister(video_out);
3051 v4l2_device_unregister_subdev(sd);
3054 media_entity_cleanup(&sd->entity);
3057 for (i--; i >= 0; i--) {
3058 sd = &vfe->line[i].subdev;
3059 video_out = &vfe->line[i].video_out;
3061 msm_video_unregister(video_out);
3062 v4l2_device_unregister_subdev(sd);
3063 media_entity_cleanup(&sd->entity);
3070 * msm_vfe_unregister_entities - Unregister VFE module subdev node
3073 void msm_vfe_unregister_entities(struct vfe_device *vfe)
3077 mutex_destroy(&vfe->power_lock);
3078 mutex_destroy(&vfe->stream_lock);
3080 for (i = 0; i < ARRAY_SIZE(vfe->line); i++) {
3081 struct v4l2_subdev *sd = &vfe->line[i].subdev;
3082 struct camss_video *video_out = &vfe->line[i].video_out;
3084 msm_video_unregister(video_out);
3085 v4l2_device_unregister_subdev(sd);
3086 media_entity_cleanup(&sd->entity);