1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
6 * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
7 * Copyright (C) 2018 Bootlin
9 * Based on the vim2m driver, that is:
11 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
12 * Pawel Osciak, <pawel@osciak.com>
13 * Marek Szyprowski, <m.szyprowski@samsung.com>
16 #include <linux/platform_device.h>
17 #include <linux/of_reserved_mem.h>
18 #include <linux/of_device.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/interrupt.h>
21 #include <linux/clk.h>
22 #include <linux/regmap.h>
23 #include <linux/reset.h>
24 #include <linux/soc/sunxi/sunxi_sram.h>
26 #include <media/videobuf2-core.h>
27 #include <media/v4l2-mem2mem.h>
30 #include "cedrus_hw.h"
31 #include "cedrus_regs.h"
/*
 * Select and enable the decoding engine for @codec by composing the
 * VE_MODE register value and writing it to the hardware.
 *
 * NOTE(review): this extract is missing physical lines (opening brace,
 * switch skeleton, break/default/return statements) — the comments below
 * cover only the visible statements.
 */
33 int cedrus_engine_enable(struct cedrus_dev *dev, enum cedrus_codec codec)
38	 * FIXME: This is only valid on 32-bits DDR's, we should test
/* Mode bits common to all codecs: reconstruction write mode and DDR bandwidth. */
41 reg |= VE_MODE_REC_WR_MODE_2MB;
42 reg |= VE_MODE_DDR_MODE_BW_128;
/* Per-codec engine selection (visible switch cases). */
45 case CEDRUS_CODEC_MPEG2:
46 reg |= VE_MODE_DEC_MPEG;
49 case CEDRUS_CODEC_H264:
50 reg |= VE_MODE_DEC_H264;
/* Program the composed mode word into the video engine. */
57 cedrus_write(dev, VE_MODE, reg);
/* Disable the video engine by writing the "disabled" mode value to VE_MODE. */
62 void cedrus_engine_disable(struct cedrus_dev *dev)
64 cedrus_write(dev, VE_MODE, VE_MODE_DISABLED);
/*
 * Configure the engine's output (capture) format registers from the
 * destination V4L2 pixel format.
 *
 * NOTE(review): extract is missing lines (braces, break statements, the
 * chroma_size/reg declarations) — comments cover only visible statements.
 */
67 void cedrus_dst_format_set(struct cedrus_dev *dev,
68 struct v4l2_pix_format *fmt)
70 unsigned int width = fmt->width;
71 unsigned int height = fmt->height;
75 switch (fmt->pixelformat) {
/* Linear (untiled) NV12: program primary output path. */
76 case V4L2_PIX_FMT_NV12:
/* Chroma plane size for 4:2:0 with 16-pixel aligned dimensions. */
77 chroma_size = ALIGN(width, 16) * ALIGN(height, 16) / 2;
79 reg = VE_PRIMARY_OUT_FMT_NV12;
80 cedrus_write(dev, VE_PRIMARY_OUT_FMT, reg);
/* Per-plane chroma buffer length (half of the total chroma size). */
82 reg = chroma_size / 2;
83 cedrus_write(dev, VE_PRIMARY_CHROMA_BUF_LEN, reg);
/* Line strides: full aligned width for luma, half for subsampled chroma. */
85 reg = VE_PRIMARY_FB_LINE_STRIDE_LUMA(ALIGN(width, 16)) |
86 VE_PRIMARY_FB_LINE_STRIDE_CHROMA(ALIGN(width, 16) / 2);
87 cedrus_write(dev, VE_PRIMARY_FB_LINE_STRIDE, reg);
/* Allwinner 32x32 tiled NV12: program both primary and secondary paths. */
90 case V4L2_PIX_FMT_SUNXI_TILED_NV12:
92 reg = VE_PRIMARY_OUT_FMT_TILED_32_NV12;
93 cedrus_write(dev, VE_PRIMARY_OUT_FMT, reg);
95 reg = VE_SECONDARY_OUT_FMT_TILED_32_NV12;
96 cedrus_write(dev, VE_CHROMA_BUF_LEN, reg);
/*
 * Decode-completion interrupt handler: query the per-codec IRQ status,
 * acknowledge the interrupt, complete the in-flight source/destination
 * buffers and finish the mem2mem job.
 *
 * NOTE(review): extract is missing lines (braces, return statements,
 * else keyword) — comments cover only visible statements.
 */
102 static irqreturn_t cedrus_irq(int irq, void *data)
104 struct cedrus_dev *dev = data;
105 struct cedrus_ctx *ctx;
106 struct vb2_v4l2_buffer *src_buf, *dst_buf;
107 enum vb2_buffer_state state;
108 enum cedrus_irq_status status;
/* Context that owns the currently running mem2mem job. */
110 ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
112 v4l2_err(&dev->v4l2_dev,
113 "Instance released before the end of transaction\n");
/* Ask the active codec backend whether this IRQ is ours. */
117 status = dev->dec_ops[ctx->current_codec]->irq_status(ctx);
118 if (status == CEDRUS_IRQ_NONE)
/* Acknowledge: mask further IRQs and clear the pending status. */
121 dev->dec_ops[ctx->current_codec]->irq_disable(ctx);
122 dev->dec_ops[ctx->current_codec]->irq_clear(ctx);
/* Pull the buffers belonging to the completed transaction. */
124 src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
125 dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
127 if (!src_buf || !dst_buf) {
128 v4l2_err(&dev->v4l2_dev,
129 "Missing source and/or destination buffers\n");
/* Propagate the hardware decode result to both buffers. */
133 if (status == CEDRUS_IRQ_ERROR)
134 state = VB2_BUF_STATE_ERROR;
136 state = VB2_BUF_STATE_DONE;
138 v4l2_m2m_buf_done(src_buf, state);
139 v4l2_m2m_buf_done(dst_buf, state);
/* Let the mem2mem core schedule the next queued job. */
141 v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
/*
 * Probe-time hardware setup: acquire IRQ, reserved memory, SRAM, clocks,
 * reset line and MMIO registers, then power everything up.
 *
 * NOTE(review): extract is missing lines (braces, goto-based error
 * unwinding labels, return statements, some declarations) — comments
 * cover only visible statements. The trailing cleanup calls (L127-L131)
 * appear to be the tail of the error-unwind path whose labels are not
 * visible here.
 */
146 int cedrus_hw_probe(struct cedrus_dev *dev)
148 const struct cedrus_variant *variant;
149 struct resource *res;
/* SoC-specific data attached to the matched OF compatible. */
153 variant = of_device_get_match_data(dev->dev);
157 dev->capabilities = variant->capabilities;
/* Decode-complete interrupt, shared by all codec backends. */
159 irq_dec = platform_get_irq(dev->pdev, 0);
162 ret = devm_request_irq(dev->dev, irq_dec, cedrus_irq,
163 0, dev_name(dev->dev), dev);
165 dev_err(dev->dev, "Failed to request IRQ\n");
171	 * The VPU is only able to handle bus addresses so we have to subtract
172	 * the RAM offset to the physical addresses.
174	 * This information will eventually be obtained from device-tree.
/* Apply the DMA offset quirk only where the variant does not opt out. */
177 #ifdef PHYS_PFN_OFFSET
178 if (!(variant->quirks & CEDRUS_QUIRK_NO_DMA_OFFSET))
179 dev->dev->dma_pfn_offset = PHYS_PFN_OFFSET;
/* Optional reserved-memory region; -ENODEV (none declared) is tolerated. */
182 ret = of_reserved_mem_device_init(dev->dev);
183 if (ret && ret != -ENODEV) {
184 dev_err(dev->dev, "Failed to reserve memory\n");
/* Claim the SRAM section shared between the VE and the CPU. */
189 ret = sunxi_sram_claim(dev->dev);
191 dev_err(dev->dev, "Failed to claim SRAM\n");
/* Bus, module and DRAM clocks required by the video engine. */
196 dev->ahb_clk = devm_clk_get(dev->dev, "ahb");
197 if (IS_ERR(dev->ahb_clk)) {
198 dev_err(dev->dev, "Failed to get AHB clock\n");
200 ret = PTR_ERR(dev->ahb_clk);
204 dev->mod_clk = devm_clk_get(dev->dev, "mod");
205 if (IS_ERR(dev->mod_clk)) {
206 dev_err(dev->dev, "Failed to get MOD clock\n");
208 ret = PTR_ERR(dev->mod_clk);
212 dev->ram_clk = devm_clk_get(dev->dev, "ram");
213 if (IS_ERR(dev->ram_clk)) {
214 dev_err(dev->dev, "Failed to get RAM clock\n");
216 ret = PTR_ERR(dev->ram_clk);
/* Reset line keeping the engine in reset until probe completes. */
220 dev->rstc = devm_reset_control_get(dev->dev, NULL);
221 if (IS_ERR(dev->rstc)) {
222 dev_err(dev->dev, "Failed to get reset control\n");
224 ret = PTR_ERR(dev->rstc);
/* Map the video engine register window. */
228 res = platform_get_resource(dev->pdev, IORESOURCE_MEM, 0);
229 dev->base = devm_ioremap_resource(dev->dev, res);
230 if (IS_ERR(dev->base)) {
231 dev_err(dev->dev, "Failed to map registers\n");
233 ret = PTR_ERR(dev->base);
/* Variant-specific module clock rate, then power-up sequence. */
237 ret = clk_set_rate(dev->mod_clk, variant->mod_rate);
239 dev_err(dev->dev, "Failed to set clock rate\n");
244 ret = clk_prepare_enable(dev->ahb_clk);
246 dev_err(dev->dev, "Failed to enable AHB clock\n");
251 ret = clk_prepare_enable(dev->mod_clk);
253 dev_err(dev->dev, "Failed to enable MOD clock\n");
258 ret = clk_prepare_enable(dev->ram_clk);
260 dev_err(dev->dev, "Failed to enable RAM clock\n");
/* Take the engine out of reset now that all clocks are running. */
265 ret = reset_control_reset(dev->rstc);
267 dev_err(dev->dev, "Failed to apply reset\n");
/* Error unwinding: release resources in reverse order of acquisition. */
275 clk_disable_unprepare(dev->ram_clk);
277 clk_disable_unprepare(dev->mod_clk);
279 clk_disable_unprepare(dev->ahb_clk);
281 sunxi_sram_release(dev->dev);
283 of_reserved_mem_device_release(dev->dev);
/*
 * Tear down the hardware: assert reset, stop clocks and release SRAM and
 * reserved memory — the mirror image of cedrus_hw_probe(), in reverse
 * order of acquisition.
 */
288 void cedrus_hw_remove(struct cedrus_dev *dev)
290 reset_control_assert(dev->rstc);
292 clk_disable_unprepare(dev->ram_clk);
293 clk_disable_unprepare(dev->mod_clk);
294 clk_disable_unprepare(dev->ahb_clk);
296 sunxi_sram_release(dev->dev);
298 of_reserved_mem_device_release(dev->dev);