// SPDX-License-Identifier: GPL-2.0-only
/*
 * VPDMA helper library
 *
 * Copyright (c) 2013 Texas Instruments Inc.
 *
 * David Griego, <dagriego@biglakesoftware.com>
 * Dale Farnsworth, <dale@farnsworth.org>
 * Archit Taneja, <archit@ti.com>
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/videodev2.h>

#include "vpdma.h"
#include "vpdma_priv.h"

#define VPDMA_FIRMWARE	"vpdma-1b8.bin"
const struct vpdma_data_format vpdma_yuv_fmts[] = {
	[VPDMA_DATA_FMT_Y444] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_Y444,
	},
	[VPDMA_DATA_FMT_Y422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_Y422,
	},
	[VPDMA_DATA_FMT_Y420] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_Y420,
	},
	[VPDMA_DATA_FMT_C444] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_C444,
	},
	[VPDMA_DATA_FMT_C422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_C422,
	},
	[VPDMA_DATA_FMT_C420] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_C420,
	},
	[VPDMA_DATA_FMT_YCR422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_YCR422,
	},
	[VPDMA_DATA_FMT_YC444] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_YC444,
	},
	[VPDMA_DATA_FMT_CRY422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CRY422,
	},
	[VPDMA_DATA_FMT_CBY422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CBY422,
	},
	[VPDMA_DATA_FMT_YCB422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_YCB422,
	},
};
EXPORT_SYMBOL(vpdma_yuv_fmts);
const struct vpdma_data_format vpdma_rgb_fmts[] = {
	[VPDMA_DATA_FMT_RGB565] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGB16_565,
	},
	[VPDMA_DATA_FMT_ARGB16_1555] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB_1555,
	},
	[VPDMA_DATA_FMT_ARGB16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB_4444,
	},
	[VPDMA_DATA_FMT_RGBA16_5551] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA_5551,
	},
	[VPDMA_DATA_FMT_RGBA16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA_4444,
	},
	[VPDMA_DATA_FMT_ARGB24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB24_6666,
	},
	[VPDMA_DATA_FMT_RGB24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGB24_888,
	},
	[VPDMA_DATA_FMT_ARGB32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB32_8888,
	},
	[VPDMA_DATA_FMT_RGBA24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA24_6666,
	},
	[VPDMA_DATA_FMT_RGBA32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA32_8888,
	},
	[VPDMA_DATA_FMT_BGR565] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGR16_565,
	},
	[VPDMA_DATA_FMT_ABGR16_1555] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR_1555,
	},
	[VPDMA_DATA_FMT_ABGR16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR_4444,
	},
	[VPDMA_DATA_FMT_BGRA16_5551] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA_5551,
	},
	[VPDMA_DATA_FMT_BGRA16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA_4444,
	},
	[VPDMA_DATA_FMT_ABGR24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR24_6666,
	},
	[VPDMA_DATA_FMT_BGR24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGR24_888,
	},
	[VPDMA_DATA_FMT_ABGR32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR32_8888,
	},
	[VPDMA_DATA_FMT_BGRA24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA24_6666,
	},
	[VPDMA_DATA_FMT_BGRA32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA32_8888,
	},
};
EXPORT_SYMBOL(vpdma_rgb_fmts);
/*
 * To handle RAW formats we are re-using the CBY422 vpdma data type, so
 * that we can use the vpdma to re-order the incoming bytes, as the
 * parser assumes that the first byte presented on the bus is the MSB
 * of a 2-byte value.
 * RAW8 handles from 1 to 8 bits
 * RAW16 handles from 9 to 16 bits
 */
const struct vpdma_data_format vpdma_raw_fmts[] = {
	[VPDMA_DATA_FMT_RAW8] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CBY422,
	},
	[VPDMA_DATA_FMT_RAW16] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CBY422,
	},
};
EXPORT_SYMBOL(vpdma_raw_fmts);
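
/*
 * Example (hypothetical usage sketch, not part of the original flow):
 * per the comment above, a capture driver could pick a RAW entry from
 * this table by bits-per-pixel:
 *
 *	const struct vpdma_data_format *fmt;
 *
 *	if (bpp <= 8)
 *		fmt = &vpdma_raw_fmts[VPDMA_DATA_FMT_RAW8];
 *	else
 *		fmt = &vpdma_raw_fmts[VPDMA_DATA_FMT_RAW16];
 */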
const struct vpdma_data_format vpdma_misc_fmts[] = {
	[VPDMA_DATA_FMT_MV] = {
		.type		= VPDMA_DATA_FMT_TYPE_MISC,
		.data_type	= DATA_TYPE_MV,
	},
};
EXPORT_SYMBOL(vpdma_misc_fmts);
struct vpdma_channel_info {
	int num;		/* VPDMA channel number */
	int cstat_offset;	/* client CSTAT register offset */
};

static const struct vpdma_channel_info chan_info[] = {
	[VPE_CHAN_LUMA1_IN] = {
		.num		= VPE_CHAN_NUM_LUMA1_IN,
		.cstat_offset	= VPDMA_DEI_LUMA1_CSTAT,
	},
	[VPE_CHAN_CHROMA1_IN] = {
		.num		= VPE_CHAN_NUM_CHROMA1_IN,
		.cstat_offset	= VPDMA_DEI_CHROMA1_CSTAT,
	},
	[VPE_CHAN_LUMA2_IN] = {
		.num		= VPE_CHAN_NUM_LUMA2_IN,
		.cstat_offset	= VPDMA_DEI_LUMA2_CSTAT,
	},
	[VPE_CHAN_CHROMA2_IN] = {
		.num		= VPE_CHAN_NUM_CHROMA2_IN,
		.cstat_offset	= VPDMA_DEI_CHROMA2_CSTAT,
	},
	[VPE_CHAN_LUMA3_IN] = {
		.num		= VPE_CHAN_NUM_LUMA3_IN,
		.cstat_offset	= VPDMA_DEI_LUMA3_CSTAT,
	},
	[VPE_CHAN_CHROMA3_IN] = {
		.num		= VPE_CHAN_NUM_CHROMA3_IN,
		.cstat_offset	= VPDMA_DEI_CHROMA3_CSTAT,
	},
	[VPE_CHAN_MV_IN] = {
		.num		= VPE_CHAN_NUM_MV_IN,
		.cstat_offset	= VPDMA_DEI_MV_IN_CSTAT,
	},
	[VPE_CHAN_MV_OUT] = {
		.num		= VPE_CHAN_NUM_MV_OUT,
		.cstat_offset	= VPDMA_DEI_MV_OUT_CSTAT,
	},
	[VPE_CHAN_LUMA_OUT] = {
		.num		= VPE_CHAN_NUM_LUMA_OUT,
		.cstat_offset	= VPDMA_VIP_UP_Y_CSTAT,
	},
	[VPE_CHAN_CHROMA_OUT] = {
		.num		= VPE_CHAN_NUM_CHROMA_OUT,
		.cstat_offset	= VPDMA_VIP_UP_UV_CSTAT,
	},
	[VPE_CHAN_RGB_OUT] = {
		.num		= VPE_CHAN_NUM_RGB_OUT,
		.cstat_offset	= VPDMA_VIP_UP_Y_CSTAT,
	},
};
static u32 read_reg(struct vpdma_data *vpdma, int offset)
{
	return ioread32(vpdma->base + offset);
}

static void write_reg(struct vpdma_data *vpdma, int offset, u32 value)
{
	iowrite32(value, vpdma->base + offset);
}

static int read_field_reg(struct vpdma_data *vpdma, int offset,
		u32 mask, int shift)
{
	return (read_reg(vpdma, offset) & (mask << shift)) >> shift;
}

static void write_field_reg(struct vpdma_data *vpdma, int offset, u32 field,
		u32 mask, int shift)
{
	u32 val = read_reg(vpdma, offset);

	val &= ~(mask << shift);
	val |= (field & mask) << shift;

	write_reg(vpdma, offset, val);
}
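
/*
 * Worked example of the read-modify-write above (illustrative,
 * hypothetical values): with mask = 0x7 and shift = 4, writing
 * field = 0x5 to a register holding 0xff yields
 * (0xff & ~0x70) | (0x5 << 4) = 0x8f | 0x50 = 0xdf; only bits 4-6
 * change, the rest of the register is preserved.
 */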
void vpdma_dump_regs(struct vpdma_data *vpdma)
{
	struct device *dev = &vpdma->pdev->dev;

#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, read_reg(vpdma, VPDMA_##r))

	dev_dbg(dev, "VPDMA Registers:\n");

	DUMPREG(LIST_STAT_SYNC);

	/*
	 * dump registers of only group0 and group3, because the VPE
	 * channels lie within the group0 and group3 registers
	 */
	DUMPREG(INT_CHAN_STAT(0));
	DUMPREG(INT_CHAN_MASK(0));
	DUMPREG(INT_CHAN_STAT(3));
	DUMPREG(INT_CHAN_MASK(3));
	DUMPREG(INT_CLIENT0_STAT);
	DUMPREG(INT_CLIENT0_MASK);
	DUMPREG(INT_CLIENT1_STAT);
	DUMPREG(INT_CLIENT1_MASK);
	DUMPREG(INT_LIST0_STAT);
	DUMPREG(INT_LIST0_MASK);

	/*
	 * these registers are specific to VPE clients; this function could
	 * be made to dump either VPE or VIP client registers based on who
	 * is using it
	 */
	DUMPREG(DEI_CHROMA1_CSTAT);
	DUMPREG(DEI_LUMA1_CSTAT);
	DUMPREG(DEI_CHROMA2_CSTAT);
	DUMPREG(DEI_LUMA2_CSTAT);
	DUMPREG(DEI_CHROMA3_CSTAT);
	DUMPREG(DEI_LUMA3_CSTAT);
	DUMPREG(DEI_MV_IN_CSTAT);
	DUMPREG(DEI_MV_OUT_CSTAT);
	DUMPREG(VIP_UP_Y_CSTAT);
	DUMPREG(VIP_UP_UV_CSTAT);
	DUMPREG(VPI_CTL_CSTAT);
}
EXPORT_SYMBOL(vpdma_dump_regs);
/*
 * Allocate a DMA buffer
 */
int vpdma_alloc_desc_buf(struct vpdma_buf *buf, size_t size)
{
	buf->size = size;
	buf->mapped = false;
	buf->addr = kzalloc(size, GFP_KERNEL);
	if (!buf->addr)
		return -ENOMEM;

	WARN_ON(((unsigned long)buf->addr & VPDMA_DESC_ALIGN) != 0);

	return 0;
}
EXPORT_SYMBOL(vpdma_alloc_desc_buf);

void vpdma_free_desc_buf(struct vpdma_buf *buf)
{
	WARN_ON(buf->mapped);
	kfree(buf->addr);
	buf->addr = NULL;
	buf->size = 0;
}
EXPORT_SYMBOL(vpdma_free_desc_buf);
/*
 * map a descriptor/payload DMA buffer, enabling DMA access
 */
int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
{
	struct device *dev = &vpdma->pdev->dev;

	WARN_ON(buf->mapped);
	buf->dma_addr = dma_map_single(dev, buf->addr, buf->size,
				DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, buf->dma_addr)) {
		dev_err(dev, "failed to map buffer\n");
		return -EINVAL;
	}

	buf->mapped = true;

	return 0;
}
EXPORT_SYMBOL(vpdma_map_desc_buf);

/*
 * unmap a descriptor/payload DMA buffer, disabling DMA access and
 * allowing the main processor to access the data
 */
void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
{
	struct device *dev = &vpdma->pdev->dev;

	if (buf->mapped)
		dma_unmap_single(dev, buf->dma_addr, buf->size,
				DMA_BIDIRECTIONAL);

	buf->mapped = false;
}
EXPORT_SYMBOL(vpdma_unmap_desc_buf);
/*
 * Clean up all pending descriptors of a list.
 * First, stop the current list being processed; if VPDMA was busy,
 * this step makes it accept new list posts again. Then, to clean up
 * the internal FSM, post an abort-channel control descriptor for each
 * of the channels in the @channels array of size @size.
 */
int vpdma_list_cleanup(struct vpdma_data *vpdma, int list_num,
		int *channels, int size)
{
	struct vpdma_desc_list abort_list;
	int i, ret, timeout = 500;

	write_reg(vpdma, VPDMA_LIST_ATTR,
			(list_num << VPDMA_LIST_NUM_SHFT) |
			(1 << VPDMA_LIST_STOP_SHFT));

	if (size <= 0 || !channels)
		return 0;

	ret = vpdma_create_desc_list(&abort_list,
		size * sizeof(struct vpdma_dtd), VPDMA_LIST_TYPE_NORMAL);
	if (ret)
		return ret;

	for (i = 0; i < size; i++)
		vpdma_add_abort_channel_ctd(&abort_list, channels[i]);

	ret = vpdma_map_desc_buf(vpdma, &abort_list.buf);
	if (ret)
		goto free_desc;
	ret = vpdma_submit_descs(vpdma, &abort_list, list_num);
	if (ret)
		goto unmap_desc;

	while (vpdma_list_busy(vpdma, list_num) && --timeout)
		;

	if (timeout == 0) {
		dev_err(&vpdma->pdev->dev, "Timed out cleaning up VPDMA list\n");
		ret = -EBUSY;
	}

unmap_desc:
	vpdma_unmap_desc_buf(vpdma, &abort_list.buf);
free_desc:
	vpdma_free_desc_buf(&abort_list.buf);

	return ret;
}
EXPORT_SYMBOL(vpdma_list_cleanup);
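
/*
 * Example (hypothetical usage sketch): a driver aborting an in-flight
 * VPE transaction might collect the channel numbers it was using and
 * clean up hardware list 0; the channel choices here are illustrative.
 *
 *	int chans[] = { VPE_CHAN_NUM_LUMA1_IN, VPE_CHAN_NUM_CHROMA1_IN };
 *
 *	ret = vpdma_list_cleanup(vpdma, 0, chans, ARRAY_SIZE(chans));
 *	if (ret)
 *		dev_err(dev, "list cleanup failed: %d\n", ret);
 */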
/*
 * create a descriptor list; the user of this list will append
 * configuration, control and data descriptors to it, and then submit
 * it to VPDMA. VPDMA's list parser will go through each descriptor and
 * perform the required DMA operations.
 */
int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type)
{
	int r;

	r = vpdma_alloc_desc_buf(&list->buf, size);
	if (r)
		return r;

	list->next = list->buf.addr;
	list->type = type;

	return 0;
}
EXPORT_SYMBOL(vpdma_create_desc_list);

/*
 * once a descriptor list is parsed by VPDMA, we reset the list by
 * emptying it, to allow new descriptors to be added to the list.
 */
void vpdma_reset_desc_list(struct vpdma_desc_list *list)
{
	list->next = list->buf.addr;
}
EXPORT_SYMBOL(vpdma_reset_desc_list);

/*
 * free the buffer allocated for the VPDMA descriptor list; this should
 * be called when the user doesn't want to use VPDMA any more.
 */
void vpdma_free_desc_list(struct vpdma_desc_list *list)
{
	vpdma_free_desc_buf(&list->buf);

	list->next = NULL;
}
EXPORT_SYMBOL(vpdma_free_desc_list);

bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num)
{
	return read_reg(vpdma, VPDMA_LIST_STAT_SYNC) & BIT(list_num + 16);
}
EXPORT_SYMBOL(vpdma_list_busy);
/*
 * submit a list of DMA descriptors to the VPE VPDMA; do not wait for
 * completion
 */
int vpdma_submit_descs(struct vpdma_data *vpdma,
			struct vpdma_desc_list *list, int list_num)
{
	int list_size;
	unsigned long flags;

	if (vpdma_list_busy(vpdma, list_num))
		return -EBUSY;

	/* 16-byte granularity */
	list_size = (list->next - list->buf.addr) >> 4;

	spin_lock_irqsave(&vpdma->lock, flags);
	write_reg(vpdma, VPDMA_LIST_ADDR, (u32) list->buf.dma_addr);

	write_reg(vpdma, VPDMA_LIST_ATTR,
			(list_num << VPDMA_LIST_NUM_SHFT) |
			(list->type << VPDMA_LIST_TYPE_SHFT) |
			list_size);
	spin_unlock_irqrestore(&vpdma->lock, flags);

	return 0;
}
EXPORT_SYMBOL(vpdma_submit_descs);
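
/*
 * Example (hypothetical usage sketch): the typical descriptor list
 * lifecycle, assuming 'vpdma' was obtained via vpdma_create(); the
 * buffer size and list number are illustrative.
 *
 *	struct vpdma_desc_list list;
 *
 *	ret = vpdma_create_desc_list(&list, SZ_4K, VPDMA_LIST_TYPE_NORMAL);
 *	...append config/control/data descriptors to 'list'...
 *	ret = vpdma_map_desc_buf(vpdma, &list.buf);
 *	ret = vpdma_submit_descs(vpdma, &list, 0);
 *	...wait for the list-complete interrupt...
 *	vpdma_unmap_desc_buf(vpdma, &list.buf);
 *	vpdma_reset_desc_list(&list);
 */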
static void dump_dtd(struct vpdma_dtd *dtd);

void vpdma_update_dma_addr(struct vpdma_data *vpdma,
	struct vpdma_desc_list *list, dma_addr_t dma_addr,
	void *write_dtd, int drop, int idx)
{
	struct vpdma_dtd *dtd = list->buf.addr;
	dma_addr_t write_desc_addr;
	int offset;

	dtd += idx;
	vpdma_unmap_desc_buf(vpdma, &list->buf);

	dtd->start_addr = dma_addr;

	/*
	 * Calculate the write address from the offset of write_dtd from
	 * the start of the list->buf
	 */
	offset = (void *)write_dtd - list->buf.addr;
	write_desc_addr = list->buf.dma_addr + offset;

	if (drop)
		dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr,
							   1, 1, 0);
	else
		dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr,
							   1, 0, 0);

	vpdma_map_desc_buf(vpdma, &list->buf);

	dump_dtd(dtd);
}
EXPORT_SYMBOL(vpdma_update_dma_addr);
void vpdma_set_max_size(struct vpdma_data *vpdma, int reg_addr,
		u32 width, u32 height)
{
	if (reg_addr != VPDMA_MAX_SIZE1 && reg_addr != VPDMA_MAX_SIZE2 &&
	    reg_addr != VPDMA_MAX_SIZE3)
		reg_addr = VPDMA_MAX_SIZE1;

	write_field_reg(vpdma, reg_addr, width - 1,
			VPDMA_MAX_SIZE_WIDTH_MASK, VPDMA_MAX_SIZE_WIDTH_SHFT);

	write_field_reg(vpdma, reg_addr, height - 1,
			VPDMA_MAX_SIZE_HEIGHT_MASK, VPDMA_MAX_SIZE_HEIGHT_SHFT);
}
EXPORT_SYMBOL(vpdma_set_max_size);
static void dump_cfd(struct vpdma_cfd *cfd)
{
	int class;

	class = cfd_get_class(cfd);

	pr_debug("config descriptor of payload class: %s\n",
		class == CFD_CLS_BLOCK ? "simple block" :
		"address data block");

	if (class == CFD_CLS_BLOCK)
		pr_debug("word0: dst_addr_offset = 0x%08x\n",
			cfd->dest_addr_offset);

	if (class == CFD_CLS_BLOCK)
		pr_debug("word1: num_data_wrds = %d\n", cfd->block_len);

	pr_debug("word2: payload_addr = 0x%08x\n", cfd->payload_addr);

	pr_debug("word3: pkt_type = %d, direct = %d, class = %d, dest = %d, payload_len = %d\n",
		 cfd_get_pkt_type(cfd),
		 cfd_get_direct(cfd), class, cfd_get_dest(cfd),
		 cfd_get_payload_len(cfd));
}
/*
 * append a configuration descriptor to the given descriptor list, where
 * the payload is in the form of a simple data block specified in the
 * descriptor header; this is used to upload scaler coefficients to the
 * scaler module
 */
void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client,
		struct vpdma_buf *blk, u32 dest_offset)
{
	struct vpdma_cfd *cfd;
	int len = blk->size;

	WARN_ON(blk->dma_addr & VPDMA_DESC_ALIGN);

	cfd = list->next;
	WARN_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));

	cfd->dest_addr_offset = dest_offset;
	cfd->block_len = len;
	cfd->payload_addr = (u32) blk->dma_addr;
	cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_BLOCK,
						   client, len >> 4);

	list->next = cfd + 1;

	dump_cfd(cfd);
}
EXPORT_SYMBOL(vpdma_add_cfd_block);
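
/*
 * Example (hypothetical usage sketch): uploading a block of scaler
 * coefficients that was previously copied into a mapped vpdma_buf;
 * the client id and the zero destination offset are illustrative.
 *
 *	vpdma_add_cfd_block(&list, CFD_SC_CLIENT, &coeff_buf, 0);
 */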
/*
 * append a configuration descriptor to the given descriptor list, where
 * the payload is in the address data block format; this is used to
 * configure a discontiguous set of MMRs
 */
void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client,
		struct vpdma_buf *adb)
{
	struct vpdma_cfd *cfd;
	unsigned int len = adb->size;

	WARN_ON(len & VPDMA_ADB_SIZE_ALIGN);
	WARN_ON(adb->dma_addr & VPDMA_DESC_ALIGN);

	cfd = list->next;
	BUG_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));

	cfd->w0 = 0;
	cfd->w1 = 0;
	cfd->payload_addr = (u32) adb->dma_addr;
	cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_ADB,
						   client, len >> 4);

	list->next = cfd + 1;

	dump_cfd(cfd);
}
EXPORT_SYMBOL(vpdma_add_cfd_adb);
/*
 * the control descriptor format changes based on what type of control
 * descriptor it is; we only use 'sync on channel' control descriptors
 * for now, so assume it's that
 */
static void dump_ctd(struct vpdma_ctd *ctd)
{
	pr_debug("control descriptor\n");

	pr_debug("word3: pkt_type = %d, source = %d, ctl_type = %d\n",
		ctd_get_pkt_type(ctd), ctd_get_source(ctd), ctd_get_ctl(ctd));
}

/*
 * append a 'sync on channel' type control descriptor to the given
 * descriptor list; this descriptor stalls the VPDMA list until DMA is
 * completed on the specified channel
 */
void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list,
		enum vpdma_channel chan)
{
	struct vpdma_ctd *ctd;

	ctd = list->next;
	WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size));

	ctd->w0 = 0;
	ctd->w1 = 0;
	ctd->w2 = 0;
	ctd->type_source_ctl = ctd_type_source_ctl(chan_info[chan].num,
				CTD_TYPE_SYNC_ON_CHANNEL);

	list->next = ctd + 1;

	dump_ctd(ctd);
}
EXPORT_SYMBOL(vpdma_add_sync_on_channel_ctd);
/*
 * append an 'abort_channel' type control descriptor to the given
 * descriptor list; this descriptor aborts any DMA transaction currently
 * using the specified channel
 */
void vpdma_add_abort_channel_ctd(struct vpdma_desc_list *list,
		int chan_num)
{
	struct vpdma_ctd *ctd;

	ctd = list->next;
	WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size));

	ctd->w0 = 0;
	ctd->w1 = 0;
	ctd->w2 = 0;
	ctd->type_source_ctl = ctd_type_source_ctl(chan_num,
				CTD_TYPE_ABORT_CHANNEL);

	list->next = ctd + 1;

	dump_ctd(ctd);
}
EXPORT_SYMBOL(vpdma_add_abort_channel_ctd);
static void dump_dtd(struct vpdma_dtd *dtd)
{
	int dir, chan;

	dir = dtd_get_dir(dtd);
	chan = dtd_get_chan(dtd);

	pr_debug("%s data transfer descriptor for channel %d\n",
		dir == DTD_DIR_OUT ? "outbound" : "inbound", chan);

	pr_debug("word0: data_type = %d, notify = %d, field = %d, 1D = %d, even_ln_skp = %d, odd_ln_skp = %d, line_stride = %d\n",
		dtd_get_data_type(dtd), dtd_get_notify(dtd), dtd_get_field(dtd),
		dtd_get_1d(dtd), dtd_get_even_line_skip(dtd),
		dtd_get_odd_line_skip(dtd), dtd_get_line_stride(dtd));

	if (dir == DTD_DIR_IN)
		pr_debug("word1: line_length = %d, xfer_height = %d\n",
			dtd_get_line_length(dtd), dtd_get_xfer_height(dtd));

	pr_debug("word2: start_addr = %pad\n", &dtd->start_addr);

	pr_debug("word3: pkt_type = %d, mode = %d, dir = %d, chan = %d, pri = %d, next_chan = %d\n",
		 dtd_get_pkt_type(dtd),
		 dtd_get_mode(dtd), dir, chan, dtd_get_priority(dtd),
		 dtd_get_next_chan(dtd));

	if (dir == DTD_DIR_IN)
		pr_debug("word4: frame_width = %d, frame_height = %d\n",
			dtd_get_frame_width(dtd), dtd_get_frame_height(dtd));
	else
		pr_debug("word4: desc_write_addr = 0x%08x, write_desc = %d, drp_data = %d, use_desc_reg = %d\n",
			 dtd_get_desc_write_addr(dtd), dtd_get_write_desc(dtd),
			 dtd_get_drop_data(dtd), dtd_get_use_desc(dtd));

	if (dir == DTD_DIR_IN)
		pr_debug("word5: hor_start = %d, ver_start = %d\n",
			dtd_get_h_start(dtd), dtd_get_v_start(dtd));
	else
		pr_debug("word5: max_width %d, max_height %d\n",
			 dtd_get_max_width(dtd), dtd_get_max_height(dtd));

	pr_debug("word6: client specific attr0 = 0x%08x\n", dtd->client_attr0);
	pr_debug("word7: client specific attr1 = 0x%08x\n", dtd->client_attr1);
}
/*
 * append an outbound data transfer descriptor to the given descriptor
 * list; this sets up a 'client to memory' VPDMA transfer for the given
 * VPDMA channel
 *
 * @list: vpdma desc list to which we add this descriptor
 * @width: width of the image in pixels in memory
 * @c_rect: compose params of output image
 * @fmt: vpdma data format of the buffer
 * @dma_addr: dma address as seen by VPDMA
 * @max_w: enum for maximum width of data transfer
 * @max_h: enum for maximum height of data transfer
 * @chan: VPDMA channel
 * @flags: VPDMA flags to configure some descriptor fields
 */
void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
		int stride, const struct v4l2_rect *c_rect,
		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
		int max_w, int max_h, enum vpdma_channel chan, u32 flags)
{
	vpdma_rawchan_add_out_dtd(list, width, stride, c_rect, fmt, dma_addr,
				  max_w, max_h, chan_info[chan].num, flags);
}
EXPORT_SYMBOL(vpdma_add_out_dtd);
void vpdma_rawchan_add_out_dtd(struct vpdma_desc_list *list, int width,
		int stride, const struct v4l2_rect *c_rect,
		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
		int max_w, int max_h, int raw_vpdma_chan, u32 flags)
{
	int priority = 0;
	int field = 0;
	int notify = 1;
	int channel, next_chan;
	struct v4l2_rect rect = *c_rect;
	int depth = fmt->depth;
	struct vpdma_dtd *dtd;

	channel = next_chan = raw_vpdma_chan;

	if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
	    fmt->data_type == DATA_TYPE_C420) {
		rect.height >>= 1;
		rect.top >>= 1;
		depth = 8;
	}

	dma_addr += rect.top * stride + (rect.left * depth >> 3);

	dtd = list->next;
	WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));

	dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
					notify,
					field,
					!!(flags & VPDMA_DATA_FRAME_1D),
					!!(flags & VPDMA_DATA_EVEN_LINE_SKIP),
					!!(flags & VPDMA_DATA_ODD_LINE_SKIP),
					stride);
	dtd->w1 = 0;
	dtd->start_addr = (u32) dma_addr;
	dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
				DTD_DIR_OUT, channel, priority, next_chan);
	dtd->desc_write_addr = dtd_desc_write_addr(0, 0, 0, 0);
	dtd->max_width_height = dtd_max_width_height(max_w, max_h);
	dtd->client_attr0 = 0;
	dtd->client_attr1 = 0;

	list->next = dtd + 1;

	dump_dtd(dtd);
}
EXPORT_SYMBOL(vpdma_rawchan_add_out_dtd);
/*
 * append an inbound data transfer descriptor to the given descriptor
 * list; this sets up a 'memory to client' VPDMA transfer for the given
 * VPDMA channel
 *
 * @list: vpdma desc list to which we add this descriptor
 * @width: width of the image in pixels in memory (not the cropped width)
 * @c_rect: crop params of input image
 * @fmt: vpdma data format of the buffer
 * @dma_addr: dma address as seen by VPDMA
 * @chan: VPDMA channel
 * @field: top or bottom field info of the input image
 * @flags: VPDMA flags to configure some descriptor fields
 * @frame_width/height: the complete width/height of the image presented
 *		to the client (this makes sense when multiple channels are
 *		connected to the same client, forming a larger frame)
 * @start_h, @start_v: position where the given channel starts providing
 *		pixel data to the client (makes sense when multiple
 *		channels contribute to the client)
 */
void vpdma_add_in_dtd(struct vpdma_desc_list *list, int width,
		int stride, const struct v4l2_rect *c_rect,
		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
		enum vpdma_channel chan, int field, u32 flags, int frame_width,
		int frame_height, int start_h, int start_v)
{
	int priority = 0;
	int notify = 1;
	int depth = fmt->depth;
	int channel, next_chan;
	struct v4l2_rect rect = *c_rect;
	struct vpdma_dtd *dtd;

	channel = next_chan = chan_info[chan].num;

	if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
	    fmt->data_type == DATA_TYPE_C420) {
		rect.height >>= 1;
		rect.top >>= 1;
		depth = 8;
	}

	dma_addr += rect.top * stride + (rect.left * depth >> 3);

	dtd = list->next;
	WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));

	dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
					notify,
					field,
					!!(flags & VPDMA_DATA_FRAME_1D),
					!!(flags & VPDMA_DATA_EVEN_LINE_SKIP),
					!!(flags & VPDMA_DATA_ODD_LINE_SKIP),
					stride);

	dtd->xfer_length_height = dtd_xfer_length_height(rect.width,
					rect.height);
	dtd->start_addr = (u32) dma_addr;
	dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
				DTD_DIR_IN, channel, priority, next_chan);
	dtd->frame_width_height = dtd_frame_width_height(frame_width,
					frame_height);
	dtd->start_h_v = dtd_start_h_v(start_h, start_v);
	dtd->client_attr0 = 0;
	dtd->client_attr1 = 0;

	list->next = dtd + 1;

	dump_dtd(dtd);
}
EXPORT_SYMBOL(vpdma_add_in_dtd);
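
/*
 * Example (hypothetical usage sketch): feeding a full-frame luma plane
 * to the deinterlacer and writing the processed luma back to memory;
 * the crop/compose rectangle covers the whole frame, and the format,
 * size and channel choices are illustrative.
 *
 *	struct v4l2_rect full = { 0, 0, 1920, 1080 };
 *
 *	vpdma_add_in_dtd(&list, 1920, 1920, &full,
 *			 &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420], src_dma,
 *			 VPE_CHAN_LUMA1_IN, 0, 0, 1920, 1080, 0, 0);
 *	vpdma_add_out_dtd(&list, 1920, 1920, &full,
 *			  &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420], dst_dma,
 *			  MAX_OUT_WIDTH_1920, MAX_OUT_HEIGHT_1080,
 *			  VPE_CHAN_LUMA_OUT, 0);
 */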
int vpdma_hwlist_alloc(struct vpdma_data *vpdma, void *priv)
{
	int i, list_num = -1;
	unsigned long flags;

	spin_lock_irqsave(&vpdma->lock, flags);
	for (i = 0; i < VPDMA_MAX_NUM_LIST &&
	    vpdma->hwlist_used[i] == true; i++)
		;

	if (i < VPDMA_MAX_NUM_LIST) {
		list_num = i;
		vpdma->hwlist_used[i] = true;
		vpdma->hwlist_priv[i] = priv;
	}
	spin_unlock_irqrestore(&vpdma->lock, flags);

	return list_num;
}
EXPORT_SYMBOL(vpdma_hwlist_alloc);

void *vpdma_hwlist_get_priv(struct vpdma_data *vpdma, int list_num)
{
	if (!vpdma || list_num >= VPDMA_MAX_NUM_LIST)
		return NULL;

	return vpdma->hwlist_priv[list_num];
}
EXPORT_SYMBOL(vpdma_hwlist_get_priv);

void *vpdma_hwlist_release(struct vpdma_data *vpdma, int list_num)
{
	void *priv;
	unsigned long flags;

	spin_lock_irqsave(&vpdma->lock, flags);
	vpdma->hwlist_used[list_num] = false;
	priv = vpdma->hwlist_priv[list_num];
	spin_unlock_irqrestore(&vpdma->lock, flags);

	return priv;
}
EXPORT_SYMBOL(vpdma_hwlist_release);
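
/*
 * Example (hypothetical usage sketch): reserving a hardware list for a
 * context at stream-on and looking the context back up later, e.g. in
 * an interrupt handler; 'ctx' stands in for a driver-private struct.
 *
 *	list_num = vpdma_hwlist_alloc(vpdma, ctx);
 *	if (list_num < 0)
 *		return -EBUSY;
 *	...
 *	ctx = vpdma_hwlist_get_priv(vpdma, list_num);
 *	...
 *	ctx = vpdma_hwlist_release(vpdma, list_num);
 */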
/* set or clear the mask for list complete interrupt */
void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int irq_num,
		int list_num, bool enable)
{
	u32 reg_addr = VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num;
	u32 val;

	val = read_reg(vpdma, reg_addr);
	if (enable)
		val |= (1 << (list_num * 2));
	else
		val &= ~(1 << (list_num * 2));
	write_reg(vpdma, reg_addr, val);
}
EXPORT_SYMBOL(vpdma_enable_list_complete_irq);

/* get the LIST_STAT register */
unsigned int vpdma_get_list_stat(struct vpdma_data *vpdma, int irq_num)
{
	u32 reg_addr = VPDMA_INT_LIST0_STAT + VPDMA_INTX_OFFSET * irq_num;

	return read_reg(vpdma, reg_addr);
}
EXPORT_SYMBOL(vpdma_get_list_stat);

/* get the LIST_MASK register */
unsigned int vpdma_get_list_mask(struct vpdma_data *vpdma, int irq_num)
{
	u32 reg_addr = VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num;

	return read_reg(vpdma, reg_addr);
}
EXPORT_SYMBOL(vpdma_get_list_mask);

/* clear previously occurred list interrupts in the LIST_STAT register */
void vpdma_clear_list_stat(struct vpdma_data *vpdma, int irq_num,
		int list_num)
{
	u32 reg_addr = VPDMA_INT_LIST0_STAT + VPDMA_INTX_OFFSET * irq_num;

	write_reg(vpdma, reg_addr, 3 << (list_num * 2));
}
EXPORT_SYMBOL(vpdma_clear_list_stat);
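
/*
 * Example (hypothetical usage sketch): a driver's interrupt handler
 * for IRQ line 0 checking whether its list completed and acknowledging
 * the interrupt; 'list_num' would come from vpdma_hwlist_alloc().
 *
 *	irqst = vpdma_get_list_stat(vpdma, 0);
 *	if (irqst & (1 << (list_num * 2))) {
 *		vpdma_clear_list_stat(vpdma, 0, list_num);
 *		...handle list completion...
 *	}
 */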
void vpdma_set_bg_color(struct vpdma_data *vpdma,
		struct vpdma_data_format *fmt, u32 color)
{
	if (fmt->type == VPDMA_DATA_FMT_TYPE_RGB)
		write_reg(vpdma, VPDMA_BG_RGB, color);
	else if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV)
		write_reg(vpdma, VPDMA_BG_YUV, color);
}
EXPORT_SYMBOL(vpdma_set_bg_color);
/*
 * configures the output mode of the line buffer for the given client;
 * the line buffer content can either be mirrored (each line repeated
 * twice) or passed to the client as is
 */
void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode,
		enum vpdma_channel chan)
{
	int client_cstat = chan_info[chan].cstat_offset;

	write_field_reg(vpdma, client_cstat, line_mode,
		VPDMA_CSTAT_LINE_MODE_MASK, VPDMA_CSTAT_LINE_MODE_SHIFT);
}
EXPORT_SYMBOL(vpdma_set_line_mode);

/*
 * configures the event which should trigger VPDMA transfer for the
 * given client
 */
void vpdma_set_frame_start_event(struct vpdma_data *vpdma,
		enum vpdma_frame_start_event fs_event,
		enum vpdma_channel chan)
{
	int client_cstat = chan_info[chan].cstat_offset;

	write_field_reg(vpdma, client_cstat, fs_event,
		VPDMA_CSTAT_FRAME_START_MASK, VPDMA_CSTAT_FRAME_START_SHIFT);
}
EXPORT_SYMBOL(vpdma_set_frame_start_event);
static void vpdma_firmware_cb(const struct firmware *f, void *context)
{
	struct vpdma_data *vpdma = context;
	struct vpdma_buf fw_dma_buf;
	int i, r;

	dev_dbg(&vpdma->pdev->dev, "firmware callback\n");

	if (!f || !f->data) {
		dev_err(&vpdma->pdev->dev, "couldn't get firmware\n");
		return;
	}

	/* already initialized */
	if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
			VPDMA_LIST_RDY_SHFT)) {
		vpdma->cb(vpdma->pdev);
		return;
	}

	r = vpdma_alloc_desc_buf(&fw_dma_buf, f->size);
	if (r) {
		dev_err(&vpdma->pdev->dev,
			"failed to allocate dma buffer for firmware\n");
		goto rel_fw;
	}

	memcpy(fw_dma_buf.addr, f->data, f->size);

	vpdma_map_desc_buf(vpdma, &fw_dma_buf);

	write_reg(vpdma, VPDMA_LIST_ADDR, (u32) fw_dma_buf.dma_addr);

	for (i = 0; i < 100; i++) {		/* max 1 second */
		msleep_interruptible(10);

		if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
				VPDMA_LIST_RDY_SHFT))
			break;
	}
	if (i == 100) {
		dev_err(&vpdma->pdev->dev, "firmware upload failed\n");
		goto free_buf;
	}

	vpdma->cb(vpdma->pdev);

free_buf:
	vpdma_unmap_desc_buf(vpdma, &fw_dma_buf);

	vpdma_free_desc_buf(&fw_dma_buf);
rel_fw:
	release_firmware(f);
}
static int vpdma_load_firmware(struct vpdma_data *vpdma)
{
	int r;
	struct device *dev = &vpdma->pdev->dev;

	r = request_firmware_nowait(THIS_MODULE, 1,
		(const char *) VPDMA_FIRMWARE, dev, GFP_KERNEL, vpdma,
		vpdma_firmware_cb);
	if (r) {
		dev_err(dev, "firmware not available %s\n", VPDMA_FIRMWARE);
		return r;
	} else {
		dev_info(dev, "loading firmware %s\n", VPDMA_FIRMWARE);
	}

	return 0;
}
int vpdma_create(struct platform_device *pdev, struct vpdma_data *vpdma,
		void (*cb)(struct platform_device *pdev))
{
	struct resource *res;
	int r;

	dev_dbg(&pdev->dev, "vpdma_create\n");

	vpdma->pdev = pdev;
	vpdma->cb = cb;
	spin_lock_init(&vpdma->lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpdma");
	if (res == NULL) {
		dev_err(&pdev->dev, "missing platform resources data\n");
		return -ENODEV;
	}

	vpdma->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!vpdma->base) {
		dev_err(&pdev->dev, "failed to ioremap\n");
		return -ENOMEM;
	}

	r = vpdma_load_firmware(vpdma);
	if (r) {
		pr_err("failed to load firmware %s\n", VPDMA_FIRMWARE);
		return r;
	}

	return 0;
}
EXPORT_SYMBOL(vpdma_create);
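
/*
 * Example (hypothetical usage sketch): a driver's probe routine
 * embedding struct vpdma_data in its device struct and deferring the
 * rest of its setup until the firmware has been loaded; the callback
 * and struct names are illustrative.
 *
 *	static void my_fw_loaded(struct platform_device *pdev)
 *	{
 *		...VPDMA firmware is ready, complete device registration...
 *	}
 *
 *	ret = vpdma_create(pdev, &dev->vpdma_data, my_fw_loaded);
 *	if (ret)
 *		return ret;
 */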
MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_FIRMWARE(VPDMA_FIRMWARE);
MODULE_LICENSE("GPL v2");