2 * DMA driver for Xilinx Video DMA Engine
4 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
6 * Based on the Freescale DMA driver.
9 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
10 * core that provides high-bandwidth direct memory access between memory
11 * and AXI4-Stream type video target peripherals. The core provides efficient
12 * two dimensional DMA operations with independent asynchronous read (S2MM)
13 * and write (MM2S) channel operation. It can be configured to have either
14 * one channel or two channels. If configured as two channels, one is to
15 * transmit to the video device (MM2S) and another is to receive from the
16 * video device (S2MM). Initialization, status, interrupt and management
17 * registers are accessed through an AXI4-Lite slave interface.
19 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
20 * provides high-bandwidth one dimensional direct memory access between memory
21 * and AXI4-Stream target peripherals. It supports one receive and one
22 * transmit channel, both of them optional at synthesis time.
24 * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
25 * Access (DMA) between a memory-mapped source address and a memory-mapped
26 * destination address.
28 * This program is free software: you can redistribute it and/or modify
29 * it under the terms of the GNU General Public License as published by
30 * the Free Software Foundation, either version 2 of the License, or
31 * (at your option) any later version.
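 *
 * Editorial note (not part of the original header): consumers of all three
 * IPs drive this driver through the generic dmaengine client API. A minimal
 * sketch for an AXI DMA S2MM (device-to-memory) transfer, assuming a client
 * device node with an "rx" dma-names entry, would look like:
 *
 *	chan = dma_request_chan(dev, "rx");
 *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
 *				       DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */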
34 #include <linux/bitops.h>
35 #include <linux/dmapool.h>
36 #include <linux/dma/xilinx_dma.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
40 #include <linux/iopoll.h>
41 #include <linux/module.h>
42 #include <linux/of_address.h>
43 #include <linux/of_dma.h>
44 #include <linux/of_platform.h>
45 #include <linux/of_irq.h>
46 #include <linux/slab.h>
47 #include <linux/clk.h>
48 #include <linux/io-64-nonatomic-lo-hi.h>
50 #include "../dmaengine.h"
52 /* Register/Descriptor Offsets */
53 #define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000
54 #define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030
55 #define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050
56 #define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0
58 /* Control Registers */
59 #define XILINX_DMA_REG_DMACR 0x0000
60 #define XILINX_DMA_DMACR_DELAY_MAX 0xff
61 #define XILINX_DMA_DMACR_DELAY_SHIFT 24
62 #define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff
63 #define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16
64 #define XILINX_DMA_DMACR_ERR_IRQ BIT(14)
65 #define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13)
66 #define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12)
67 #define XILINX_DMA_DMACR_MASTER_SHIFT 8
68 #define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5
69 #define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4)
70 #define XILINX_DMA_DMACR_GENLOCK_EN BIT(3)
71 #define XILINX_DMA_DMACR_RESET BIT(2)
72 #define XILINX_DMA_DMACR_CIRC_EN BIT(1)
73 #define XILINX_DMA_DMACR_RUNSTOP BIT(0)
74 #define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
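
/*
 * Illustrative example (editorial, not from the original source): the IRQ
 * delay and frame-count thresholds occupy the top two bytes of DMACR, so
 * requesting an interrupt every 8 frames with an IRQ delay timeout of 255
 * would be programmed as
 *
 *	dmacr |= 8 << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;	/* bits 23:16 */
 *	dmacr |= 255 << XILINX_DMA_DMACR_DELAY_SHIFT;		/* bits 31:24 */
 *
 * mirroring XILINX_DMA_DMASR_FRAME_COUNT_MASK and XILINX_DMA_DMASR_DELAY_MASK
 * in the status register below.
 */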
76 #define XILINX_DMA_REG_DMASR 0x0004
77 #define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
78 #define XILINX_DMA_DMASR_ERR_IRQ BIT(14)
79 #define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13)
80 #define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12)
81 #define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11)
82 #define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10)
83 #define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9)
84 #define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8)
85 #define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7)
86 #define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6)
87 #define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5)
88 #define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4)
89 #define XILINX_DMA_DMASR_IDLE BIT(1)
90 #define XILINX_DMA_DMASR_HALTED BIT(0)
91 #define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24)
92 #define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16)
94 #define XILINX_DMA_REG_CURDESC 0x0008
95 #define XILINX_DMA_REG_TAILDESC 0x0010
96 #define XILINX_DMA_REG_REG_INDEX 0x0014
97 #define XILINX_DMA_REG_FRMSTORE 0x0018
98 #define XILINX_DMA_REG_THRESHOLD 0x001c
99 #define XILINX_DMA_REG_FRMPTR_STS 0x0024
100 #define XILINX_DMA_REG_PARK_PTR 0x0028
101 #define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
102 #define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8)
103 #define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
104 #define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0)
105 #define XILINX_DMA_REG_VDMA_VERSION 0x002c
107 /* Register Direct Mode Registers */
108 #define XILINX_DMA_REG_VSIZE 0x0000
109 #define XILINX_DMA_REG_HSIZE 0x0004
111 #define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008
112 #define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
113 #define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0
115 #define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n))
116 #define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n))
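
/*
 * Example (editorial): frame-store start addresses are packed back to back
 * after the FRMDLY_STRIDE register, so for frame buffer n = 2
 *	XILINX_VDMA_REG_START_ADDRESS(2)    == 0x0014	(32-bit addressing)
 *	XILINX_VDMA_REG_START_ADDRESS_64(2) == 0x001c	(64-bit addressing)
 */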
118 #define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP 0x00ec
119 #define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0)
121 /* HW specific definitions */
122 #define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20
124 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
125 (XILINX_DMA_DMASR_FRM_CNT_IRQ | \
126 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
127 XILINX_DMA_DMASR_ERR_IRQ)
129 #define XILINX_DMA_DMASR_ALL_ERR_MASK \
130 (XILINX_DMA_DMASR_EOL_LATE_ERR | \
131 XILINX_DMA_DMASR_SOF_LATE_ERR | \
132 XILINX_DMA_DMASR_SG_DEC_ERR | \
133 XILINX_DMA_DMASR_SG_SLV_ERR | \
134 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
135 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
136 XILINX_DMA_DMASR_DMA_DEC_ERR | \
137 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
138 XILINX_DMA_DMASR_DMA_INT_ERR)
141 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
142 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
143 * is enabled in the h/w system.
145 #define XILINX_DMA_DMASR_ERR_RECOVER_MASK \
146 (XILINX_DMA_DMASR_SOF_LATE_ERR | \
147 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
148 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
149 XILINX_DMA_DMASR_DMA_INT_ERR)
151 /* Axi VDMA Flush on Fsync bits */
152 #define XILINX_DMA_FLUSH_S2MM 3
153 #define XILINX_DMA_FLUSH_MM2S 2
154 #define XILINX_DMA_FLUSH_BOTH 1
156 /* Delay loop counter to prevent hardware failure */
157 #define XILINX_DMA_LOOP_COUNT 1000000
159 /* AXI DMA Specific Registers/Offsets */
160 #define XILINX_DMA_REG_SRCDSTADDR 0x18
161 #define XILINX_DMA_REG_BTT 0x28
163 /* AXI DMA Specific Masks/Bit fields */
164 #define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0)
165 #define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
166 #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
167 #define XILINX_DMA_CR_COALESCE_SHIFT 16
168 #define XILINX_DMA_BD_SOP BIT(27)
169 #define XILINX_DMA_BD_EOP BIT(26)
170 #define XILINX_DMA_COALESCE_MAX 255
171 #define XILINX_DMA_NUM_DESCS 255
172 #define XILINX_DMA_NUM_APP_WORDS 5
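
/*
 * Editorial note: XILINX_DMA_MAX_TRANS_LEN is GENMASK(22, 0) == 0x7fffff, so
 * a single AXI DMA buffer descriptor (or a simple-mode transfer) moves at
 * most 8 MiB - 1 bytes; the prep routines below split longer scatterlist
 * entries or cyclic periods into multiple segments of at most this size.
 */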
174 /* Multi-Channel DMA Descriptor offsets */
175 #define XILINX_DMA_MCRX_CDESC(x) (0x40 + (x-1) * 0x20)
176 #define XILINX_DMA_MCRX_TDESC(x) (0x48 + (x-1) * 0x20)
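
/*
 * Example (editorial): the per-channel multi-channel descriptor registers are
 * spaced 0x20 apart, e.g. XILINX_DMA_MCRX_CDESC(1) == 0x40,
 * XILINX_DMA_MCRX_TDESC(1) == 0x48, XILINX_DMA_MCRX_CDESC(2) == 0x60, etc.
 */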
178 /* Multi-Channel DMA Masks/Shifts */
179 #define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0)
180 #define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0)
181 #define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19)
182 #define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0)
183 #define XILINX_DMA_BD_STRIDE_SHIFT 0
184 #define XILINX_DMA_BD_VSIZE_SHIFT 19
186 /* AXI CDMA Specific Registers/Offsets */
187 #define XILINX_CDMA_REG_SRCADDR 0x18
188 #define XILINX_CDMA_REG_DSTADDR 0x20
190 /* AXI CDMA Specific Masks */
191 #define XILINX_CDMA_CR_SGMODE BIT(3)
193 #define xilinx_prep_dma_addr_t(addr) \
194 ((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
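
/*
 * Editorial note: the macro token-pastes "_msb" onto its argument, so
 * xilinx_prep_dma_addr_t(hw->src_addr) expands to
 *	((dma_addr_t)((u64)hw->src_addr_msb << 32 | (hw->src_addr)))
 * i.e. it recombines the low/high 32-bit halves of a split descriptor
 * address into a single dma_addr_t.
 */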
196 * struct xilinx_vdma_desc_hw - Hardware Descriptor
197 * @next_desc: Next Descriptor Pointer @0x00
198 * @pad1: Reserved @0x04
199 * @buf_addr: Buffer address @0x08
200 * @buf_addr_msb: MSB of Buffer address @0x0C
201 * @vsize: Vertical Size @0x10
202 * @hsize: Horizontal Size @0x14
203 * @stride: Number of bytes between the first
204 * pixels of each horizontal line @0x18
206 struct xilinx_vdma_desc_hw {
217 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
218 * @next_desc: Next Descriptor Pointer @0x00
219 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
220 * @buf_addr: Buffer address @0x08
221 * @buf_addr_msb: MSB of Buffer address @0x0C
222 * @mcdma_control: Control field for mcdma @0x10
223 * @vsize_stride: Vsize and Stride field for mcdma @0x14
224 * @control: Control field @0x18
225 * @status: Status field @0x1C
226 * @app: APP Fields @0x20 - 0x30
228 struct xilinx_axidma_desc_hw {
237 u32 app[XILINX_DMA_NUM_APP_WORDS];
241 * struct xilinx_cdma_desc_hw - Hardware Descriptor
242 * @next_desc: Next Descriptor Pointer @0x00
243 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
244 * @src_addr: Source address @0x08
245 * @src_addr_msb: Source address MSB @0x0C
246 * @dest_addr: Destination address @0x10
247 * @dest_addr_msb: Destination address MSB @0x14
248 * @control: Control field @0x18
249 * @status: Status field @0x1C
251 struct xilinx_cdma_desc_hw {
263 * struct xilinx_vdma_tx_segment - Descriptor segment
264 * @hw: Hardware descriptor
265 * @node: Node in the descriptor segments list
266 * @phys: Physical address of segment
268 struct xilinx_vdma_tx_segment {
269 struct xilinx_vdma_desc_hw hw;
270 struct list_head node;
275 * struct xilinx_axidma_tx_segment - Descriptor segment
276 * @hw: Hardware descriptor
277 * @node: Node in the descriptor segments list
278 * @phys: Physical address of segment
280 struct xilinx_axidma_tx_segment {
281 struct xilinx_axidma_desc_hw hw;
282 struct list_head node;
287 * struct xilinx_cdma_tx_segment - Descriptor segment
288 * @hw: Hardware descriptor
289 * @node: Node in the descriptor segments list
290 * @phys: Physical address of segment
292 struct xilinx_cdma_tx_segment {
293 struct xilinx_cdma_desc_hw hw;
294 struct list_head node;
299 * struct xilinx_dma_tx_descriptor - Per Transaction structure
300 * @async_tx: Async transaction descriptor
301 * @segments: TX segments list
302 * @node: Node in the channel descriptors list
303 * @cyclic: Check for cyclic transfers.
305 struct xilinx_dma_tx_descriptor {
306 struct dma_async_tx_descriptor async_tx;
307 struct list_head segments;
308 struct list_head node;
313 * struct xilinx_dma_chan - Driver specific DMA channel structure
314 * @xdev: Driver specific device structure
315 * @ctrl_offset: Control registers offset
316 * @desc_offset: TX descriptor registers offset
317 * @lock: Descriptor operation lock
318 * @pending_list: Descriptors waiting
319 * @active_list: Descriptors ready to submit
320 * @done_list: Complete descriptors
321 * @free_seg_list: Free descriptors
322 * @common: DMA common channel
323 * @desc_pool: Descriptors pool
324 * @dev: The dma device
327 * @direction: Transfer direction
328 * @num_frms: Number of frames
329 * @has_sg: Support scatter transfers
330 * @cyclic: Check for cyclic transfers.
331 * @genlock: Support genlock mode
332 * @err: Channel has errors
333 * @idle: Check for channel idle
334 * @tasklet: Cleanup work after irq
335 * @config: Device configuration info
336 * @flush_on_fsync: Flush on Frame sync
337 * @desc_pendingcount: Descriptor pending count
338 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
339 * @desc_submitcount: Descriptor h/w submitted count
340 * @residue: Residue for AXI DMA
341 * @seg_v: Statically allocated segments base
342 * @seg_p: Physical allocated segments base
343 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
344 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
345 * @start_transfer: Transfer-start routine; differs between the DMA IPs
346 * @stop_transfer: Transfer-quiesce routine; differs between the DMA IPs
347 * @tdest: TDEST value for mcdma
348 * @has_vflip: S2MM vertical flip
350 struct xilinx_dma_chan {
351 struct xilinx_dma_device *xdev;
355 struct list_head pending_list;
356 struct list_head active_list;
357 struct list_head done_list;
358 struct list_head free_seg_list;
359 struct dma_chan common;
360 struct dma_pool *desc_pool;
364 enum dma_transfer_direction direction;
371 struct tasklet_struct tasklet;
372 struct xilinx_vdma_config config;
374 u32 desc_pendingcount;
376 u32 desc_submitcount;
378 struct xilinx_axidma_tx_segment *seg_v;
380 struct xilinx_axidma_tx_segment *cyclic_seg_v;
381 dma_addr_t cyclic_seg_p;
382 void (*start_transfer)(struct xilinx_dma_chan *chan);
383 int (*stop_transfer)(struct xilinx_dma_chan *chan);
389 * enum xdma_ip_type - DMA IP type.
391 * @XDMA_TYPE_AXIDMA: Axi dma ip.
392 * @XDMA_TYPE_CDMA: Axi cdma ip.
393 * @XDMA_TYPE_VDMA: Axi vdma ip.
397 XDMA_TYPE_AXIDMA = 0,
402 struct xilinx_dma_config {
403 enum xdma_ip_type dmatype;
404 int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
405 struct clk **tx_clk, struct clk **txs_clk,
406 struct clk **rx_clk, struct clk **rxs_clk);
410 * struct xilinx_dma_device - DMA device structure
411 * @regs: I/O mapped base address
412 * @dev: Device Structure
413 * @common: DMA device structure
414 * @chan: Driver specific DMA channel
415 * @has_sg: Specifies whether Scatter-Gather is present or not
416 * @mcdma: Specifies whether Multi-Channel is present or not
417 * @flush_on_fsync: Flush on frame sync
418 * @ext_addr: Indicates 64 bit addressing is supported by dma device
419 * @pdev: Platform device structure pointer
420 * @dma_config: DMA config structure
421 * @axi_clk: DMA Axi4-lite interface clock
422 * @tx_clk: DMA mm2s clock
423 * @txs_clk: DMA mm2s stream clock
424 * @rx_clk: DMA s2mm clock
425 * @rxs_clk: DMA s2mm stream clock
426 * @nr_channels: Number of channels DMA device supports
427 * @chan_id: DMA channel identifier
429 struct xilinx_dma_device {
432 struct dma_device common;
433 struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
438 struct platform_device *pdev;
439 const struct xilinx_dma_config *dma_config;
450 #define to_xilinx_chan(chan) \
451 container_of(chan, struct xilinx_dma_chan, common)
452 #define to_dma_tx_descriptor(tx) \
453 container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
454 #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
455 readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
456 cond, delay_us, timeout_us)
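
/*
 * Illustrative usage (mirrors xilinx_dma_stop_transfer() below): busy-poll,
 * with no delay between reads, for up to XILINX_DMA_LOOP_COUNT microseconds
 * until the channel reports halted:
 *
 *	u32 val;
 *	int err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
 *					  val & XILINX_DMA_DMASR_HALTED, 0,
 *					  XILINX_DMA_LOOP_COUNT);
 */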
459 static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
461 return ioread32(chan->xdev->regs + reg);
464 static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
466 iowrite32(value, chan->xdev->regs + reg);
469 static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
472 dma_write(chan, chan->desc_offset + reg, value);
475 static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
477 return dma_read(chan, chan->ctrl_offset + reg);
480 static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
483 dma_write(chan, chan->ctrl_offset + reg, value);
486 static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
489 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
492 static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
495 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
499 * vdma_desc_write_64 - 64-bit descriptor write
500 * @chan: Driver specific VDMA channel
501 * @reg: Register to write
502 * @value_lsb: lower address of the descriptor.
503 * @value_msb: upper address of the descriptor.
505 * Since the VDMA driver may need to write to a register offset that is
506 * not 64-bit aligned (e.g. 0x5c), the value is written as two separate
507 * 32-bit writes instead of a single 64-bit register write.
509 static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
510 u32 value_lsb, u32 value_msb)
512 /* Write the lsb 32 bits */
513 writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);
515 /* Write the msb 32 bits */
516 writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
519 static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
521 lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
524 static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
528 dma_writeq(chan, reg, addr);
530 dma_ctrl_write(chan, reg, addr);
533 static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
534 struct xilinx_axidma_desc_hw *hw,
535 dma_addr_t buf_addr, size_t sg_used,
538 if (chan->ext_addr) {
539 hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
540 hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
543 hw->buf_addr = buf_addr + sg_used + period_len;
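
/*
 * Editorial note: with ext_addr set (64-bit capable hardware) a buffer at,
 * say, 0x1_2345_6000 is split into buf_addr = 0x23456000 and
 * buf_addr_msb = 0x1; without ext_addr the whole dma_addr_t must fit in the
 * 32-bit buf_addr field.
 */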
547 /* -----------------------------------------------------------------------------
548 * Descriptors and segments alloc and free
552 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
553 * @chan: Driver specific DMA channel
555 * Return: The allocated segment on success and NULL on failure.
557 static struct xilinx_vdma_tx_segment *
558 xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
560 struct xilinx_vdma_tx_segment *segment;
563 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
567 segment->phys = phys;
573 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
574 * @chan: Driver specific DMA channel
576 * Return: The allocated segment on success and NULL on failure.
578 static struct xilinx_cdma_tx_segment *
579 xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
581 struct xilinx_cdma_tx_segment *segment;
584 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
588 segment->phys = phys;
594 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
595 * @chan: Driver specific DMA channel
597 * Return: The allocated segment on success and NULL on failure.
599 static struct xilinx_axidma_tx_segment *
600 xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
602 struct xilinx_axidma_tx_segment *segment = NULL;
605 spin_lock_irqsave(&chan->lock, flags);
606 if (!list_empty(&chan->free_seg_list)) {
607 segment = list_first_entry(&chan->free_seg_list,
608 struct xilinx_axidma_tx_segment,
610 list_del(&segment->node);
612 spin_unlock_irqrestore(&chan->lock, flags);
617 static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
619 u32 next_desc = hw->next_desc;
620 u32 next_desc_msb = hw->next_desc_msb;
622 memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
624 hw->next_desc = next_desc;
625 hw->next_desc_msb = next_desc_msb;
629 * xilinx_dma_free_tx_segment - Free transaction segment
630 * @chan: Driver specific DMA channel
631 * @segment: DMA transaction segment
633 static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
634 struct xilinx_axidma_tx_segment *segment)
636 xilinx_dma_clean_hw_desc(&segment->hw);
638 list_add_tail(&segment->node, &chan->free_seg_list);
642 * xilinx_cdma_free_tx_segment - Free transaction segment
643 * @chan: Driver specific DMA channel
644 * @segment: DMA transaction segment
646 static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
647 struct xilinx_cdma_tx_segment *segment)
649 dma_pool_free(chan->desc_pool, segment, segment->phys);
653 * xilinx_vdma_free_tx_segment - Free transaction segment
654 * @chan: Driver specific DMA channel
655 * @segment: DMA transaction segment
657 static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
658 struct xilinx_vdma_tx_segment *segment)
660 dma_pool_free(chan->desc_pool, segment, segment->phys);
664 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
665 * @chan: Driver specific DMA channel
667 * Return: The allocated descriptor on success and NULL on failure.
669 static struct xilinx_dma_tx_descriptor *
670 xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
672 struct xilinx_dma_tx_descriptor *desc;
674 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
678 INIT_LIST_HEAD(&desc->segments);
684 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
685 * @chan: Driver specific DMA channel
686 * @desc: DMA transaction descriptor
689 xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
690 struct xilinx_dma_tx_descriptor *desc)
692 struct xilinx_vdma_tx_segment *segment, *next;
693 struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
694 struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
699 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
700 list_for_each_entry_safe(segment, next, &desc->segments, node) {
701 list_del(&segment->node);
702 xilinx_vdma_free_tx_segment(chan, segment);
704 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
705 list_for_each_entry_safe(cdma_segment, cdma_next,
706 &desc->segments, node) {
707 list_del(&cdma_segment->node);
708 xilinx_cdma_free_tx_segment(chan, cdma_segment);
711 list_for_each_entry_safe(axidma_segment, axidma_next,
712 &desc->segments, node) {
713 list_del(&axidma_segment->node);
714 xilinx_dma_free_tx_segment(chan, axidma_segment);
721 /* Required functions */
724 * xilinx_dma_free_desc_list - Free descriptors list
725 * @chan: Driver specific DMA channel
726 * @list: List to parse and delete the descriptor
728 static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
729 struct list_head *list)
731 struct xilinx_dma_tx_descriptor *desc, *next;
733 list_for_each_entry_safe(desc, next, list, node) {
734 list_del(&desc->node);
735 xilinx_dma_free_tx_descriptor(chan, desc);
740 * xilinx_dma_free_descriptors - Free channel descriptors
741 * @chan: Driver specific DMA channel
743 static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
747 spin_lock_irqsave(&chan->lock, flags);
749 xilinx_dma_free_desc_list(chan, &chan->pending_list);
750 xilinx_dma_free_desc_list(chan, &chan->done_list);
751 xilinx_dma_free_desc_list(chan, &chan->active_list);
753 spin_unlock_irqrestore(&chan->lock, flags);
757 * xilinx_dma_free_chan_resources - Free channel resources
758 * @dchan: DMA channel
760 static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
762 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
765 dev_dbg(chan->dev, "Free all channel resources.\n");
767 xilinx_dma_free_descriptors(chan);
769 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
770 spin_lock_irqsave(&chan->lock, flags);
771 INIT_LIST_HEAD(&chan->free_seg_list);
772 spin_unlock_irqrestore(&chan->lock, flags);
774 /* Free memory that is allocated for BD */
775 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
776 XILINX_DMA_NUM_DESCS, chan->seg_v,
779 /* Free memory that is allocated for cyclic DMA mode */
780 dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
781 chan->cyclic_seg_v, chan->cyclic_seg_p);
784 if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
785 dma_pool_destroy(chan->desc_pool);
786 chan->desc_pool = NULL;
791 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
792 * @chan: Driver specific dma channel
793 * @desc: dma transaction descriptor
794 * @flags: flags for spin lock
796 static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
797 struct xilinx_dma_tx_descriptor *desc,
798 unsigned long *flags)
800 dma_async_tx_callback callback;
801 void *callback_param;
803 callback = desc->async_tx.callback;
804 callback_param = desc->async_tx.callback_param;
806 spin_unlock_irqrestore(&chan->lock, *flags);
807 callback(callback_param);
808 spin_lock_irqsave(&chan->lock, *flags);
813 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
814 * @chan: Driver specific DMA channel
816 static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
818 struct xilinx_dma_tx_descriptor *desc, *next;
821 spin_lock_irqsave(&chan->lock, flags);
823 list_for_each_entry_safe(desc, next, &chan->done_list, node) {
824 struct dmaengine_desc_callback cb;
827 xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
831 /* Remove from the list of running transactions */
832 list_del(&desc->node);
834 /* Run the link descriptor callback function */
835 dmaengine_desc_get_callback(&desc->async_tx, &cb);
836 if (dmaengine_desc_callback_valid(&cb)) {
837 spin_unlock_irqrestore(&chan->lock, flags);
838 dmaengine_desc_callback_invoke(&cb, NULL);
839 spin_lock_irqsave(&chan->lock, flags);
842 /* Run any dependencies, then free the descriptor */
843 dma_run_dependencies(&desc->async_tx);
844 xilinx_dma_free_tx_descriptor(chan, desc);
847 spin_unlock_irqrestore(&chan->lock, flags);
851 * xilinx_dma_do_tasklet - Schedule completion tasklet
852 * @data: Pointer to the Xilinx DMA channel structure
854 static void xilinx_dma_do_tasklet(unsigned long data)
856 struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;
858 xilinx_dma_chan_desc_cleanup(chan);
862 * xilinx_dma_alloc_chan_resources - Allocate channel resources
863 * @dchan: DMA channel
865 * Return: '0' on success and failure value on error
867 static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
869 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
872 /* Has this channel already been allocated? */
877 * The descriptors need to be 64-byte aligned to meet
878 * the Xilinx VDMA specification requirement.
880 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
881 /* Allocate the buffer descriptors. */
882 chan->seg_v = dma_alloc_coherent(chan->dev,
883 sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
884 &chan->seg_p, GFP_KERNEL);
887 "unable to allocate channel %d descriptors\n",
892 * For cyclic DMA mode the tail descriptor register must be
893 * programmed with a value that is not part of the BD chain,
894 * so allocate a descriptor segment at channel allocation time
895 * for programming the tail descriptor.
897 chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
898 sizeof(*chan->cyclic_seg_v),
901 if (!chan->cyclic_seg_v) {
903 "unable to allocate desc segment for cyclic DMA\n");
904 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
905 XILINX_DMA_NUM_DESCS, chan->seg_v,
909 chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
911 for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
912 chan->seg_v[i].hw.next_desc =
913 lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
914 ((i + 1) % XILINX_DMA_NUM_DESCS));
915 chan->seg_v[i].hw.next_desc_msb =
916 upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
917 ((i + 1) % XILINX_DMA_NUM_DESCS));
918 chan->seg_v[i].phys = chan->seg_p +
919 sizeof(*chan->seg_v) * i;
920 list_add_tail(&chan->seg_v[i].node,
921 &chan->free_seg_list);
923 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
924 chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
926 sizeof(struct xilinx_cdma_tx_segment),
927 __alignof__(struct xilinx_cdma_tx_segment),
930 chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
932 sizeof(struct xilinx_vdma_tx_segment),
933 __alignof__(struct xilinx_vdma_tx_segment),
937 if (!chan->desc_pool &&
938 (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
940 "unable to allocate channel %d descriptor pool\n",
945 dma_cookie_init(dchan);
947 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
948 /* For AXI DMA, resetting one channel also resets the
949 * other channel, so enable the interrupts here.
951 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
952 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
955 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
956 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
957 XILINX_CDMA_CR_SGMODE);
963 * xilinx_dma_tx_status - Get DMA transaction status
964 * @dchan: DMA channel
965 * @cookie: Transaction identifier
966 * @txstate: Transaction state
968 * Return: DMA transaction status
970 static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
972 struct dma_tx_state *txstate)
974 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
975 struct xilinx_dma_tx_descriptor *desc;
976 struct xilinx_axidma_tx_segment *segment;
977 struct xilinx_axidma_desc_hw *hw;
982 ret = dma_cookie_status(dchan, cookie, txstate);
983 if (ret == DMA_COMPLETE || !txstate)
986 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
987 spin_lock_irqsave(&chan->lock, flags);
989 desc = list_last_entry(&chan->active_list,
990 struct xilinx_dma_tx_descriptor, node);
992 list_for_each_entry(segment, &desc->segments, node) {
994 residue += (hw->control - hw->status) &
995 XILINX_DMA_MAX_TRANS_LEN;
998 spin_unlock_irqrestore(&chan->lock, flags);
1000 chan->residue = residue;
1001 dma_set_residue(txstate, chan->residue);
1008 * xilinx_dma_stop_transfer - Halt DMA channel
1009 * @chan: Driver specific DMA channel
1011 * Return: '0' on success and failure value on error
1013 static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
1017 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1019 /* Wait for the hardware to halt */
1020 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1021 val & XILINX_DMA_DMASR_HALTED, 0,
1022 XILINX_DMA_LOOP_COUNT);
1026 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
1027 * @chan: Driver specific DMA channel
1029 * Return: '0' on success and failure value on error
1031 static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
1035 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1036 val & XILINX_DMA_DMASR_IDLE, 0,
1037 XILINX_DMA_LOOP_COUNT);
1041 * xilinx_dma_start - Start DMA channel
1042 * @chan: Driver specific DMA channel
1044 static void xilinx_dma_start(struct xilinx_dma_chan *chan)
1049 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1051 /* Wait for the hardware to start */
1052 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1053 !(val & XILINX_DMA_DMASR_HALTED), 0,
1054 XILINX_DMA_LOOP_COUNT);
1057 dev_err(chan->dev, "Cannot start channel %p: %x\n",
1058 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1065 * xilinx_vdma_start_transfer - Starts VDMA transfer
1066 * @chan: Driver specific channel struct pointer
1068 static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
1070 struct xilinx_vdma_config *config = &chan->config;
1071 struct xilinx_dma_tx_descriptor *desc, *tail_desc;
1073 struct xilinx_vdma_tx_segment *tail_segment;
1075 /* This function was invoked with lock held */
1082 if (list_empty(&chan->pending_list))
1085 desc = list_first_entry(&chan->pending_list,
1086 struct xilinx_dma_tx_descriptor, node);
1087 tail_desc = list_last_entry(&chan->pending_list,
1088 struct xilinx_dma_tx_descriptor, node);
1090 tail_segment = list_last_entry(&tail_desc->segments,
1091 struct xilinx_vdma_tx_segment, node);
1094 * If hardware is idle, then all descriptors on the running lists are
1095 * done, start new transfers
1098 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1099 desc->async_tx.phys);
1101 /* Configure the hardware using info in the config structure */
1102 if (chan->has_vflip) {
1103 reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
1104 reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
1105 reg |= config->vflip_en;
1106 dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
1110 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1112 if (config->frm_cnt_en)
1113 reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
1115 reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
1118 * With SG, start with circular mode, so that BDs can be fetched.
1119 * In direct register mode, if not parking, enable circular mode
1121 if (chan->has_sg || !config->park)
1122 reg |= XILINX_DMA_DMACR_CIRC_EN;
1125 reg &= ~XILINX_DMA_DMACR_CIRC_EN;
1127 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1129 j = chan->desc_submitcount;
1130 reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
1131 if (chan->direction == DMA_MEM_TO_DEV) {
1132 reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
1133 reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
1135 reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
1136 reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
1138 dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
1140 /* Start the hardware */
1141 xilinx_dma_start(chan);
1146 /* Start the transfer */
1148 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1149 tail_segment->phys);
1150 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1151 chan->desc_pendingcount = 0;
1153 struct xilinx_vdma_tx_segment *segment, *last = NULL;
1156 if (chan->desc_submitcount < chan->num_frms)
1157 i = chan->desc_submitcount;
1159 list_for_each_entry(segment, &desc->segments, node) {
1161 vdma_desc_write_64(chan,
1162 XILINX_VDMA_REG_START_ADDRESS_64(i++),
1163 segment->hw.buf_addr,
1164 segment->hw.buf_addr_msb);
1166 vdma_desc_write(chan,
1167 XILINX_VDMA_REG_START_ADDRESS(i++),
1168 segment->hw.buf_addr);
1176 /* HW expects these parameters to be the same for one transaction */
1177 vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
1178 vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
1180 vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
1182 chan->desc_submitcount++;
1183 chan->desc_pendingcount--;
1184 list_del(&desc->node);
1185 list_add_tail(&desc->node, &chan->active_list);
1186 if (chan->desc_submitcount == chan->num_frms)
1187 chan->desc_submitcount = 0;
1194 * xilinx_cdma_start_transfer - Starts cdma transfer
1195 * @chan: Driver specific channel struct pointer
1197 static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1199 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1200 struct xilinx_cdma_tx_segment *tail_segment;
1201 u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
1209 if (list_empty(&chan->pending_list))
1212 head_desc = list_first_entry(&chan->pending_list,
1213 struct xilinx_dma_tx_descriptor, node);
1214 tail_desc = list_last_entry(&chan->pending_list,
1215 struct xilinx_dma_tx_descriptor, node);
1216 tail_segment = list_last_entry(&tail_desc->segments,
1217 struct xilinx_cdma_tx_segment, node);
1219 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1220 ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1221 ctrl_reg |= chan->desc_pendingcount <<
1222 XILINX_DMA_CR_COALESCE_SHIFT;
1223 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
1227 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
1228 XILINX_CDMA_CR_SGMODE);
1230 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1231 XILINX_CDMA_CR_SGMODE);
1233 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1234 head_desc->async_tx.phys);
1236 /* Update tail ptr register which will start the transfer */
1237 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1238 tail_segment->phys);
1240 /* In simple mode */
1241 struct xilinx_cdma_tx_segment *segment;
1242 struct xilinx_cdma_desc_hw *hw;
1244 segment = list_first_entry(&head_desc->segments,
1245 struct xilinx_cdma_tx_segment,
1250 xilinx_write(chan, XILINX_CDMA_REG_SRCADDR,
1251 xilinx_prep_dma_addr_t(hw->src_addr));
1252 xilinx_write(chan, XILINX_CDMA_REG_DSTADDR,
1253 xilinx_prep_dma_addr_t(hw->dest_addr));
1255 /* Start the transfer */
1256 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1257 hw->control & XILINX_DMA_MAX_TRANS_LEN);
1260 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1261 chan->desc_pendingcount = 0;
1266 * xilinx_dma_start_transfer - Starts DMA transfer
1267 * @chan: Driver specific channel struct pointer
1269 static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1271 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1272 struct xilinx_axidma_tx_segment *tail_segment;
1278 if (list_empty(&chan->pending_list))
1284 head_desc = list_first_entry(&chan->pending_list,
1285 struct xilinx_dma_tx_descriptor, node);
1286 tail_desc = list_last_entry(&chan->pending_list,
1287 struct xilinx_dma_tx_descriptor, node);
1288 tail_segment = list_last_entry(&tail_desc->segments,
1289 struct xilinx_axidma_tx_segment, node);
1291 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1293 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1294 reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1295 reg |= chan->desc_pendingcount <<
1296 XILINX_DMA_CR_COALESCE_SHIFT;
1297 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1300 if (chan->has_sg && !chan->xdev->mcdma)
1301 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1302 head_desc->async_tx.phys);
1304 if (chan->has_sg && chan->xdev->mcdma) {
1305 if (chan->direction == DMA_MEM_TO_DEV) {
1306 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1307 head_desc->async_tx.phys);
1310 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1311 head_desc->async_tx.phys);
1313 dma_ctrl_write(chan,
1314 XILINX_DMA_MCRX_CDESC(chan->tdest),
1315 head_desc->async_tx.phys);
1320 xilinx_dma_start(chan);
1325 /* Start the transfer */
1326 if (chan->has_sg && !chan->xdev->mcdma) {
1328 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1329 chan->cyclic_seg_v->phys);
1331 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1332 tail_segment->phys);
1333 } else if (chan->has_sg && chan->xdev->mcdma) {
1334 if (chan->direction == DMA_MEM_TO_DEV) {
1335 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1336 tail_segment->phys);
1339 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1340 tail_segment->phys);
1342 dma_ctrl_write(chan,
1343 XILINX_DMA_MCRX_TDESC(chan->tdest),
1344 tail_segment->phys);
1348 struct xilinx_axidma_tx_segment *segment;
1349 struct xilinx_axidma_desc_hw *hw;
1351 segment = list_first_entry(&head_desc->segments,
1352 struct xilinx_axidma_tx_segment,
1356 xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);
1358 /* Start the transfer */
1359 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1360 hw->control & XILINX_DMA_MAX_TRANS_LEN);
1363 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1364 chan->desc_pendingcount = 0;
1369 * xilinx_dma_issue_pending - Issue pending transactions
1370 * @dchan: DMA channel
1372 static void xilinx_dma_issue_pending(struct dma_chan *dchan)
1374 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1375 unsigned long flags;
1377 spin_lock_irqsave(&chan->lock, flags);
1378 chan->start_transfer(chan);
1379 spin_unlock_irqrestore(&chan->lock, flags);
1383 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
1384 * @chan : xilinx DMA channel
1388 static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
1390 struct xilinx_dma_tx_descriptor *desc, *next;
1392 /* This function was invoked with lock held */
1393 if (list_empty(&chan->active_list))
1396 list_for_each_entry_safe(desc, next, &chan->active_list, node) {
1397 list_del(&desc->node);
1399 dma_cookie_complete(&desc->async_tx);
1400 list_add_tail(&desc->node, &chan->done_list);
1405 * xilinx_dma_reset - Reset DMA channel
1406 * @chan: Driver specific DMA channel
1408 * Return: '0' on success and failure value on error
1410 static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
1415 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
1417 /* Wait for the hardware to finish reset */
1418 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
1419 !(tmp & XILINX_DMA_DMACR_RESET), 0,
1420 XILINX_DMA_LOOP_COUNT);
1423 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
1424 dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
1425 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1431 chan->desc_submitcount = 0;
1437 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
1438 * @chan: Driver specific DMA channel
1440 * Return: '0' on success and failure value on error
1442 static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
1447 err = xilinx_dma_reset(chan);
1451 /* Enable interrupts */
1452 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1453 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1459 * xilinx_dma_irq_handler - DMA Interrupt handler
1461 * @data: Pointer to the Xilinx DMA channel structure
1463 * Return: IRQ_HANDLED/IRQ_NONE
1465 static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
1467 struct xilinx_dma_chan *chan = data;
1470 /* Read the status and ack the interrupts. */
1471 status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
1472 if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
1475 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1476 status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1478 if (status & XILINX_DMA_DMASR_ERR_IRQ) {
1480 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
1481 * error is recoverable, ignore it. Otherwise flag the error.
1483 * Only recoverable errors can be cleared in the DMASR register,
1484 * so make sure not to write 1 to the other error bits.
1486 u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
1488 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1489 errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
1491 if (!chan->flush_on_fsync ||
1492 (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
1494 "Channel %p has errors %x, cdr %x tdr %x\n",
1496 dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
1497 dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
1502 if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
1504 * Device takes too long to do the transfer when user requires
1507 dev_dbg(chan->dev, "Inter-packet latency too long\n");
1510 if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
1511 spin_lock(&chan->lock);
1512 xilinx_dma_complete_descriptor(chan);
1514 chan->start_transfer(chan);
1515 spin_unlock(&chan->lock);
1518 tasklet_schedule(&chan->tasklet);
1523 * append_desc_queue - Queuing descriptor
1524 * @chan: Driver specific dma channel
1525 * @desc: dma transaction descriptor
1527 static void append_desc_queue(struct xilinx_dma_chan *chan,
1528 struct xilinx_dma_tx_descriptor *desc)
1530 struct xilinx_vdma_tx_segment *tail_segment;
1531 struct xilinx_dma_tx_descriptor *tail_desc;
1532 struct xilinx_axidma_tx_segment *axidma_tail_segment;
1533 struct xilinx_cdma_tx_segment *cdma_tail_segment;
1535 if (list_empty(&chan->pending_list))
1539 * Add the hardware descriptor to the chain of hardware descriptors
1540 * that already exists in memory.
1542 tail_desc = list_last_entry(&chan->pending_list,
1543 struct xilinx_dma_tx_descriptor, node);
1544 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
1545 tail_segment = list_last_entry(&tail_desc->segments,
1546 struct xilinx_vdma_tx_segment,
1548 tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1549 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1550 cdma_tail_segment = list_last_entry(&tail_desc->segments,
1551 struct xilinx_cdma_tx_segment,
1553 cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1555 axidma_tail_segment = list_last_entry(&tail_desc->segments,
1556 struct xilinx_axidma_tx_segment,
1558 axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1562 * Add the software descriptor and all children to the list
1563 * of pending transactions
1566 list_add_tail(&desc->node, &chan->pending_list);
1567 chan->desc_pendingcount++;
1569 if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
1570 && unlikely(chan->desc_pendingcount > chan->num_frms)) {
1571 dev_dbg(chan->dev, "desc pendingcount is too high\n");
1572 chan->desc_pendingcount = chan->num_frms;
1577 * xilinx_dma_tx_submit - Submit DMA transaction
1578 * @tx: Async transaction descriptor
1580 * Return: cookie value on success and failure value on error
1582 static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
1584 struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
1585 struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
1586 dma_cookie_t cookie;
1587 unsigned long flags;
1591 xilinx_dma_free_tx_descriptor(chan, desc);
1597 * If the reset fails, the channel is no longer functional
1598 * and the system needs a hard reset.
1600 err = xilinx_dma_chan_reset(chan);
1605 spin_lock_irqsave(&chan->lock, flags);
1607 cookie = dma_cookie_assign(tx);
1609 /* Put this transaction onto the tail of the pending queue */
1610 append_desc_queue(chan, desc);
1613 chan->cyclic = true;
1615 spin_unlock_irqrestore(&chan->lock, flags);
1621 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
1622 * DMA_SLAVE transaction
1623 * @dchan: DMA channel
1624 * @xt: Interleaved template pointer
1625 * @flags: transfer ack flags
1627 * Return: Async transaction descriptor on success and NULL on failure
1629 static struct dma_async_tx_descriptor *
1630 xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
1631 struct dma_interleaved_template *xt,
1632 unsigned long flags)
1634 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1635 struct xilinx_dma_tx_descriptor *desc;
1636 struct xilinx_vdma_tx_segment *segment;
1637 struct xilinx_vdma_desc_hw *hw;
1639 if (!is_slave_direction(xt->dir))
1642 if (!xt->numf || !xt->sgl[0].size)
1645 if (xt->frame_size != 1)
1648 /* Allocate a transaction descriptor. */
1649 desc = xilinx_dma_alloc_tx_descriptor(chan);
1653 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1654 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1655 async_tx_ack(&desc->async_tx);
1657 /* Allocate the link descriptor from DMA pool */
1658 segment = xilinx_vdma_alloc_tx_segment(chan);
1662 /* Fill in the hardware descriptor */
1664 hw->vsize = xt->numf;
1665 hw->hsize = xt->sgl[0].size;
1666 hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
1667 XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
1668 hw->stride |= chan->config.frm_dly <<
1669 XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
1671 if (xt->dir != DMA_MEM_TO_DEV) {
1672 if (chan->ext_addr) {
1673 hw->buf_addr = lower_32_bits(xt->dst_start);
1674 hw->buf_addr_msb = upper_32_bits(xt->dst_start);
1676 hw->buf_addr = xt->dst_start;
1679 if (chan->ext_addr) {
1680 hw->buf_addr = lower_32_bits(xt->src_start);
1681 hw->buf_addr_msb = upper_32_bits(xt->src_start);
1683 hw->buf_addr = xt->src_start;
1687 /* Insert the segment into the descriptor segments list. */
1688 list_add_tail(&segment->node, &desc->segments);
1690 /* Link the last hardware descriptor with the first. */
1691 segment = list_first_entry(&desc->segments,
1692 struct xilinx_vdma_tx_segment, node);
1693 desc->async_tx.phys = segment->phys;
1695 return &desc->async_tx;
1698 xilinx_dma_free_tx_descriptor(chan, desc);
1703 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
1704 * @dchan: DMA channel
1705 * @dma_dst: destination address
1706 * @dma_src: source address
1707 * @len: transfer length
1708 * @flags: transfer ack flags
1710 * Return: Async transaction descriptor on success and NULL on failure
1712 static struct dma_async_tx_descriptor *
1713 xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
1714 dma_addr_t dma_src, size_t len, unsigned long flags)
1716 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1717 struct xilinx_dma_tx_descriptor *desc;
1718 struct xilinx_cdma_tx_segment *segment;
1719 struct xilinx_cdma_desc_hw *hw;
1721 if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
1724 desc = xilinx_dma_alloc_tx_descriptor(chan);
1728 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1729 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1731 /* Allocate the link descriptor from DMA pool */
1732 segment = xilinx_cdma_alloc_tx_segment(chan);
1738 hw->src_addr = dma_src;
1739 hw->dest_addr = dma_dst;
1740 if (chan->ext_addr) {
1741 hw->src_addr_msb = upper_32_bits(dma_src);
1742 hw->dest_addr_msb = upper_32_bits(dma_dst);
1745 /* Insert the segment into the descriptor segments list. */
1746 list_add_tail(&segment->node, &desc->segments);
1748 desc->async_tx.phys = segment->phys;
1749 hw->next_desc = segment->phys;
1751 return &desc->async_tx;
1754 xilinx_dma_free_tx_descriptor(chan, desc);
1759 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1760 * @dchan: DMA channel
1761 * @sgl: scatterlist to transfer to/from
1762 * @sg_len: number of entries in @sgl
1763 * @direction: DMA direction
1764 * @flags: transfer ack flags
1765 * @context: APP words of the descriptor
1767 * Return: Async transaction descriptor on success and NULL on failure
1769 static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
1770 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
1771 enum dma_transfer_direction direction, unsigned long flags,
1774 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1775 struct xilinx_dma_tx_descriptor *desc;
1776 struct xilinx_axidma_tx_segment *segment = NULL;
1777 u32 *app_w = (u32 *)context;
1778 struct scatterlist *sg;
1783 if (!is_slave_direction(direction))
1786 /* Allocate a transaction descriptor. */
1787 desc = xilinx_dma_alloc_tx_descriptor(chan);
1791 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1792 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1794 /* Build transactions using information in the scatter gather list */
1795 for_each_sg(sgl, sg, sg_len, i) {
1798 /* Loop until the entire scatterlist entry is used */
1799 while (sg_used < sg_dma_len(sg)) {
1800 struct xilinx_axidma_desc_hw *hw;
1802 /* Get a free segment */
1803 segment = xilinx_axidma_alloc_tx_segment(chan);
1808 * Calculate the maximum number of bytes to transfer,
1809 * making sure it is less than the hw limit
1811 copy = min_t(size_t, sg_dma_len(sg) - sg_used,
1812 XILINX_DMA_MAX_TRANS_LEN);
1815 /* Fill in the descriptor */
1816 xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
1821 if (chan->direction == DMA_MEM_TO_DEV) {
1823 memcpy(hw->app, app_w, sizeof(u32) *
1824 XILINX_DMA_NUM_APP_WORDS);
1830 * Insert the segment into the descriptor segments
1833 list_add_tail(&segment->node, &desc->segments);
1837 segment = list_first_entry(&desc->segments,
1838 struct xilinx_axidma_tx_segment, node);
1839 desc->async_tx.phys = segment->phys;
1841 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
1842 if (chan->direction == DMA_MEM_TO_DEV) {
1843 segment->hw.control |= XILINX_DMA_BD_SOP;
1844 segment = list_last_entry(&desc->segments,
1845 struct xilinx_axidma_tx_segment,
1847 segment->hw.control |= XILINX_DMA_BD_EOP;
1850 return &desc->async_tx;
1853 xilinx_dma_free_tx_descriptor(chan, desc);
1858 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
1859 * @dchan: DMA channel
1860 * @buf_addr: Physical address of the buffer
1861 * @buf_len: Total length of the cyclic buffers
1862 * @period_len: length of individual cyclic buffer
1863 * @direction: DMA direction
1864 * @flags: transfer ack flags
1866 * Return: Async transaction descriptor on success and NULL on failure
1868 static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
1869 struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
1870 size_t period_len, enum dma_transfer_direction direction,
1871 unsigned long flags)
1873 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1874 struct xilinx_dma_tx_descriptor *desc;
1875 struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
1876 size_t copy, sg_used;
1877 unsigned int num_periods;
1884 num_periods = buf_len / period_len;
1889 if (!is_slave_direction(direction))
1892 /* Allocate a transaction descriptor. */
1893 desc = xilinx_dma_alloc_tx_descriptor(chan);
1897 chan->direction = direction;
1898 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1899 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1901 for (i = 0; i < num_periods; ++i) {
1904 while (sg_used < period_len) {
1905 struct xilinx_axidma_desc_hw *hw;
1907 /* Get a free segment */
1908 segment = xilinx_axidma_alloc_tx_segment(chan);
1913 * Calculate the maximum number of bytes to transfer,
1914 * making sure it is less than the hw limit
1916 copy = min_t(size_t, period_len - sg_used,
1917 XILINX_DMA_MAX_TRANS_LEN);
1919 xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
1924 prev->hw.next_desc = segment->phys;
1930 * Insert the segment into the descriptor segments
1933 list_add_tail(&segment->node, &desc->segments);
1937 head_segment = list_first_entry(&desc->segments,
1938 struct xilinx_axidma_tx_segment, node);
1939 desc->async_tx.phys = head_segment->phys;
1941 desc->cyclic = true;
1942 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1943 reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
1944 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1946 segment = list_last_entry(&desc->segments,
1947 struct xilinx_axidma_tx_segment,
1949 segment->hw.next_desc = (u32) head_segment->phys;
1951 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
1952 if (direction == DMA_MEM_TO_DEV) {
1953 head_segment->hw.control |= XILINX_DMA_BD_SOP;
1954 segment->hw.control |= XILINX_DMA_BD_EOP;
1957 return &desc->async_tx;
1960 xilinx_dma_free_tx_descriptor(chan, desc);
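
/*
 * Illustrative client-side sketch (editorial; "buf_phys" and
 * "my_period_done" are assumptions, not part of this driver): a client
 * typically prepares a cyclic transfer over a coherent ring buffer like
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, 4 * period_len,
 *					 period_len, DMA_MEM_TO_DEV,
 *					 DMA_PREP_INTERRUPT);
 *	desc->callback = my_period_done;	// invoked as periods complete
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */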
1965 * xilinx_dma_prep_interleaved - prepare a descriptor for a
1966 * DMA_SLAVE transaction
1967 * @dchan: DMA channel
1968 * @xt: Interleaved template pointer
1969 * @flags: transfer ack flags
1971 * Return: Async transaction descriptor on success and NULL on failure
1973 static struct dma_async_tx_descriptor *
1974 xilinx_dma_prep_interleaved(struct dma_chan *dchan,
1975 struct dma_interleaved_template *xt,
1976 unsigned long flags)
1978 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1979 struct xilinx_dma_tx_descriptor *desc;
1980 struct xilinx_axidma_tx_segment *segment;
1981 struct xilinx_axidma_desc_hw *hw;
1983 if (!is_slave_direction(xt->dir))
1986 if (!xt->numf || !xt->sgl[0].size)
1989 if (xt->frame_size != 1)
1992 /* Allocate a transaction descriptor. */
1993 desc = xilinx_dma_alloc_tx_descriptor(chan);
1997 chan->direction = xt->dir;
1998 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1999 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2001 /* Get a free segment */
2002 segment = xilinx_axidma_alloc_tx_segment(chan);
2008 /* Fill in the descriptor */
2009 if (xt->dir != DMA_MEM_TO_DEV)
2010 hw->buf_addr = xt->dst_start;
2012 hw->buf_addr = xt->src_start;
2014 hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
2015 hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
2016 XILINX_DMA_BD_VSIZE_MASK;
2017 hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
2018 XILINX_DMA_BD_STRIDE_MASK;
2019 hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;
2022 * Insert the segment into the descriptor segments
2025 list_add_tail(&segment->node, &desc->segments);
2028 segment = list_first_entry(&desc->segments,
2029 struct xilinx_axidma_tx_segment, node);
2030 desc->async_tx.phys = segment->phys;
2032 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
2033 if (xt->dir == DMA_MEM_TO_DEV) {
2034 segment->hw.control |= XILINX_DMA_BD_SOP;
2035 segment = list_last_entry(&desc->segments,
2036 struct xilinx_axidma_tx_segment,
2038 segment->hw.control |= XILINX_DMA_BD_EOP;
2041 return &desc->async_tx;
2044 xilinx_dma_free_tx_descriptor(chan, desc);
2049 * xilinx_dma_terminate_all - Halt the channel and free descriptors
2050 * @dchan: Driver specific DMA Channel pointer
2052 * Return: '0' always.
2054 static int xilinx_dma_terminate_all(struct dma_chan *dchan)
2056 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2061 xilinx_dma_chan_reset(chan);
2063 err = chan->stop_transfer(chan);
2065 dev_err(chan->dev, "Cannot stop channel %p: %x\n",
2066 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
2070 /* Remove and free all of the descriptors in the lists */
2071 xilinx_dma_free_descriptors(chan);
2075 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2076 reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2077 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2078 chan->cyclic = false;
2081 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
2082 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2083 XILINX_CDMA_CR_SGMODE);
2089 * xilinx_vdma_channel_set_config - Configure VDMA channel
2090 * Run-time configuration for Axi VDMA, supports:
2091 * . halt the channel
2092 * . configure interrupt coalescing and inter-packet delay threshold
2093 * . start/stop parking
2096 * @dchan: DMA channel
2097 * @cfg: VDMA device configuration pointer
2099 * Return: '0' on success and failure value on error
2101 int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
2102 struct xilinx_vdma_config *cfg)
2104 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2108 return xilinx_dma_chan_reset(chan);
2110 dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2112 chan->config.frm_dly = cfg->frm_dly;
2113 chan->config.park = cfg->park;
2115 /* genlock settings */
2116 chan->config.gen_lock = cfg->gen_lock;
2117 chan->config.master = cfg->master;
2119 if (cfg->gen_lock && chan->genlock) {
2120 dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
2121 dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
2124 chan->config.frm_cnt_en = cfg->frm_cnt_en;
2125 chan->config.vflip_en = cfg->vflip_en;
2128 chan->config.park_frm = cfg->park_frm;
2130 chan->config.park_frm = -1;
2132 chan->config.coalesc = cfg->coalesc;
2133 chan->config.delay = cfg->delay;
2135 if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
2136 dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
2137 chan->config.coalesc = cfg->coalesc;
2140 if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
2141 dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
2142 chan->config.delay = cfg->delay;
2145 /* FSync Source selection */
2146 dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
2147 dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
2149 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
2153 EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
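
/*
 * Illustrative client-side sketch (editorial, not part of this driver): a
 * video client that obtained "chan" via dma_request_chan() on its own device
 * "dev" could enable per-frame interrupts before issuing interleaved
 * transfers:
 *
 *	struct xilinx_vdma_config cfg = {
 *		.frm_cnt_en = 1,
 *		.coalesc = 1,		// interrupt after every frame
 *	};
 *	int err = xilinx_vdma_channel_set_config(chan, &cfg);
 *	if (err)
 *		dev_err(dev, "VDMA channel config failed: %d\n", err);
 */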
/* -----------------------------------------------------------------------------
 * Probe and remove
 */

/**
 * xilinx_dma_chan_remove - Per Channel remove function
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
{
	/* Disable all interrupts */
	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (chan->irq > 0)
		free_irq(chan->irq, chan);

	tasklet_kill(&chan->tasklet);
	list_del(&chan->common.device_node);
}
static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			   struct clk **tx_clk, struct clk **rx_clk,
			   struct clk **sg_clk, struct clk **tmp_clk)
{
	int err;

	*tmp_clk = NULL;
	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;
	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;
	*sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
	if (IS_ERR(*sg_clk))
		*sg_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}
	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}
	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txclk;
	}
	err = clk_prepare_enable(*sg_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);
	return err;
}
static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **dev_clk, struct clk **tmp_clk,
			    struct clk **tmp1_clk, struct clk **tmp2_clk)
{
	int err;

	*tmp_clk = NULL;
	*tmp1_clk = NULL;
	*tmp2_clk = NULL;
	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
		return err;
	}
	*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
	if (IS_ERR(*dev_clk)) {
		err = PTR_ERR(*dev_clk);
		dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
		return err;
	}
	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}
	err = clk_prepare_enable(*dev_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	return 0;

err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);
	return err;
}
static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **tx_clk, struct clk **txs_clk,
			    struct clk **rx_clk, struct clk **rxs_clk)
{
	int err;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;
	*txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
	if (IS_ERR(*txs_clk))
		*txs_clk = NULL;
	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;
	*rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
	if (IS_ERR(*rxs_clk))
		*rxs_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}
	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}
	err = clk_prepare_enable(*txs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
		goto err_disable_txclk;
	}
	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txsclk;
	}
	err = clk_prepare_enable(*rxs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txsclk:
	clk_disable_unprepare(*txs_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);
	return err;
}
static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
{
	clk_disable_unprepare(xdev->rxs_clk);
	clk_disable_unprepare(xdev->rx_clk);
	clk_disable_unprepare(xdev->txs_clk);
	clk_disable_unprepare(xdev->tx_clk);
	clk_disable_unprepare(xdev->axi_clk);
}
/**
 * xilinx_dma_chan_probe - Per Channel Probing
 * It gets channel features from the device tree entry and
 * initializes special channel handling routines
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 * @chan_id: DMA Channel id
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
				 struct device_node *node, int chan_id)
{
	struct xilinx_dma_chan *chan;
	bool has_dre = false;
	u32 value, width;
	int err;

	/* Allocate and initialize the channel structure */
	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->dev = xdev->dev;
	chan->xdev = xdev;
	chan->has_sg = xdev->has_sg;
	chan->desc_pendingcount = 0x0;
	chan->ext_addr = xdev->ext_addr;
	/*
	 * This variable ensures that descriptors are not submitted while
	 * the DMA engine is in progress; it avoids polling a bit in the
	 * status register to know the DMA state in the driver hot path.
	 */
	chan->idle = true;

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);
	INIT_LIST_HEAD(&chan->active_list);
	INIT_LIST_HEAD(&chan->free_seg_list);

	/* Retrieve the channel properties from the device tree */
	has_dre = of_property_read_bool(node, "xlnx,include-dre");

	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");

	err = of_property_read_u32(node, "xlnx,datawidth", &value);
	if (err) {
		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
		return err;
	}
	width = value >> 3; /* Convert bits to bytes */

	/* If data width is greater than 8 bytes, DRE is not in hw */
	if (width > 8)
		has_dre = false;

	if (!has_dre)
		xdev->common.copy_align = fls(width - 1);

	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
		chan->direction = DMA_MEM_TO_DEV;
		chan->id = chan_id;
		chan->tdest = chan_id;

		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
				chan->flush_on_fsync = true;
		}
	} else if (of_device_is_compatible(node,
					   "xlnx,axi-vdma-s2mm-channel") ||
		   of_device_is_compatible(node,
					   "xlnx,axi-dma-s2mm-channel")) {
		chan->direction = DMA_DEV_TO_MEM;
		chan->id = chan_id;
		chan->tdest = chan_id - xdev->nr_channels;
		chan->has_vflip = of_property_read_bool(node,
					"xlnx,enable-vert-flip");
		if (chan->has_vflip) {
			chan->config.vflip_en = dma_read(chan,
				XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
				XILINX_VDMA_ENABLE_VERTICAL_FLIP;
		}

		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
				chan->flush_on_fsync = true;
		}
	} else {
		dev_err(xdev->dev, "Invalid channel compatible node\n");
		return -EINVAL;
	}

	/* Request the interrupt */
	chan->irq = irq_of_parse_and_map(node, 0);
	err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
			  "xilinx-dma-controller", chan);
	if (err) {
		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
		return err;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		chan->start_transfer = xilinx_dma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->start_transfer = xilinx_cdma_start_transfer;
		chan->stop_transfer = xilinx_cdma_stop_transfer;
	} else {
		chan->start_transfer = xilinx_vdma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	}

	/* Initialize the tasklet */
	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
		     (unsigned long)chan);

	/*
	 * Initialize the DMA channel and add it to the DMA engine channels
	 * list.
	 */
	chan->common.device = &xdev->common;

	list_add_tail(&chan->common.device_node, &xdev->common.channels);
	xdev->chan[chan->id] = chan;

	/* Reset the channel */
	err = xilinx_dma_chan_reset(chan);
	if (err < 0) {
		dev_err(xdev->dev, "Reset channel failed\n");
		return err;
	}

	return 0;
}
/**
 * xilinx_dma_child_probe - Per child node probe
 * It gets the number of dma-channels per child node from
 * the device tree and initializes all the channels.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 *
 * Return: '0' always.
 */
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
				  struct device_node *node)
{
	int ret, i, nr_channels = 1;

	ret = of_property_read_u32(node, "dma-channels", &nr_channels);
	if ((ret < 0) && xdev->mcdma)
		dev_warn(xdev->dev, "missing dma-channels property\n");

	for (i = 0; i < nr_channels; i++)
		xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);

	xdev->nr_channels += nr_channels;

	return 0;
}
/**
 * of_dma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
}
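/*
 * Illustrative consumer sketch (not part of the driver): with a single-cell
 * specifier, the "dmas" phandle argument selects the channel index checked
 * above. Node and property names here are hypothetical examples.
 *
 *	dma-client {
 *		dmas = <&axi_dma_0 0>, <&axi_dma_0 1>;
 *		dma-names = "tx", "rx";
 *	};
 *
 * and in the client driver:
 *
 *	chan = dma_request_chan(dev, "rx");
 */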
static const struct xilinx_dma_config axidma_config = {
	.dmatype = XDMA_TYPE_AXIDMA,
	.clk_init = axidma_clk_init,
};

static const struct xilinx_dma_config axicdma_config = {
	.dmatype = XDMA_TYPE_CDMA,
	.clk_init = axicdma_clk_init,
};

static const struct xilinx_dma_config axivdma_config = {
	.dmatype = XDMA_TYPE_VDMA,
	.clk_init = axivdma_clk_init,
};

static const struct of_device_id xilinx_dma_of_ids[] = {
	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
	{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
	{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
	{}
};
MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
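/*
 * Illustrative device tree fragment (not part of the driver): an AXI DMA
 * node wired up with the compatible string, clock names and per-channel
 * child nodes that the probe code below looks for. Register, clock and
 * interrupt values are placeholders; the authoritative reference is the
 * xilinx_dma.txt binding document.
 *
 *	axi_dma_0: dma@40400000 {
 *		compatible = "xlnx,axi-dma-1.00.a";
 *		reg = <0x40400000 0x10000>;
 *		#dma-cells = <1>;
 *		clocks = <&clkc 15>, <&clkc 15>, <&clkc 15>, <&clkc 15>;
 *		clock-names = "s_axi_lite_aclk", "m_axi_sg_aclk",
 *			      "m_axi_mm2s_aclk", "m_axi_s2mm_aclk";
 *		xlnx,addrwidth = <32>;
 *
 *		dma-channel@40400000 {
 *			compatible = "xlnx,axi-dma-mm2s-channel";
 *			interrupts = <0 59 4>;
 *			xlnx,datawidth = <32>;
 *		};
 *		dma-channel@40400030 {
 *			compatible = "xlnx,axi-dma-s2mm-channel";
 *			interrupts = <0 60 4>;
 *			xlnx,datawidth = <32>;
 *		};
 *	};
 */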
/**
 * xilinx_dma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_probe(struct platform_device *pdev)
{
	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
			struct clk **, struct clk **, struct clk **)
					= axivdma_clk_init;
	struct device_node *node = pdev->dev.of_node;
	struct xilinx_dma_device *xdev;
	struct device_node *child, *np = pdev->dev.of_node;
	struct resource *io;
	u32 num_frames, addr_width;
	int i, err;

	/* Allocate and initialize the DMA engine structure */
	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;
	if (np) {
		const struct of_device_id *match;

		match = of_match_node(xilinx_dma_of_ids, np);
		if (match && match->data) {
			xdev->dma_config = match->data;
			clk_init = xdev->dma_config->clk_init;
		}
	}

	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
		       &xdev->rx_clk, &xdev->rxs_clk);
	if (err)
		return err;

	/* Request and map I/O memory */
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xdev->regs = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(xdev->regs))
		return PTR_ERR(xdev->regs);

	/* Retrieve the DMA engine properties from the device tree */
	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		err = of_property_read_u32(node, "xlnx,num-fstores",
					   &num_frames);
		if (err < 0) {
			dev_err(xdev->dev,
				"missing xlnx,num-fstores property\n");
			return err;
		}

		err = of_property_read_u32(node, "xlnx,flush-fsync",
					   &xdev->flush_on_fsync);
		if (err < 0)
			dev_warn(xdev->dev,
				 "missing xlnx,flush-fsync property\n");
	}

	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
	if (err < 0)
		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");

	if (addr_width > 32)
		xdev->ext_addr = true;
	else
		xdev->ext_addr = false;

	/* Set the dma mask bits */
	dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));

	/* Initialize the DMA engine */
	xdev->common.dev = &pdev->dev;

	INIT_LIST_HEAD(&xdev->common.channels);
	if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
	}

	xdev->common.device_alloc_chan_resources =
				xilinx_dma_alloc_chan_resources;
	xdev->common.device_free_chan_resources =
				xilinx_dma_free_chan_resources;
	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
	xdev->common.device_tx_status = xilinx_dma_tx_status;
	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
		xdev->common.device_prep_dma_cyclic =
					  xilinx_dma_prep_dma_cyclic;
		xdev->common.device_prep_interleaved_dma =
					  xilinx_dma_prep_interleaved;
		/* Residue calculation is supported by only AXI DMA */
		xdev->common.residue_granularity =
					  DMA_RESIDUE_GRANULARITY_SEGMENT;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
	} else {
		xdev->common.device_prep_interleaved_dma =
				xilinx_vdma_dma_prep_interleaved;
	}

	platform_set_drvdata(pdev, xdev);

	/* Initialize the channels */
	for_each_child_of_node(node, child) {
		err = xilinx_dma_child_probe(xdev, child);
		if (err < 0)
			goto disable_clks;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		for (i = 0; i < xdev->nr_channels; i++)
			if (xdev->chan[i])
				xdev->chan[i]->num_frms = num_frames;
	}

	/* Register the DMA engine with the core */
	dma_async_device_register(&xdev->common);

	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
					 xdev);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&xdev->common);
		goto error;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
		dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
	else
		dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");

	return 0;

disable_clks:
	xdma_disable_allclks(xdev);
error:
	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	return err;
}
/**
 * xilinx_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int xilinx_dma_remove(struct platform_device *pdev)
{
	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	xdma_disable_allclks(xdev);

	return 0;
}
static struct platform_driver xilinx_vdma_driver = {
	.driver = {
		.name = "xilinx-vdma",
		.of_match_table = xilinx_dma_of_ids,
	},
	.probe = xilinx_dma_probe,
	.remove = xilinx_dma_remove,
};

module_platform_driver(xilinx_vdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx VDMA driver");
MODULE_LICENSE("GPL v2");