// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA v0 core
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#include <linux/bitfield.h>

#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "dw-edma-v0-regs.h"
#include "dw-edma-v0-debugfs.h"

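/*
 * Control bits used in the linked-list elements and channel control
 * registers below. The expansions are an informed reading of the
 * Synopsys eDMA naming, not spelled out in this file: CB (cycle bit),
 * TCB (toggle cycle bit), LLP (load link pointer), LIE/RIE (local/
 * remote interrupt enable), CCS (consumer cycle state) and LLE
 * (linked list enable).
 */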
enum dw_edma_control {
	DW_EDMA_V0_CB		= BIT(0),
	DW_EDMA_V0_TCB		= BIT(1),
	DW_EDMA_V0_LLP		= BIT(2),
	DW_EDMA_V0_LIE		= BIT(3),
	DW_EDMA_V0_RIE		= BIT(4),
	DW_EDMA_V0_CCS		= BIT(8),
	DW_EDMA_V0_LLE		= BIT(9),
};

static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
{
	return (struct dw_edma_v0_regs __iomem *)dw->rg_region.vaddr;
}

#define SET(dw, name, value)				\
	writel(value, &(__dw_regs(dw)->name))

#define GET(dw, name)					\
	readl(&(__dw_regs(dw)->name))

#define SET_RW(dw, dir, name, value)			\
	do {						\
		if ((dir) == EDMA_DIR_WRITE)		\
			SET(dw, wr_##name, value);	\
		else					\
			SET(dw, rd_##name, value);	\
	} while (0)

#define GET_RW(dw, dir, name)				\
	((dir) == EDMA_DIR_WRITE			\
	  ? GET(dw, wr_##name)				\
	  : GET(dw, rd_##name))

#define SET_BOTH(dw, name, value)			\
	do {						\
		SET(dw, wr_##name, value);		\
		SET(dw, rd_##name, value);		\
	} while (0)

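/*
 * SET_RW() and SET_BOTH() expand to more than one statement, so they
 * are wrapped in do { } while (0) to behave as a single statement and
 * stay safe inside unbraced if/else bodies, e.g.:
 *
 *	if (enable)
 *		SET_BOTH(dw, engine_en, BIT(0));
 *	else
 *		SET_BOTH(dw, engine_en, 0);
 */
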
static inline struct dw_edma_v0_ch_regs __iomem *
__dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
{
	if (dw->mode == EDMA_MODE_LEGACY)
		return &(__dw_regs(dw)->type.legacy.ch);

	if (dir == EDMA_DIR_WRITE)
		return &__dw_regs(dw)->type.unroll.ch[ch].wr;

	return &__dw_regs(dw)->type.unroll.ch[ch].rd;
}

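/*
 * In unroll mode every channel has its own copy of the channel
 * registers. In legacy mode there is a single shared register window
 * multiplexed through viewport_sel, so the viewport select and the
 * actual register access must happen atomically under dw->lock in the
 * two accessors below. BIT(31) of viewport_sel picks the read
 * direction bank.
 */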
static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
			     u32 value, void __iomem *addr)
{
	if (dw->mode == EDMA_MODE_LEGACY) {
		u32 viewport_sel;
		unsigned long flags;

		raw_spin_lock_irqsave(&dw->lock, flags);

		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
		if (dir == EDMA_DIR_READ)
			viewport_sel |= BIT(31);

		writel(viewport_sel,
		       &(__dw_regs(dw)->type.legacy.viewport_sel));
		writel(value, addr);

		raw_spin_unlock_irqrestore(&dw->lock, flags);
	} else {
		writel(value, addr);
	}
}

static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
			   const void __iomem *addr)
{
	u32 value;

	if (dw->mode == EDMA_MODE_LEGACY) {
		u32 viewport_sel;
		unsigned long flags;

		raw_spin_lock_irqsave(&dw->lock, flags);

		viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
		if (dir == EDMA_DIR_READ)
			viewport_sel |= BIT(31);

		writel(viewport_sel,
		       &(__dw_regs(dw)->type.legacy.viewport_sel));
		value = readl(addr);

		raw_spin_unlock_irqrestore(&dw->lock, flags);
	} else {
		value = readl(addr);
	}

	return value;
}

#define SET_CH(dw, dir, ch, name, value) \
	writel_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name))

#define GET_CH(dw, dir, ch, name) \
	readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))

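/*
 * SET_LL() stores one 32-bit word of a linked-list element. It uses
 * writel() rather than a plain store because the LL region is not
 * necessarily ordinary RAM; it may be I/O-mapped memory (e.g. behind a
 * PCI BAR) — an assumption based on how the LL region is mapped, not
 * something stated in this file.
 */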
#define SET_LL(ll, value) \
	writel(value, ll)

/* eDMA management callbacks */
void dw_edma_v0_core_off(struct dw_edma *dw)
{
	SET_BOTH(dw, int_mask, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
	SET_BOTH(dw, int_clear, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
	SET_BOTH(dw, engine_en, 0);
}

u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
{
	u32 num_ch;

	if (dir == EDMA_DIR_WRITE)
		num_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK, GET(dw, ctrl));
	else
		num_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK, GET(dw, ctrl));

	if (num_ch > EDMA_V0_MAX_NR_CH)
		num_ch = EDMA_V0_MAX_NR_CH;

	return (u16)num_ch;
}

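/*
 * The CH_STATUS field of ch_control1 encodes the hardware channel
 * state. The mapping below (1 = running, 3 = stopped/done, anything
 * else an error) follows the eDMA v0 driver convention; treat the
 * exact values as convention rather than something documented here.
 */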
enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->chip->dw;
	u32 tmp;

	tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK,
			GET_CH(dw, chan->dir, chan->id, ch_control1));

	if (tmp == 1)
		return DMA_IN_PROGRESS;
	else if (tmp == 3)
		return DMA_COMPLETE;
	else
		return DMA_ERROR;
}

void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->chip->dw;

	SET_RW(dw, chan->dir, int_clear,
	       FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)));
}

void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->chip->dw;

	SET_RW(dw, chan->dir, int_clear,
	       FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)));
}

u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
	return FIELD_GET(EDMA_V0_DONE_INT_MASK, GET_RW(dw, dir, int_status));
}

u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
	return FIELD_GET(EDMA_V0_ABORT_INT_MASK, GET_RW(dw, dir, int_status));
}

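/*
 * dw_edma_v0_core_write_chunk() lays a chunk out in its LL region as
 * chunk->bursts_alloc data elements followed by one link element. The
 * last data element raises LIE | RIE so an interrupt fires when the
 * engine consumes it, and the link element points back to the start of
 * the chunk's LL region. The CB/TCB bits implement the producer/
 * consumer cycle-bit handshake the engine uses to recognize valid
 * elements — an inference from the code, not stated in this file.
 */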
static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *child;
	struct dw_edma_v0_lli *lli;
	struct dw_edma_v0_llp *llp;
	u32 control = 0, i = 0;
	u64 sar, dar, addr;
	int j;

	lli = (struct dw_edma_v0_lli *)chunk->ll_region.vaddr;

	if (chunk->cb)
		control = DW_EDMA_V0_CB;

	j = chunk->bursts_alloc;
	list_for_each_entry(child, &chunk->burst->list, list) {
		j--;
		if (!j)
			control |= (DW_EDMA_V0_LIE | DW_EDMA_V0_RIE);

		/* Channel control */
		SET_LL(&lli[i].control, control);
		/* Transfer size */
		SET_LL(&lli[i].transfer_size, child->sz);
		/* SAR - low, high */
		sar = cpu_to_le64(child->sar);
		SET_LL(&lli[i].sar_low, lower_32_bits(sar));
		SET_LL(&lli[i].sar_high, upper_32_bits(sar));
		/* DAR - low, high */
		dar = cpu_to_le64(child->dar);
		SET_LL(&lli[i].dar_low, lower_32_bits(dar));
		SET_LL(&lli[i].dar_high, upper_32_bits(dar));
		i++;
	}

	llp = (struct dw_edma_v0_llp *)&lli[i];
	control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB;
	if (!chunk->cb)
		control |= DW_EDMA_V0_CB;

	/* Channel control */
	SET_LL(&llp->control, control);
	/* Linked list - low, high */
	addr = cpu_to_le64(chunk->ll_region.paddr);
	SET_LL(&llp->llp_low, lower_32_bits(addr));
	SET_LL(&llp->llp_high, upper_32_bits(addr));
}

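/*
 * Starting a chunk: the full channel context (engine enable, interrupt
 * unmasking, linked-list error reporting, channel control and the LL
 * base pointer) is only programmed for the first chunk on a channel;
 * follow-up chunks just ring the doorbell.
 */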
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
	struct dw_edma_chan *chan = chunk->chan;
	struct dw_edma *dw = chan->chip->dw;
	u32 tmp;
	u64 llp;

	dw_edma_v0_core_write_chunk(chunk);

	if (first) {
		/* Enable engine */
		SET_RW(dw, chan->dir, engine_en, BIT(0));
		/* Interrupt unmask - done, abort */
		tmp = GET_RW(dw, chan->dir, int_mask);
		tmp &= ~FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id));
		tmp &= ~FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id));
		SET_RW(dw, chan->dir, int_mask, tmp);
		/* Linked list error */
		tmp = GET_RW(dw, chan->dir, linked_list_err_en);
		tmp |= FIELD_PREP(EDMA_V0_LINKED_LIST_ERR_MASK, BIT(chan->id));
		SET_RW(dw, chan->dir, linked_list_err_en, tmp);
		/* Channel control */
		SET_CH(dw, chan->dir, chan->id, ch_control1,
		       (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
		/* Linked list - low, high */
		llp = cpu_to_le64(chunk->ll_region.paddr);
		SET_CH(dw, chan->dir, chan->id, llp_low, lower_32_bits(llp));
		SET_CH(dw, chan->dir, chan->id, llp_high, upper_32_bits(llp));
	}
	/* Doorbell */
	SET_RW(dw, chan->dir, doorbell,
	       FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
}

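/*
 * Each imwr (MSI write) data register is shared by a pair of channels:
 * the even channel's MSI data sits in one half of the register and the
 * odd channel's in the other. That is why the code below reads the
 * pair's register, masks off only this channel's half and writes the
 * merged value back.
 */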
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->chip->dw;
	u32 tmp = 0;

	/* MSI done addr - low, high */
	SET_RW(dw, chan->dir, done_imwr_low, chan->msi.address_lo);
	SET_RW(dw, chan->dir, done_imwr_high, chan->msi.address_hi);
	/* MSI abort addr - low, high */
	SET_RW(dw, chan->dir, abort_imwr_low, chan->msi.address_lo);
	SET_RW(dw, chan->dir, abort_imwr_high, chan->msi.address_hi);
	/* MSI data - low, high */
	switch (chan->id) {
	case 0:
	case 1:
		tmp = GET_RW(dw, chan->dir, ch01_imwr_data);
		break;

	case 2:
	case 3:
		tmp = GET_RW(dw, chan->dir, ch23_imwr_data);
		break;

	case 4:
	case 5:
		tmp = GET_RW(dw, chan->dir, ch45_imwr_data);
		break;

	case 6:
	case 7:
		tmp = GET_RW(dw, chan->dir, ch67_imwr_data);
		break;
	}

	if (chan->id & BIT(0)) {
		/* Channel odd {1, 3, 5, 7} */
		tmp &= EDMA_V0_CH_EVEN_MSI_DATA_MASK;
		tmp |= FIELD_PREP(EDMA_V0_CH_ODD_MSI_DATA_MASK,
				  chan->msi.data);
	} else {
		/* Channel even {0, 2, 4, 6} */
		tmp &= EDMA_V0_CH_ODD_MSI_DATA_MASK;
		tmp |= FIELD_PREP(EDMA_V0_CH_EVEN_MSI_DATA_MASK,
				  chan->msi.data);
	}

	switch (chan->id) {
	case 0:
	case 1:
		SET_RW(dw, chan->dir, ch01_imwr_data, tmp);
		break;

	case 2:
	case 3:
		SET_RW(dw, chan->dir, ch23_imwr_data, tmp);
		break;

	case 4:
	case 5:
		SET_RW(dw, chan->dir, ch45_imwr_data, tmp);
		break;

	case 6:
	case 7:
		SET_RW(dw, chan->dir, ch67_imwr_data, tmp);
		break;
	}

	return 0;
}

/* eDMA debugfs callbacks */
void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip)
{
	dw_edma_v0_debugfs_on(chip);
}

void dw_edma_v0_core_debugfs_off(void)
{
	dw_edma_v0_debugfs_off();
}