drivers/dma/dw-edma/dw-edma-v0-core.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA v0 core
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#include <linux/bitfield.h>

#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "dw-edma-v0-regs.h"
#include "dw-edma-v0-debugfs.h"

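/*
 * Control bits used in the linked-list elements and the channel control
 * register: CB (cycle bit), TCB (toggle cycle bit), LLP (load link
 * pointer), LIE (local interrupt enable), RIE (remote interrupt enable),
 * CCS (consumer cycle state) and LLE (linked list enable).
 */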
enum dw_edma_control {
        DW_EDMA_V0_CB                                   = BIT(0),
        DW_EDMA_V0_TCB                                  = BIT(1),
        DW_EDMA_V0_LLP                                  = BIT(2),
        DW_EDMA_V0_LIE                                  = BIT(3),
        DW_EDMA_V0_RIE                                  = BIT(4),
        DW_EDMA_V0_CCS                                  = BIT(8),
        DW_EDMA_V0_LLE                                  = BIT(9),
};

static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
{
        return (struct dw_edma_v0_regs __iomem *)dw->rg_region.vaddr;
}

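/*
 * Accessors for the global eDMA registers. The _RW variants pick the
 * write or read register bank based on the transfer direction, while
 * SET_BOTH programs the same value into both banks.
 */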
#define SET(dw, name, value)                            \
        writel(value, &(__dw_regs(dw)->name))

#define GET(dw, name)                                   \
        readl(&(__dw_regs(dw)->name))

#define SET_RW(dw, dir, name, value)                    \
        do {                                            \
                if ((dir) == EDMA_DIR_WRITE)            \
                        SET(dw, wr_##name, value);      \
                else                                    \
                        SET(dw, rd_##name, value);      \
        } while (0)

#define GET_RW(dw, dir, name)                           \
        ((dir) == EDMA_DIR_WRITE                        \
          ? GET(dw, wr_##name)                          \
          : GET(dw, rd_##name))

#define SET_BOTH(dw, name, value)                       \
        do {                                            \
                SET(dw, wr_##name, value);              \
                SET(dw, rd_##name, value);              \
        } while (0)

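/*
 * Return the per-channel register block. In legacy mode all channels
 * share a single register window selected through the viewport; in
 * unrolled mode each write/read channel has its own block.
 */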
static inline struct dw_edma_v0_ch_regs __iomem *
__dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
{
        if (dw->mode == EDMA_MODE_LEGACY)
                return &(__dw_regs(dw)->type.legacy.ch);

        if (dir == EDMA_DIR_WRITE)
                return &__dw_regs(dw)->type.unroll.ch[ch].wr;

        return &__dw_regs(dw)->type.unroll.ch[ch].rd;
}

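/*
 * In legacy mode a channel register access must first program the
 * viewport selector (BIT(31) selects the read direction) under the
 * device lock; in unrolled mode the register can be touched directly.
 */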
static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
                             u32 value, void __iomem *addr)
{
        if (dw->mode == EDMA_MODE_LEGACY) {
                u32 viewport_sel;
                unsigned long flags;

                raw_spin_lock_irqsave(&dw->lock, flags);

                viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
                if (dir == EDMA_DIR_READ)
                        viewport_sel |= BIT(31);

                writel(viewport_sel,
                       &(__dw_regs(dw)->type.legacy.viewport_sel));
                writel(value, addr);

                raw_spin_unlock_irqrestore(&dw->lock, flags);
        } else {
                writel(value, addr);
        }
}

static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
                           const void __iomem *addr)
{
        u32 value;

        if (dw->mode == EDMA_MODE_LEGACY) {
                u32 viewport_sel;
                unsigned long flags;

                raw_spin_lock_irqsave(&dw->lock, flags);

                viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
                if (dir == EDMA_DIR_READ)
                        viewport_sel |= BIT(31);

                writel(viewport_sel,
                       &(__dw_regs(dw)->type.legacy.viewport_sel));
                value = readl(addr);

                raw_spin_unlock_irqrestore(&dw->lock, flags);
        } else {
                value = readl(addr);
        }

        return value;
}

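/*
 * Per-channel register accessors (SET_CH/GET_CH) and the helper used to
 * fill linked-list entries in the LL memory region (SET_LL).
 */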
#define SET_CH(dw, dir, ch, name, value) \
        writel_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name))

#define GET_CH(dw, dir, ch, name) \
        readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))

#define SET_LL(ll, value) \
        writel(value, ll)

/* eDMA management callbacks */
void dw_edma_v0_core_off(struct dw_edma *dw)
{
        SET_BOTH(dw, int_mask, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
        SET_BOTH(dw, int_clear, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
        SET_BOTH(dw, engine_en, 0);
}

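/*
 * Read the number of write/read channels advertised in the ctrl register,
 * capped at the maximum the driver supports.
 */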
u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
{
        u32 num_ch;

        if (dir == EDMA_DIR_WRITE)
                num_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK, GET(dw, ctrl));
        else
                num_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK, GET(dw, ctrl));

        if (num_ch > EDMA_V0_MAX_NR_CH)
                num_ch = EDMA_V0_MAX_NR_CH;

        return (u16)num_ch;
}

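/*
 * Map the hardware channel status field to dmaengine status codes:
 * 1 means the transfer is still running, 3 means it has completed, and
 * anything else is reported as an error.
 */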
enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
{
        struct dw_edma *dw = chan->chip->dw;
        u32 tmp;

        tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK,
                        GET_CH(dw, chan->dir, chan->id, ch_control1));

        if (tmp == 1)
                return DMA_IN_PROGRESS;
        else if (tmp == 3)
                return DMA_COMPLETE;
        else
                return DMA_ERROR;
}

void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
{
        struct dw_edma *dw = chan->chip->dw;

        SET_RW(dw, chan->dir, int_clear,
               FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)));
}

void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
{
        struct dw_edma *dw = chan->chip->dw;

        SET_RW(dw, chan->dir, int_clear,
               FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)));
}

u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
        return FIELD_GET(EDMA_V0_DONE_INT_MASK, GET_RW(dw, dir, int_status));
}

u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
        return FIELD_GET(EDMA_V0_ABORT_INT_MASK, GET_RW(dw, dir, int_status));
}

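/*
 * Build the linked list for a chunk: one LLI element per burst, with the
 * LIE/RIE interrupt bits set only on the last burst, followed by an LLP
 * element that points back to the start of the chunk's LL region.
 */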
static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
        struct dw_edma_burst *child;
        struct dw_edma_v0_lli *lli;
        struct dw_edma_v0_llp *llp;
        u32 control = 0, i = 0;
        u64 sar, dar, addr;
        int j;

        lli = (struct dw_edma_v0_lli *)chunk->ll_region.vaddr;

        if (chunk->cb)
                control = DW_EDMA_V0_CB;

        j = chunk->bursts_alloc;
        list_for_each_entry(child, &chunk->burst->list, list) {
                j--;
                if (!j)
                        control |= (DW_EDMA_V0_LIE | DW_EDMA_V0_RIE);

                /* Channel control */
                SET_LL(&lli[i].control, control);
                /* Transfer size */
                SET_LL(&lli[i].transfer_size, child->sz);
                /* SAR - low, high */
                sar = cpu_to_le64(child->sar);
                SET_LL(&lli[i].sar_low, lower_32_bits(sar));
                SET_LL(&lli[i].sar_high, upper_32_bits(sar));
                /* DAR - low, high */
                dar = cpu_to_le64(child->dar);
                SET_LL(&lli[i].dar_low, lower_32_bits(dar));
                SET_LL(&lli[i].dar_high, upper_32_bits(dar));
                i++;
        }

        llp = (struct dw_edma_v0_llp *)&lli[i];
        control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB;
        if (!chunk->cb)
                control |= DW_EDMA_V0_CB;

        /* Channel control */
        SET_LL(&llp->control, control);
        /* Linked list - low, high */
        addr = cpu_to_le64(chunk->ll_region.paddr);
        SET_LL(&llp->llp_low, lower_32_bits(addr));
        SET_LL(&llp->llp_high, upper_32_bits(addr));
}

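/*
 * Write the chunk's linked list and kick the transfer. On the first chunk
 * the engine is enabled, the done/abort interrupts are unmasked, linked
 * list error reporting is enabled and the channel is pointed at the list;
 * every chunk then rings the channel doorbell.
 */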
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
        struct dw_edma_chan *chan = chunk->chan;
        struct dw_edma *dw = chan->chip->dw;
        u32 tmp;
        u64 llp;

        dw_edma_v0_core_write_chunk(chunk);

        if (first) {
                /* Enable engine */
                SET_RW(dw, chan->dir, engine_en, BIT(0));
                /* Interrupt unmask - done, abort */
                tmp = GET_RW(dw, chan->dir, int_mask);
                tmp &= ~FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id));
                tmp &= ~FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id));
                SET_RW(dw, chan->dir, int_mask, tmp);
                /* Linked list error */
                tmp = GET_RW(dw, chan->dir, linked_list_err_en);
                tmp |= FIELD_PREP(EDMA_V0_LINKED_LIST_ERR_MASK, BIT(chan->id));
                SET_RW(dw, chan->dir, linked_list_err_en, tmp);
                /* Channel control */
                SET_CH(dw, chan->dir, chan->id, ch_control1,
                       (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
                /* Linked list - low, high */
                llp = cpu_to_le64(chunk->ll_region.paddr);
                SET_CH(dw, chan->dir, chan->id, llp_low, lower_32_bits(llp));
                SET_CH(dw, chan->dir, chan->id, llp_high, upper_32_bits(llp));
        }
        /* Doorbell */
        SET_RW(dw, chan->dir, doorbell,
               FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
}

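/*
 * Program the MSI (IMWR) done/abort addresses and the per-channel MSI
 * data. The data registers are shared by channel pairs, with the even
 * channel using one half of the register and the odd channel the other.
 */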
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
{
        struct dw_edma *dw = chan->chip->dw;
        u32 tmp = 0;

        /* MSI done addr - low, high */
        SET_RW(dw, chan->dir, done_imwr_low, chan->msi.address_lo);
        SET_RW(dw, chan->dir, done_imwr_high, chan->msi.address_hi);
        /* MSI abort addr - low, high */
        SET_RW(dw, chan->dir, abort_imwr_low, chan->msi.address_lo);
        SET_RW(dw, chan->dir, abort_imwr_high, chan->msi.address_hi);
        /* MSI data - low, high */
        switch (chan->id) {
        case 0:
        case 1:
                tmp = GET_RW(dw, chan->dir, ch01_imwr_data);
                break;

        case 2:
        case 3:
                tmp = GET_RW(dw, chan->dir, ch23_imwr_data);
                break;

        case 4:
        case 5:
                tmp = GET_RW(dw, chan->dir, ch45_imwr_data);
                break;

        case 6:
        case 7:
                tmp = GET_RW(dw, chan->dir, ch67_imwr_data);
                break;
        }

        if (chan->id & BIT(0)) {
                /* Channel odd {1, 3, 5, 7} */
                tmp &= EDMA_V0_CH_EVEN_MSI_DATA_MASK;
                tmp |= FIELD_PREP(EDMA_V0_CH_ODD_MSI_DATA_MASK,
                                  chan->msi.data);
        } else {
                /* Channel even {0, 2, 4, 6} */
                tmp &= EDMA_V0_CH_ODD_MSI_DATA_MASK;
                tmp |= FIELD_PREP(EDMA_V0_CH_EVEN_MSI_DATA_MASK,
                                  chan->msi.data);
        }

        switch (chan->id) {
        case 0:
        case 1:
                SET_RW(dw, chan->dir, ch01_imwr_data, tmp);
                break;

        case 2:
        case 3:
                SET_RW(dw, chan->dir, ch23_imwr_data, tmp);
                break;

        case 4:
        case 5:
                SET_RW(dw, chan->dir, ch45_imwr_data, tmp);
                break;

        case 6:
        case 7:
                SET_RW(dw, chan->dir, ch67_imwr_data, tmp);
                break;
        }

        return 0;
}

/* eDMA debugfs callbacks */
void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip)
{
        dw_edma_v0_debugfs_on(chip);
}

void dw_edma_v0_core_debugfs_off(void)
{
        dw_edma_v0_debugfs_off();
}