// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello  <angelo@sysam.it>

#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>

#include "fsl-edma-common.h"

#define EDMA_CR                 0x00
#define EDMA_ES                 0x04
#define EDMA_ERQ                0x0C
#define EDMA_EEI                0x14
#define EDMA_SERQ               0x1B
#define EDMA_CERQ               0x1A
#define EDMA_SEEI               0x19
#define EDMA_CEEI               0x18
#define EDMA_CINT               0x1F
#define EDMA_CERR               0x1E
#define EDMA_SSRT               0x1D
#define EDMA_CDNE               0x1C
#define EDMA_INTR               0x24
#define EDMA_ERR                0x2C

#define EDMA64_ERQH             0x08
#define EDMA64_EEIH             0x10
#define EDMA64_SERQ             0x18
#define EDMA64_CERQ             0x19
#define EDMA64_SEEI             0x1a
#define EDMA64_CEEI             0x1b
#define EDMA64_CINT             0x1c
#define EDMA64_CERR             0x1d
#define EDMA64_SSRT             0x1e
#define EDMA64_CDNE             0x1f
#define EDMA64_INTH             0x20
#define EDMA64_INTL             0x24
#define EDMA64_ERRH             0x28
#define EDMA64_ERRL             0x2c

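/*
 * Transfer-complete handler for one channel, called from the controller's
 * interrupt handler under the vchan lock.  Non-cyclic descriptors are
 * completed and the next queued descriptor (if any) is started; cyclic
 * descriptors only get their per-period callback.
 */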
void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan)
{
        spin_lock(&fsl_chan->vchan.lock);

        if (!fsl_chan->edesc) {
                /* terminate_all called before */
                spin_unlock(&fsl_chan->vchan.lock);
                return;
        }

        if (!fsl_chan->edesc->iscyclic) {
                list_del(&fsl_chan->edesc->vdesc.node);
                vchan_cookie_complete(&fsl_chan->edesc->vdesc);
                fsl_chan->edesc = NULL;
                fsl_chan->status = DMA_COMPLETE;
                fsl_chan->idle = true;
        } else {
                vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
        }

        if (!fsl_chan->edesc)
                fsl_edma_xfer_desc(fsl_chan);

        spin_unlock(&fsl_chan->vchan.lock);
}

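/*
 * Enable the hardware request line on eDMA v3 controllers with the split
 * per-channel register layout: program the channel system-bus read/write
 * attributes for the transfer direction, set up the channel mux where the
 * hardware has one, then set ERQ in the channel CSR to arm the request.
 */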
static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
{
        u32 val, flags;

        flags = fsl_edma_drvflags(fsl_chan);
        val = edma_readl_chreg(fsl_chan, ch_sbr);
        /* Remote/local swapped wrongly on iMX8 QM Audio edma */
        if (flags & FSL_EDMA_DRV_QUIRK_SWAPPED) {
                if (!fsl_chan->is_rxchan)
                        val |= EDMA_V3_CH_SBR_RD;
                else
                        val |= EDMA_V3_CH_SBR_WR;
        } else {
                if (fsl_chan->is_rxchan)
                        val |= EDMA_V3_CH_SBR_RD;
                else
                        val |= EDMA_V3_CH_SBR_WR;
        }

        if (fsl_chan->is_remote)
                val &= ~(EDMA_V3_CH_SBR_RD | EDMA_V3_CH_SBR_WR);

        edma_writel_chreg(fsl_chan, val, ch_sbr);

        if (flags & FSL_EDMA_DRV_HAS_CHMUX)
                edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux);

        val = edma_readl_chreg(fsl_chan, ch_csr);
        val |= EDMA_V3_CH_CSR_ERQ;
        edma_writel_chreg(fsl_chan, val, ch_csr);
}

static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
        struct edma_regs *regs = &fsl_chan->edma->regs;
        u32 ch = fsl_chan->vchan.chan.chan_id;

        if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
                return fsl_edma3_enable_request(fsl_chan);

        if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
                edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
                edma_writeb(fsl_chan->edma, ch, regs->serq);
        } else {
                /*
                 * ColdFire is big endian, and accesses natively
                 * big endian I/O peripherals
                 */
                iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
                iowrite8(ch, regs->serq);
        }
}

static void fsl_edma3_disable_request(struct fsl_edma_chan *fsl_chan)
{
        u32 val = edma_readl_chreg(fsl_chan, ch_csr);
        u32 flags;

        flags = fsl_edma_drvflags(fsl_chan);

        if (flags & FSL_EDMA_DRV_HAS_CHMUX)
                edma_writel_chreg(fsl_chan, 0, ch_mux);

        val &= ~EDMA_V3_CH_CSR_ERQ;
        edma_writel_chreg(fsl_chan, val, ch_csr);
}

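/*
 * Disable the hardware request line for a channel, dispatching to the
 * eDMA v3 helper on split-register controllers and otherwise clearing the
 * enable-request and enable-error-interrupt bits through the legacy
 * global registers.
 */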
void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
        struct edma_regs *regs = &fsl_chan->edma->regs;
        u32 ch = fsl_chan->vchan.chan.chan_id;

        if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
                return fsl_edma3_disable_request(fsl_chan);

        if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
                edma_writeb(fsl_chan->edma, ch, regs->cerq);
                edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
        } else {
                /*
                 * ColdFire is big endian, and accesses natively
                 * big endian I/O peripherals
                 */
                iowrite8(ch, regs->cerq);
                iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
        }
}

static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
                           u32 off, u32 slot, bool enable)
{
        u8 val8;

        if (enable)
                val8 = EDMAMUX_CHCFG_ENBL | slot;
        else
                val8 = EDMAMUX_CHCFG_DIS;

        iowrite8(val8, addr + off);
}

static void mux_configure32(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
                            u32 off, u32 slot, bool enable)
{
        u32 val;

        if (enable)
                val = EDMAMUX_CHCFG_ENBL << 24 | slot;
        else
                val = EDMAMUX_CHCFG_DIS;

        iowrite32(val, addr + off * 4);
}

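/*
 * Route a DMA request source ("slot") to this channel through the external
 * DMAMUX, or disable the routing when @enable is false.  Controllers
 * without a DMAMUX (drvdata->dmamuxs == 0) return early.
 */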
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
                       unsigned int slot, bool enable)
{
        u32 ch = fsl_chan->vchan.chan.chan_id;
        void __iomem *muxaddr;
        unsigned int chans_per_mux, ch_off;
        int endian_diff[4] = {3, 1, -1, -3};
        u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;

        if (!dmamux_nr)
                return;

        chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
        ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;

        if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_MUX_SWAP)
                ch_off += endian_diff[ch_off % 4];

        muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
        slot = EDMAMUX_CHCFG_SOURCE(slot);

        if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_CONFIG32)
                mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
        else
                mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}

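/*
 * Convert a dma_slave_buswidth value into the TCD ATTR field encoding,
 * i.e. log2 of the transfer size replicated into the source (SSIZE) and
 * destination (DSIZE) size fields.  An undefined width defaults to
 * 4 bytes.
 */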
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
        u32 val;

        if (addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
                addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

        val = ffs(addr_width) - 1;
        return val | (val << 8);
}

void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
{
        struct fsl_edma_desc *fsl_desc;
        int i;

        fsl_desc = to_fsl_edma_desc(vdesc);
        for (i = 0; i < fsl_desc->n_tcds; i++)
                dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
                              fsl_desc->tcd[i].ptcd);
        kfree(fsl_desc);
}

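/*
 * dmaengine device_terminate_all callback: stop the hardware request,
 * drop the in-flight descriptor and free everything still queued on the
 * virtual channel.  Channels sitting in a power domain are allowed to
 * runtime suspend again once terminated.
 */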
int fsl_edma_terminate_all(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        fsl_edma_disable_request(fsl_chan);
        fsl_chan->edesc = NULL;
        fsl_chan->idle = true;
        vchan_get_all_descriptors(&fsl_chan->vchan, &head);
        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
        vchan_dma_desc_free_list(&fsl_chan->vchan, &head);

        if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_PD)
                pm_runtime_allow(fsl_chan->pd_dev);

        return 0;
}

int fsl_edma_pause(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        if (fsl_chan->edesc) {
                fsl_edma_disable_request(fsl_chan);
                fsl_chan->status = DMA_PAUSED;
                fsl_chan->idle = true;
        }
        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
        return 0;
}

int fsl_edma_resume(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        if (fsl_chan->edesc) {
                fsl_edma_enable_request(fsl_chan);
                fsl_chan->status = DMA_IN_PROGRESS;
                fsl_chan->idle = false;
        }
        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
        return 0;
}

static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
{
        if (fsl_chan->dma_dir != DMA_NONE)
                dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
                                   fsl_chan->dma_dev_addr,
                                   fsl_chan->dma_dev_size,
                                   fsl_chan->dma_dir, 0);
        fsl_chan->dma_dir = DMA_NONE;
}

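/*
 * Map the peripheral FIFO address with dma_map_resource() before a slave
 * transfer is prepared.  The mapping is cached in the channel and reused
 * as long as the direction does not change; fsl_edma_unprep_slave_dma()
 * drops it.  Returns true on success.
 */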
static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
                                    enum dma_transfer_direction dir)
{
        struct device *dev = fsl_chan->vchan.chan.device->dev;
        enum dma_data_direction dma_dir;
        phys_addr_t addr = 0;
        u32 size = 0;

        switch (dir) {
        case DMA_MEM_TO_DEV:
                dma_dir = DMA_FROM_DEVICE;
                addr = fsl_chan->cfg.dst_addr;
                size = fsl_chan->cfg.dst_maxburst;
                break;
        case DMA_DEV_TO_MEM:
                dma_dir = DMA_TO_DEVICE;
                addr = fsl_chan->cfg.src_addr;
                size = fsl_chan->cfg.src_maxburst;
                break;
        default:
                dma_dir = DMA_NONE;
                break;
        }

        /* Already mapped for this config? */
        if (fsl_chan->dma_dir == dma_dir)
                return true;

        fsl_edma_unprep_slave_dma(fsl_chan);

        fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
        if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
                return false;
        fsl_chan->dma_dev_size = size;
        fsl_chan->dma_dir = dma_dir;

        return true;
}

int fsl_edma_slave_config(struct dma_chan *chan,
                                 struct dma_slave_config *cfg)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

        memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
        fsl_edma_unprep_slave_dma(fsl_chan);

        return 0;
}

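/*
 * Compute how many bytes of the descriptor are still left to transfer.
 * The total length is the sum of every TCD's minor-loop size times its
 * beginning major iteration count; for the in-progress descriptor the TCD
 * the engine is currently working on is located via the hardware
 * saddr/daddr and the bytes already moved are subtracted.
 */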
static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
                struct virt_dma_desc *vdesc, bool in_progress)
{
        struct fsl_edma_desc *edesc = fsl_chan->edesc;
        enum dma_transfer_direction dir = edesc->dirn;
        dma_addr_t cur_addr, dma_addr;
        size_t len, size;
        u32 nbytes = 0;
        int i;

        /* calculate the total size in this desc */
        for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) {
                nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
                if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
                        nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
                len += nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);
        }

        if (!in_progress)
                return len;

        if (dir == DMA_MEM_TO_DEV)
                cur_addr = edma_read_tcdreg(fsl_chan, saddr);
        else
                cur_addr = edma_read_tcdreg(fsl_chan, daddr);

        /* figure out which TCDs are finished and calculate the residue */
        for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
                nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
                if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
                        nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);

                size = nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);

                if (dir == DMA_MEM_TO_DEV)
                        dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
                else
                        dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);

                len -= size;
                if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
                        len += dma_addr + size - cur_addr;
                        break;
                }
        }

        return len;
}

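/*
 * dmaengine device_tx_status callback: report DMA_COMPLETE straight from
 * the cookie state, otherwise fill in the residue for the descriptor the
 * cookie refers to (hardware-accurate for the in-flight one, full length
 * for descriptors still sitting in the queue).
 */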
enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
                dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        struct virt_dma_desc *vdesc;
        enum dma_status status;
        unsigned long flags;

        status = dma_cookie_status(chan, cookie, txstate);
        if (status == DMA_COMPLETE)
                return status;

        if (!txstate)
                return fsl_chan->status;

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
        if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
                txstate->residue =
                        fsl_edma_desc_residue(fsl_chan, vdesc, true);
        else if (vdesc)
                txstate->residue =
                        fsl_edma_desc_residue(fsl_chan, vdesc, false);
        else
                txstate->residue = 0;

        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

        return fsl_chan->status;
}

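/*
 * Load a software TCD into the channel's hardware TCD registers.  The CSR
 * field is cleared first and written last, only after every other field
 * is already in place.
 */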
static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
                                  struct fsl_edma_hw_tcd *tcd)
{
        u16 csr = 0;

        /*
         * TCD parameters are stored in struct fsl_edma_hw_tcd in little
         * endian format.  However, the TCD registers must be loaded in
         * big- or little-endian order according to the eDMA engine model's
         * endianness; that byte swapping is done by the specific
         * edma_write functions.
         */
        edma_write_tcdreg(fsl_chan, 0, csr);

        edma_write_tcdreg(fsl_chan, tcd->saddr, saddr);
        edma_write_tcdreg(fsl_chan, tcd->daddr, daddr);

        edma_write_tcdreg(fsl_chan, tcd->attr, attr);
        edma_write_tcdreg(fsl_chan, tcd->soff, soff);

        edma_write_tcdreg(fsl_chan, tcd->nbytes, nbytes);
        edma_write_tcdreg(fsl_chan, tcd->slast, slast);

        edma_write_tcdreg(fsl_chan, tcd->citer, citer);
        edma_write_tcdreg(fsl_chan, tcd->biter, biter);
        edma_write_tcdreg(fsl_chan, tcd->doff, doff);

        edma_write_tcdreg(fsl_chan, tcd->dlast_sga, dlast_sga);

        if (fsl_chan->is_sw) {
                csr = le16_to_cpu(tcd->csr);
                csr |= EDMA_TCD_CSR_START;
                tcd->csr = cpu_to_le16(csr);
        }

        edma_write_tcdreg(fsl_chan, tcd->csr, csr);
}

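/*
 * Fill one software TCD with the given transfer parameters.  Values are
 * stored little endian (as hardware scatter/gather requires), multi-FIFO
 * minor-loop offsets are applied when needed, and the CSR flags for the
 * major-loop interrupt, request disable and scatter/gather chaining are
 * assembled here.
 */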
static inline
void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
                       struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
                       u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
                       u16 biter, u16 doff, u32 dlast_sga, bool major_int,
                       bool disable_req, bool enable_sg)
{
        struct dma_slave_config *cfg = &fsl_chan->cfg;
        u16 csr = 0;
        u32 burst;

        /*
         * eDMA hardware SGs require the TCDs to be stored in little
         * endian format irrespective of the register endian model.
         * So we put the value in little endian in memory and let
         * fsl_edma_set_tcd_regs() do the swapping when the registers
         * are loaded.
         */
        tcd->saddr = cpu_to_le32(src);
        tcd->daddr = cpu_to_le32(dst);

        tcd->attr = cpu_to_le16(attr);

        tcd->soff = cpu_to_le16(soff);

        if (fsl_chan->is_multi_fifo) {
                /* set mloff to support multiple FIFOs */
                burst = cfg->direction == DMA_DEV_TO_MEM ?
                                cfg->src_addr_width : cfg->dst_addr_width;
                nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
                /* enable DMLOE/SMLOE */
                if (cfg->direction == DMA_MEM_TO_DEV) {
                        nbytes |= EDMA_V3_TCD_NBYTES_DMLOE;
                        nbytes &= ~EDMA_V3_TCD_NBYTES_SMLOE;
                } else {
                        nbytes |= EDMA_V3_TCD_NBYTES_SMLOE;
                        nbytes &= ~EDMA_V3_TCD_NBYTES_DMLOE;
                }
        }

        tcd->nbytes = cpu_to_le32(nbytes);
        tcd->slast = cpu_to_le32(slast);

        tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
        tcd->doff = cpu_to_le16(doff);

        tcd->dlast_sga = cpu_to_le32(dlast_sga);

        tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
        if (major_int)
                csr |= EDMA_TCD_CSR_INT_MAJOR;

        if (disable_req)
                csr |= EDMA_TCD_CSR_D_REQ;

        if (enable_sg)
                csr |= EDMA_TCD_CSR_E_SG;

        if (fsl_chan->is_rxchan)
                csr |= EDMA_TCD_CSR_ACTIVE;

        if (fsl_chan->is_sw)
                csr |= EDMA_TCD_CSR_START;

        tcd->csr = cpu_to_le16(csr);
}

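/*
 * Allocate a descriptor with @sg_len hardware TCDs taken from the
 * channel's DMA pool.  On allocation failure everything allocated so far
 * is released and NULL is returned.
 */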
static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
                int sg_len)
{
        struct fsl_edma_desc *fsl_desc;
        int i;

        fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
        if (!fsl_desc)
                return NULL;

        fsl_desc->echan = fsl_chan;
        fsl_desc->n_tcds = sg_len;
        for (i = 0; i < sg_len; i++) {
                fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
                                        GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
                if (!fsl_desc->tcd[i].vtcd)
                        goto err;
        }
        return fsl_desc;

err:
        while (--i >= 0)
                dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
                                fsl_desc->tcd[i].ptcd);
        kfree(fsl_desc);
        return NULL;
}

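/*
 * dmaengine device_prep_dma_cyclic callback: build one TCD per period and
 * chain the last one back to the first via scatter/gather, so the
 * transfer wraps around the buffer until it is terminated.
 */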
struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
                size_t period_len, enum dma_transfer_direction direction,
                unsigned long flags)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        struct fsl_edma_desc *fsl_desc;
        dma_addr_t dma_buf_next;
        bool major_int = true;
        int sg_len, i;
        u32 src_addr, dst_addr, last_sg, nbytes;
        u16 soff, doff, iter;

        if (!is_slave_direction(direction))
                return NULL;

        if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
                return NULL;

        sg_len = buf_len / period_len;
        fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
        if (!fsl_desc)
                return NULL;
        fsl_desc->iscyclic = true;
        fsl_desc->dirn = direction;

        dma_buf_next = dma_addr;
        if (direction == DMA_MEM_TO_DEV) {
                fsl_chan->attr =
                        fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
                nbytes = fsl_chan->cfg.dst_addr_width *
                        fsl_chan->cfg.dst_maxburst;
        } else {
                fsl_chan->attr =
                        fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
                nbytes = fsl_chan->cfg.src_addr_width *
                        fsl_chan->cfg.src_maxburst;
        }

        iter = period_len / nbytes;

        for (i = 0; i < sg_len; i++) {
                if (dma_buf_next >= dma_addr + buf_len)
                        dma_buf_next = dma_addr;

                /* get next sg's physical address */
                last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;

                if (direction == DMA_MEM_TO_DEV) {
                        src_addr = dma_buf_next;
                        dst_addr = fsl_chan->dma_dev_addr;
                        soff = fsl_chan->cfg.dst_addr_width;
                        doff = fsl_chan->is_multi_fifo ? 4 : 0;
                } else if (direction == DMA_DEV_TO_MEM) {
                        src_addr = fsl_chan->dma_dev_addr;
                        dst_addr = dma_buf_next;
                        soff = fsl_chan->is_multi_fifo ? 4 : 0;
                        doff = fsl_chan->cfg.src_addr_width;
                } else {
                        /* DMA_DEV_TO_DEV */
                        src_addr = fsl_chan->cfg.src_addr;
                        dst_addr = fsl_chan->cfg.dst_addr;
                        soff = doff = 0;
                        major_int = false;
                }

                fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
                                  fsl_chan->attr, soff, nbytes, 0, iter,
                                  iter, doff, last_sg, major_int, false, true);
                dma_buf_next += period_len;
        }

        return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}

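/*
 * dmaengine device_prep_slave_sg callback: one TCD is built per
 * scatterlist entry, each chained to the next; only the last TCD raises
 * the major-loop interrupt and clears the hardware request.
 */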
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        struct fsl_edma_desc *fsl_desc;
        struct scatterlist *sg;
        u32 src_addr, dst_addr, last_sg, nbytes;
        u16 soff, doff, iter;
        int i;

        if (!is_slave_direction(direction))
                return NULL;

        if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
                return NULL;

        fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
        if (!fsl_desc)
                return NULL;
        fsl_desc->iscyclic = false;
        fsl_desc->dirn = direction;

        if (direction == DMA_MEM_TO_DEV) {
                fsl_chan->attr =
                        fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
                nbytes = fsl_chan->cfg.dst_addr_width *
                        fsl_chan->cfg.dst_maxburst;
        } else {
                fsl_chan->attr =
                        fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
                nbytes = fsl_chan->cfg.src_addr_width *
                        fsl_chan->cfg.src_maxburst;
        }

        for_each_sg(sgl, sg, sg_len, i) {
                if (direction == DMA_MEM_TO_DEV) {
                        src_addr = sg_dma_address(sg);
                        dst_addr = fsl_chan->dma_dev_addr;
                        soff = fsl_chan->cfg.dst_addr_width;
                        doff = 0;
                } else if (direction == DMA_DEV_TO_MEM) {
                        src_addr = fsl_chan->dma_dev_addr;
                        dst_addr = sg_dma_address(sg);
                        soff = 0;
                        doff = fsl_chan->cfg.src_addr_width;
                } else {
                        /* DMA_DEV_TO_DEV */
                        src_addr = fsl_chan->cfg.src_addr;
                        dst_addr = fsl_chan->cfg.dst_addr;
                        soff = 0;
                        doff = 0;
                }

                /*
                 * Choose a suitable burst length if sg_dma_len is not a
                 * multiple of the configured burst length, so that the
                 * whole transfer length is a multiple of the minor loop
                 * (burst length).
                 */
                if (sg_dma_len(sg) % nbytes) {
                        u32 width = (direction == DMA_DEV_TO_MEM) ? doff : soff;
                        u32 burst = (direction == DMA_DEV_TO_MEM) ?
                                                fsl_chan->cfg.src_maxburst :
                                                fsl_chan->cfg.dst_maxburst;
                        int j;

                        for (j = burst; j > 1; j--) {
                                if (!(sg_dma_len(sg) % (j * width))) {
                                        nbytes = j * width;
                                        break;
                                }
                        }
                        /* Set the burst size to 1 if there's no suitable one */
                        if (j == 1)
                                nbytes = width;
                }
                iter = sg_dma_len(sg) / nbytes;
                if (i < sg_len - 1) {
                        last_sg = fsl_desc->tcd[(i + 1)].ptcd;
                        fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
                                          dst_addr, fsl_chan->attr, soff,
                                          nbytes, 0, iter, iter, doff, last_sg,
                                          false, false, true);
                } else {
                        last_sg = 0;
                        fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
                                          dst_addr, fsl_chan->attr, soff,
                                          nbytes, 0, iter, iter, doff, last_sg,
                                          true, true, false);
                }
        }

        return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}

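/*
 * dmaengine device_prep_dma_memcpy callback: a single TCD with 32-byte
 * wide accesses covers the whole copy, relying on the advertised
 * copy_align and max_seg_size limits.
 */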
struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
                                                     dma_addr_t dma_dst, dma_addr_t dma_src,
                                                     size_t len, unsigned long flags)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        struct fsl_edma_desc *fsl_desc;

        fsl_desc = fsl_edma_alloc_desc(fsl_chan, 1);
        if (!fsl_desc)
                return NULL;
        fsl_desc->iscyclic = false;

        fsl_chan->is_sw = true;

        /* To match with copy_align and max_seg_size so 1 tcd is enough */
        fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
                        fsl_edma_get_tcd_attr(DMA_SLAVE_BUSWIDTH_32_BYTES),
                        32, len, 0, 1, 1, 32, 0, true, true, false);

        return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}

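/*
 * Start the next descriptor queued on the virtual channel, if any.
 * Must be called with the vchan lock held.
 */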
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
        struct virt_dma_desc *vdesc;

        lockdep_assert_held(&fsl_chan->vchan.lock);

        vdesc = vchan_next_desc(&fsl_chan->vchan);
        if (!vdesc)
                return;
        fsl_chan->edesc = to_fsl_edma_desc(vdesc);
        fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
        fsl_edma_enable_request(fsl_chan);
        fsl_chan->status = DMA_IN_PROGRESS;
        fsl_chan->idle = false;
}

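/*
 * dmaengine device_issue_pending callback: move submitted descriptors to
 * the issued list and kick the hardware if the channel is idle.  Nothing
 * is started while the channel is suspended.
 */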
void fsl_edma_issue_pending(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);

        if (unlikely(fsl_chan->pm_state != RUNNING)) {
                spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
                /* cannot submit due to suspend */
                return;
        }

        if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
                fsl_edma_xfer_desc(fsl_chan);

        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}

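/*
 * dmaengine device_alloc_chan_resources callback: create the per-channel
 * DMA pool used for hardware TCDs (32-byte aligned).
 */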
int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);

        fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
                                sizeof(struct fsl_edma_hw_tcd),
                                32, 0);
        return 0;
}

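/*
 * dmaengine device_free_chan_resources callback: quiesce the channel,
 * unroute its DMAMUX slot, free all queued descriptors and destroy the
 * TCD pool.
 */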
void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
        struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
        struct fsl_edma_engine *edma = fsl_chan->edma;
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        fsl_edma_disable_request(fsl_chan);
        if (edma->drvdata->dmamuxs)
                fsl_edma_chan_mux(fsl_chan, 0, false);
        fsl_chan->edesc = NULL;
        vchan_get_all_descriptors(&fsl_chan->vchan, &head);
        fsl_edma_unprep_slave_dma(fsl_chan);
        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

        vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
        dma_pool_destroy(fsl_chan->tcd_pool);
        fsl_chan->tcd_pool = NULL;
        fsl_chan->is_sw = false;
}

void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
        struct fsl_edma_chan *chan, *_chan;

        list_for_each_entry_safe(chan, _chan,
                                &dmadev->channels, vchan.chan.device_node) {
                list_del(&chan->vchan.chan.device_node);
                tasklet_kill(&chan->vchan.task);
        }
}

/*
 * On the 32-channel Vybrid/mpc577x eDMA version, the register offsets
 * differ from those of the 64-channel ColdFire mcf5441x eDMA.
 *
 * This function sets up the register offsets according to the declared
 * version, so it must be called from xxx_edma_probe() just after the
 * edma "version" and "membase" have been set appropriately.
 */
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
        bool is64 = !!(edma->drvdata->flags & FSL_EDMA_DRV_EDMA64);

        edma->regs.cr = edma->membase + EDMA_CR;
        edma->regs.es = edma->membase + EDMA_ES;
        edma->regs.erql = edma->membase + EDMA_ERQ;
        edma->regs.eeil = edma->membase + EDMA_EEI;

        edma->regs.serq = edma->membase + (is64 ? EDMA64_SERQ : EDMA_SERQ);
        edma->regs.cerq = edma->membase + (is64 ? EDMA64_CERQ : EDMA_CERQ);
        edma->regs.seei = edma->membase + (is64 ? EDMA64_SEEI : EDMA_SEEI);
        edma->regs.ceei = edma->membase + (is64 ? EDMA64_CEEI : EDMA_CEEI);
        edma->regs.cint = edma->membase + (is64 ? EDMA64_CINT : EDMA_CINT);
        edma->regs.cerr = edma->membase + (is64 ? EDMA64_CERR : EDMA_CERR);
        edma->regs.ssrt = edma->membase + (is64 ? EDMA64_SSRT : EDMA_SSRT);
        edma->regs.cdne = edma->membase + (is64 ? EDMA64_CDNE : EDMA_CDNE);
        edma->regs.intl = edma->membase + (is64 ? EDMA64_INTL : EDMA_INTR);
        edma->regs.errl = edma->membase + (is64 ? EDMA64_ERRL : EDMA_ERR);

        if (is64) {
                edma->regs.erqh = edma->membase + EDMA64_ERQH;
                edma->regs.eeih = edma->membase + EDMA64_EEIH;
                edma->regs.errh = edma->membase + EDMA64_ERRH;
                edma->regs.inth = edma->membase + EDMA64_INTH;
        }
}

MODULE_LICENSE("GPL v2");