dmaengine: edma: Rename set_bits and remove unused clear_bits helper
drivers/dma/edma.c
/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/edma.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>

#include <linux/platform_data/edma.h>

#include "dmaengine.h"
#include "virt-dma.h"

/* Offsets matching "struct edmacc_param" */
#define PARM_OPT                0x00
#define PARM_SRC                0x04
#define PARM_A_B_CNT            0x08
#define PARM_DST                0x0c
#define PARM_SRC_DST_BIDX       0x10
#define PARM_LINK_BCNTRLD       0x14
#define PARM_SRC_DST_CIDX       0x18
#define PARM_CCNT               0x1c

#define PARM_SIZE               0x20

/* Offsets for EDMA CC global channel registers and their shadows */
#define SH_ER                   0x00    /* 64 bits */
#define SH_ECR                  0x08    /* 64 bits */
#define SH_ESR                  0x10    /* 64 bits */
#define SH_CER                  0x18    /* 64 bits */
#define SH_EER                  0x20    /* 64 bits */
#define SH_EECR                 0x28    /* 64 bits */
#define SH_EESR                 0x30    /* 64 bits */
#define SH_SER                  0x38    /* 64 bits */
#define SH_SECR                 0x40    /* 64 bits */
#define SH_IER                  0x50    /* 64 bits */
#define SH_IECR                 0x58    /* 64 bits */
#define SH_IESR                 0x60    /* 64 bits */
#define SH_IPR                  0x68    /* 64 bits */
#define SH_ICR                  0x70    /* 64 bits */
#define SH_IEVAL                0x78
#define SH_QER                  0x80
#define SH_QEER                 0x84
#define SH_QEECR                0x88
#define SH_QEESR                0x8c
#define SH_QSER                 0x90
#define SH_QSECR                0x94
#define SH_SIZE                 0x200

/* Offsets for EDMA CC global registers */
#define EDMA_REV                0x0000
#define EDMA_CCCFG              0x0004
#define EDMA_QCHMAP             0x0200  /* 8 registers */
#define EDMA_DMAQNUM            0x0240  /* 8 registers (4 on OMAP-L1xx) */
#define EDMA_QDMAQNUM           0x0260
#define EDMA_QUETCMAP           0x0280
#define EDMA_QUEPRI             0x0284
#define EDMA_EMR                0x0300  /* 64 bits */
#define EDMA_EMCR               0x0308  /* 64 bits */
#define EDMA_QEMR               0x0310
#define EDMA_QEMCR              0x0314
#define EDMA_CCERR              0x0318
#define EDMA_CCERRCLR           0x031c
#define EDMA_EEVAL              0x0320
#define EDMA_DRAE               0x0340  /* 4 x 64 bits */
#define EDMA_QRAE               0x0380  /* 4 registers */
#define EDMA_QUEEVTENTRY        0x0400  /* 2 x 16 registers */
#define EDMA_QSTAT              0x0600  /* 2 registers */
#define EDMA_QWMTHRA            0x0620
#define EDMA_QWMTHRB            0x0624
#define EDMA_CCSTAT             0x0640

#define EDMA_M                  0x1000  /* global channel registers */
#define EDMA_ECR                0x1008
#define EDMA_ECRH               0x100C
#define EDMA_SHADOW0            0x2000  /* 4 shadow regions */
#define EDMA_PARM               0x4000  /* PaRAM entries */

#define PARM_OFFSET(param_no)   (EDMA_PARM + ((param_no) << 5))
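/*
 * Each PaRAM set occupies PARM_SIZE (0x20) bytes, so e.g. slot 1 is at
 * PARM_OFFSET(1) = 0x4000 + (1 << 5) = 0x4020 and slot 127 is at
 * 0x4000 + (127 << 5) = 0x4fe0.
 */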

#define EDMA_DCHMAP             0x0100  /* 64 registers */

/* CCCFG register */
#define GET_NUM_DMACH(x)        (x & 0x7) /* bits 0-2 */
#define GET_NUM_QDMACH(x)       ((x & 0x70) >> 4) /* bits 4-6 */
#define GET_NUM_PAENTRY(x)      ((x & 0x7000) >> 12) /* bits 12-14 */
#define GET_NUM_EVQUE(x)        ((x & 0x70000) >> 16) /* bits 16-18 */
#define GET_NUM_REGN(x)         ((x & 0x300000) >> 20) /* bits 20-21 */
#define CHMAP_EXIST             BIT(24)
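/*
 * For example, a CCCFG readback of 0x00055443 decodes via the macros
 * above as: GET_NUM_DMACH() = 3, GET_NUM_QDMACH() = 4,
 * GET_NUM_PAENTRY() = 5, GET_NUM_EVQUE() = 5, GET_NUM_REGN() = 0,
 * with CHMAP_EXIST (bit 24) clear.
 */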

/* CCSTAT register */
#define EDMA_CCSTAT_ACTV        BIT(4)

/*
 * Max of 20 segments per channel to conserve PaRAM slots.
 * Also note that MAX_NR_SG should be at least the number of periods
 * that are required for ASoC, otherwise DMA prep calls will
 * fail. Today davinci-pcm is the only user of this driver and
 * requires at least 17 slots, so we set the default to 20.
 */
#define MAX_NR_SG               20
#define EDMA_MAX_SLOTS          MAX_NR_SG
#define EDMA_DESCRIPTORS        16

#define EDMA_CHANNEL_ANY                -1      /* for edma_alloc_channel() */
#define EDMA_SLOT_ANY                   -1      /* for edma_alloc_slot() */
#define EDMA_CONT_PARAMS_ANY             1001
#define EDMA_CONT_PARAMS_FIXED_EXACT     1002
#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003

/* PaRAM slots are laid out like this */
struct edmacc_param {
        u32 opt;
        u32 src;
        u32 a_b_cnt;
        u32 dst;
        u32 src_dst_bidx;
        u32 link_bcntrld;
        u32 src_dst_cidx;
        u32 ccnt;
} __packed;

/* fields in edmacc_param.opt */
#define SAM             BIT(0)
#define DAM             BIT(1)
#define SYNCDIM         BIT(2)
#define STATIC          BIT(3)
#define EDMA_FWID       (0x07 << 8)
#define TCCMODE         BIT(11)
#define EDMA_TCC(t)     ((t) << 12)
#define TCINTEN         BIT(20)
#define ITCINTEN        BIT(21)
#define TCCHEN          BIT(22)
#define ITCCHEN         BIT(23)

struct edma_pset {
        u32                             len;
        dma_addr_t                      addr;
        struct edmacc_param             param;
};

struct edma_desc {
        struct virt_dma_desc            vdesc;
        struct list_head                node;
        enum dma_transfer_direction     direction;
        int                             cyclic;
        int                             absync;
        int                             pset_nr;
        struct edma_chan                *echan;
        int                             processed;

        /*
         * The following 4 elements are used for residue accounting.
         *
         * - processed_stat: the number of SG elements we have traversed
         * so far for accounting purposes. This is updated to match
         * processed in the completion callback and is always <= processed,
         * because processed refers to the number of transfers pending
         * (programmed to the EDMA controller), whereas processed_stat
         * tracks the number of transfers accounted for so far.
         *
         * - residue: The number of bytes left to transfer for this desc.
         *
         * - residue_stat: The residue in bytes of data we have covered
         * so far for accounting. This is updated to match residue
         * during callbacks to keep it current.
         *
         * - sg_len: Tracks the length of the current intermediate transfer;
         * this is required to update the residue in the intermediate-
         * transfer completion callback.
         */
        int                             processed_stat;
        u32                             sg_len;
        u32                             residue;
        u32                             residue_stat;

        struct edma_pset                pset[0];
};
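/*
 * Worked example of the residue accounting above: a 30-element SG list
 * with MAX_NR_SG = 20 programs 20 slots first, so processed = 20 while
 * processed_stat is still 0; when the intermediate completion fires,
 * residue is reduced by sg_len (the bytes covered by those 20 elements)
 * and processed_stat catches up to processed before the remaining 10
 * elements are programmed.
 */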

struct edma_cc;

struct edma_tc {
        struct device_node              *node;
        u16                             id;
};

struct edma_chan {
        struct virt_dma_chan            vchan;
        struct list_head                node;
        struct edma_desc                *edesc;
        struct edma_cc                  *ecc;
        struct edma_tc                  *tc;
        int                             ch_num;
        bool                            alloced;
        bool                            hw_triggered;
        int                             slot[EDMA_MAX_SLOTS];
        int                             missed;
        struct dma_slave_config         cfg;
};

struct edma_cc {
        struct device                   *dev;
        struct edma_soc_info            *info;
        void __iomem                    *base;
        int                             id;
        bool                            legacy_mode;

        /* eDMA3 resource information */
        unsigned                        num_channels;
        unsigned                        num_qchannels;
        unsigned                        num_region;
        unsigned                        num_slots;
        unsigned                        num_tc;
        bool                            chmap_exist;
        enum dma_event_q                default_queue;

        unsigned int                    ccint;
        unsigned int                    ccerrint;

        /*
         * The slot_inuse bit for each PaRAM slot is clear unless the slot
         * is in use by Linux or has been allocated for use by the DSP.
         */
        unsigned long *slot_inuse;

        struct dma_device               dma_slave;
        struct dma_device               *dma_memcpy;
        struct edma_chan                *slave_chans;
        struct edma_tc                  *tc_list;
        int                             dummy_slot;
};

/* dummy param set used to (re)initialize parameter RAM slots */
static const struct edmacc_param dummy_paramset = {
        .link_bcntrld = 0xffff,
        .ccnt = 1,
};

#define EDMA_BINDING_LEGACY     0
#define EDMA_BINDING_TPCC       1
static const u32 edma_binding_type[] = {
        [EDMA_BINDING_LEGACY] = EDMA_BINDING_LEGACY,
        [EDMA_BINDING_TPCC] = EDMA_BINDING_TPCC,
};

static const struct of_device_id edma_of_ids[] = {
        {
                .compatible = "ti,edma3",
                .data = &edma_binding_type[EDMA_BINDING_LEGACY],
        },
        {
                .compatible = "ti,edma3-tpcc",
                .data = &edma_binding_type[EDMA_BINDING_TPCC],
        },
        {}
};
MODULE_DEVICE_TABLE(of, edma_of_ids);

static const struct of_device_id edma_tptc_of_ids[] = {
        { .compatible = "ti,edma3-tptc", },
        {}
};
MODULE_DEVICE_TABLE(of, edma_tptc_of_ids);

static inline unsigned int edma_read(struct edma_cc *ecc, int offset)
{
        return (unsigned int)__raw_readl(ecc->base + offset);
}

static inline void edma_write(struct edma_cc *ecc, int offset, int val)
{
        __raw_writel(val, ecc->base + offset);
}

static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and,
                               unsigned or)
{
        unsigned val = edma_read(ecc, offset);

        val &= and;
        val |= or;
        edma_write(ecc, offset, val);
}

static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and)
{
        unsigned val = edma_read(ecc, offset);

        val &= and;
        edma_write(ecc, offset, val);
}

static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or)
{
        unsigned val = edma_read(ecc, offset);

        val |= or;
        edma_write(ecc, offset, val);
}

static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset,
                                           int i)
{
        return edma_read(ecc, offset + (i << 2));
}

static inline void edma_write_array(struct edma_cc *ecc, int offset, int i,
                                    unsigned val)
{
        edma_write(ecc, offset + (i << 2), val);
}

static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i,
                                     unsigned and, unsigned or)
{
        edma_modify(ecc, offset + (i << 2), and, or);
}

static inline void edma_or_array(struct edma_cc *ecc, int offset, int i,
                                 unsigned or)
{
        edma_or(ecc, offset + (i << 2), or);
}

static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j,
                                  unsigned or)
{
        edma_or(ecc, offset + ((i * 2 + j) << 2), or);
}

static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i,
                                     int j, unsigned val)
{
        edma_write(ecc, offset + ((i * 2 + j) << 2), val);
}

static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset)
{
        return edma_read(ecc, EDMA_SHADOW0 + offset);
}

static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc,
                                                   int offset, int i)
{
        return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2));
}

static inline void edma_shadow0_write(struct edma_cc *ecc, int offset,
                                      unsigned val)
{
        edma_write(ecc, EDMA_SHADOW0 + offset, val);
}

static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset,
                                            int i, unsigned val)
{
        edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val);
}

static inline unsigned int edma_param_read(struct edma_cc *ecc, int offset,
                                           int param_no)
{
        return edma_read(ecc, EDMA_PARM + offset + (param_no << 5));
}

static inline void edma_param_write(struct edma_cc *ecc, int offset,
                                    int param_no, unsigned val)
{
        edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val);
}

static inline void edma_param_modify(struct edma_cc *ecc, int offset,
                                     int param_no, unsigned and, unsigned or)
{
        edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or);
}

static inline void edma_param_and(struct edma_cc *ecc, int offset, int param_no,
                                  unsigned and)
{
        edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and);
}

static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no,
                                 unsigned or)
{
        edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
}

static inline void edma_set_bits(int offset, int len, unsigned long *p)
{
        for (; len > 0; len--)
                set_bit(offset + (len - 1), p);
}
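/*
 * Helper usage example: edma_set_bits(8, 4, bitmap) sets bits 8..11,
 * e.g. to mark a reserved range of PaRAM slots as in use in a bitmap
 * such as ecc->slot_inuse.
 */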

static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
                                          int priority)
{
        int bit = queue_no * 4;

        edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
}

static void edma_set_chmap(struct edma_chan *echan, int slot)
{
        struct edma_cc *ecc = echan->ecc;
        int channel = EDMA_CHAN_SLOT(echan->ch_num);

        if (ecc->chmap_exist) {
                slot = EDMA_CHAN_SLOT(slot);
                edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5));
        }
}

static void edma_setup_interrupt(struct edma_chan *echan, bool enable)
{
        struct edma_cc *ecc = echan->ecc;
        int channel = EDMA_CHAN_SLOT(echan->ch_num);

        if (enable) {
                edma_shadow0_write_array(ecc, SH_ICR, channel >> 5,
                                         BIT(channel & 0x1f));
                edma_shadow0_write_array(ecc, SH_IESR, channel >> 5,
                                         BIT(channel & 0x1f));
        } else {
                edma_shadow0_write_array(ecc, SH_IECR, channel >> 5,
                                         BIT(channel & 0x1f));
        }
}
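/*
 * Throughout this driver a channel number is split into a 32-bit
 * register bank (channel >> 5) and a bit mask within that bank
 * (BIT(channel & 0x1f)); channel 37, for example, maps to bank 1,
 * mask BIT(5).
 */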

/*
 * PaRAM slot management functions
 */
static void edma_write_slot(struct edma_cc *ecc, unsigned slot,
                            const struct edmacc_param *param)
{
        slot = EDMA_CHAN_SLOT(slot);
        if (slot >= ecc->num_slots)
                return;
        memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE);
}

static void edma_read_slot(struct edma_cc *ecc, unsigned slot,
                           struct edmacc_param *param)
{
        slot = EDMA_CHAN_SLOT(slot);
        if (slot >= ecc->num_slots)
                return;
        memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE);
}

/**
 * edma_alloc_slot - allocate DMA parameter RAM
 * @ecc: pointer to edma_cc struct
 * @slot: specific slot to allocate; negative for "any unused slot"
 *
 * This allocates a parameter RAM slot, initializing it to hold a
 * dummy transfer.  Slots allocated using this routine have not been
 * mapped to a hardware DMA channel, and will normally be used by
 * linking to them from a slot associated with a DMA channel.
 *
 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
 * slots may be allocated on behalf of DSP firmware.
 *
 * Returns the number of the slot, else negative errno.
 */
static int edma_alloc_slot(struct edma_cc *ecc, int slot)
{
        if (slot >= 0) {
                slot = EDMA_CHAN_SLOT(slot);
                /* Requesting entry PaRAM slot for a HW triggered channel. */
                if (ecc->chmap_exist && slot < ecc->num_channels)
                        slot = EDMA_SLOT_ANY;
        }

        if (slot < 0) {
                if (ecc->chmap_exist)
                        slot = 0;
                else
                        slot = ecc->num_channels;
                for (;;) {
                        slot = find_next_zero_bit(ecc->slot_inuse,
                                                  ecc->num_slots,
                                                  slot);
                        if (slot == ecc->num_slots)
                                return -ENOMEM;
                        if (!test_and_set_bit(slot, ecc->slot_inuse))
                                break;
                }
        } else if (slot >= ecc->num_slots) {
                return -EINVAL;
        } else if (test_and_set_bit(slot, ecc->slot_inuse)) {
                return -EBUSY;
        }

        edma_write_slot(ecc, slot, &dummy_paramset);

        return EDMA_CTLR_CHAN(ecc->id, slot);
}

static void edma_free_slot(struct edma_cc *ecc, unsigned slot)
{
        slot = EDMA_CHAN_SLOT(slot);
        if (slot >= ecc->num_slots)
                return;

        edma_write_slot(ecc, slot, &dummy_paramset);
        clear_bit(slot, ecc->slot_inuse);
}

/**
 * edma_link - link one parameter RAM slot to another
 * @ecc: pointer to edma_cc struct
 * @from: parameter RAM slot originating the link
 * @to: parameter RAM slot which is the link target
 *
 * The originating slot should not be part of any active DMA transfer.
 */
static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to)
{
        if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to)))
                dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n");

        from = EDMA_CHAN_SLOT(from);
        to = EDMA_CHAN_SLOT(to);
        if (from >= ecc->num_slots || to >= ecc->num_slots)
                return;

        edma_param_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000,
                          PARM_OFFSET(to));
}
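/*
 * For example, linking a slot to slot 2 writes PARM_OFFSET(2) = 0x4040
 * into the low half-word (the LINK field) of the originating slot's
 * LINK_BCNTRLD word; the 0xffff0000 mask above preserves the BCNTRLD
 * half.
 */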

/**
 * edma_get_position - returns the current transfer point
 * @ecc: pointer to edma_cc struct
 * @slot: parameter RAM slot being examined
 * @dst:  true selects the dest position, false the source
 *
 * Returns the position of the current active slot
 */
static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot,
                                    bool dst)
{
        u32 offs;

        slot = EDMA_CHAN_SLOT(slot);
        offs = PARM_OFFSET(slot);
        offs += dst ? PARM_DST : PARM_SRC;

        return edma_read(ecc, offs);
}

/*
 * Channels with event associations will be triggered by their hardware
 * events, and channels without such associations will be triggered by
 * software.  (At this writing there is no interface for using software
 * triggers except with channels that don't support hardware triggers.)
 */
static void edma_start(struct edma_chan *echan)
{
        struct edma_cc *ecc = echan->ecc;
        int channel = EDMA_CHAN_SLOT(echan->ch_num);
        int j = (channel >> 5);
        unsigned int mask = BIT(channel & 0x1f);

        if (!echan->hw_triggered) {
                /* EDMA channels without event association */
                dev_dbg(ecc->dev, "ESR%d %08x\n", j,
                        edma_shadow0_read_array(ecc, SH_ESR, j));
                edma_shadow0_write_array(ecc, SH_ESR, j, mask);
        } else {
                /* EDMA channel with event association */
                dev_dbg(ecc->dev, "ER%d %08x\n", j,
                        edma_shadow0_read_array(ecc, SH_ER, j));
                /* Clear any pending event or error */
                edma_write_array(ecc, EDMA_ECR, j, mask);
                edma_write_array(ecc, EDMA_EMCR, j, mask);
                /* Clear any SER */
                edma_shadow0_write_array(ecc, SH_SECR, j, mask);
                edma_shadow0_write_array(ecc, SH_EESR, j, mask);
                dev_dbg(ecc->dev, "EER%d %08x\n", j,
                        edma_shadow0_read_array(ecc, SH_EER, j));
        }
}

static void edma_stop(struct edma_chan *echan)
{
        struct edma_cc *ecc = echan->ecc;
        int channel = EDMA_CHAN_SLOT(echan->ch_num);
        int j = (channel >> 5);
        unsigned int mask = BIT(channel & 0x1f);

        edma_shadow0_write_array(ecc, SH_EECR, j, mask);
        edma_shadow0_write_array(ecc, SH_ECR, j, mask);
        edma_shadow0_write_array(ecc, SH_SECR, j, mask);
        edma_write_array(ecc, EDMA_EMCR, j, mask);

        /* clear possibly pending completion interrupt */
        edma_shadow0_write_array(ecc, SH_ICR, j, mask);

        dev_dbg(ecc->dev, "EER%d %08x\n", j,
                edma_shadow0_read_array(ecc, SH_EER, j));

        /* REVISIT:  consider guarding against inappropriate event
         * chaining by overwriting with dummy_paramset.
         */
}

/*
 * Temporarily disable EDMA hardware events on the specified channel,
 * preventing them from triggering new transfers
 */
static void edma_pause(struct edma_chan *echan)
{
        int channel = EDMA_CHAN_SLOT(echan->ch_num);
        unsigned int mask = BIT(channel & 0x1f);

        edma_shadow0_write_array(echan->ecc, SH_EECR, channel >> 5, mask);
}

/* Re-enable EDMA hardware events on the specified channel.  */
static void edma_resume(struct edma_chan *echan)
{
        int channel = EDMA_CHAN_SLOT(echan->ch_num);
        unsigned int mask = BIT(channel & 0x1f);

        edma_shadow0_write_array(echan->ecc, SH_EESR, channel >> 5, mask);
}

static void edma_trigger_channel(struct edma_chan *echan)
{
        struct edma_cc *ecc = echan->ecc;
        int channel = EDMA_CHAN_SLOT(echan->ch_num);
        unsigned int mask = BIT(channel & 0x1f);

        edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask);

        dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5),
                edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5)));
}

static void edma_clean_channel(struct edma_chan *echan)
{
        struct edma_cc *ecc = echan->ecc;
        int channel = EDMA_CHAN_SLOT(echan->ch_num);
        int j = (channel >> 5);
        unsigned int mask = BIT(channel & 0x1f);

        dev_dbg(ecc->dev, "EMR%d %08x\n", j, edma_read_array(ecc, EDMA_EMR, j));
        edma_shadow0_write_array(ecc, SH_ECR, j, mask);
        /* Clear the corresponding EMR bits */
        edma_write_array(ecc, EDMA_EMCR, j, mask);
        /* Clear any SER */
        edma_shadow0_write_array(ecc, SH_SECR, j, mask);
        edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
}

/* Move channel to a specific event queue */
static void edma_assign_channel_eventq(struct edma_chan *echan,
                                       enum dma_event_q eventq_no)
{
        struct edma_cc *ecc = echan->ecc;
        int channel = EDMA_CHAN_SLOT(echan->ch_num);
        int bit = (channel & 0x7) * 4;

        /* default to low priority queue */
        if (eventq_no == EVENTQ_DEFAULT)
                eventq_no = ecc->default_queue;
        if (eventq_no >= ecc->num_tc)
                return;

        eventq_no &= 7;
        edma_modify_array(ecc, EDMA_DMAQNUM, (channel >> 3), ~(0x7 << bit),
                          eventq_no << bit);
}
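/*
 * Each DMAQNUM register packs eight 4-bit queue-number fields, so the
 * register index is channel >> 3 and the field offset within it is
 * (channel & 0x7) * 4; channel 10, for example, lands in DMAQNUM[1]
 * at bit offset 8.
 */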

static int edma_alloc_channel(struct edma_chan *echan,
                              enum dma_event_q eventq_no)
{
        struct edma_cc *ecc = echan->ecc;
        int channel = EDMA_CHAN_SLOT(echan->ch_num);

        /* ensure access through shadow region 0 */
        edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));

        /* ensure no events are pending */
        edma_stop(echan);

        edma_setup_interrupt(echan, true);

        edma_assign_channel_eventq(echan, eventq_no);

        return 0;
}

static void edma_free_channel(struct edma_chan *echan)
{
        /* ensure no events are pending */
        edma_stop(echan);
        /* REVISIT should probably take out of shadow region 0 */
        edma_setup_interrupt(echan, false);
}

static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
        return container_of(d, struct edma_cc, dma_slave);
}

static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
        return container_of(c, struct edma_chan, vchan.chan);
}

static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx)
{
        return container_of(tx, struct edma_desc, vdesc.tx);
}

static void edma_desc_free(struct virt_dma_desc *vdesc)
{
        kfree(container_of(vdesc, struct edma_desc, vdesc));
}

/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
        struct edma_cc *ecc = echan->ecc;
        struct virt_dma_desc *vdesc;
        struct edma_desc *edesc;
        struct device *dev = echan->vchan.chan.device->dev;
        int i, j, left, nslots;

        if (!echan->edesc) {
                /* Setup is needed for the first transfer */
                vdesc = vchan_next_desc(&echan->vchan);
                if (!vdesc)
                        return;
                list_del(&vdesc->node);
                echan->edesc = to_edma_desc(&vdesc->tx);
        }

        edesc = echan->edesc;

        /* Find out how many left */
        left = edesc->pset_nr - edesc->processed;
        nslots = min(MAX_NR_SG, left);
        edesc->sg_len = 0;

        /* Write descriptor PaRAM set(s) */
        for (i = 0; i < nslots; i++) {
                j = i + edesc->processed;
                edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param);
                edesc->sg_len += edesc->pset[j].len;
                dev_vdbg(dev,
                         "\n pset[%d]:\n"
                         "  chnum\t%d\n"
                         "  slot\t%d\n"
                         "  opt\t%08x\n"
                         "  src\t%08x\n"
                         "  dst\t%08x\n"
                         "  abcnt\t%08x\n"
                         "  ccnt\t%08x\n"
                         "  bidx\t%08x\n"
                         "  cidx\t%08x\n"
                         "  lkrld\t%08x\n",
                         j, echan->ch_num, echan->slot[i],
                         edesc->pset[j].param.opt,
                         edesc->pset[j].param.src,
                         edesc->pset[j].param.dst,
                         edesc->pset[j].param.a_b_cnt,
                         edesc->pset[j].param.ccnt,
                         edesc->pset[j].param.src_dst_bidx,
                         edesc->pset[j].param.src_dst_cidx,
                         edesc->pset[j].param.link_bcntrld);
                /* Link to the next slot if not the last set */
                if (i != (nslots - 1))
                        edma_link(ecc, echan->slot[i], echan->slot[i + 1]);
        }

        edesc->processed += nslots;

        /*
         * If this is the last set in a set of SG-list transactions, set up
         * a link to the dummy slot; this results in all future events
         * being absorbed, which is OK because we're done.
         */
        if (edesc->processed == edesc->pset_nr) {
                if (edesc->cyclic)
                        edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]);
                else
                        edma_link(ecc, echan->slot[nslots - 1],
                                  echan->ecc->dummy_slot);
        }

        if (echan->missed) {
                /*
                 * This happens due to setup times between intermediate
                 * transfers in long SG lists which have to be broken up into
                 * transfers of MAX_NR_SG
                 */
                dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
                edma_clean_channel(echan);
                edma_stop(echan);
                edma_start(echan);
                edma_trigger_channel(echan);
                echan->missed = 0;
        } else if (edesc->processed <= MAX_NR_SG) {
                dev_dbg(dev, "first transfer starting on channel %d\n",
                        echan->ch_num);
                edma_start(echan);
        } else {
                dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
                        echan->ch_num, edesc->processed);
                edma_resume(echan);
        }
}

static int edma_terminate_all(struct dma_chan *chan)
{
        struct edma_chan *echan = to_edma_chan(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&echan->vchan.lock, flags);

        /*
         * Stop DMA activity: we assume the callback will not be called
         * after this function returns (even if it does, it will see that
         * echan->edesc is NULL and exit).
         */
        if (echan->edesc) {
                edma_stop(echan);
                /* Move the cyclic channel back to default queue */
                if (!echan->tc && echan->edesc->cyclic)
                        edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);
                /*
                 * free the running request descriptor
                 * since it is not in any of the vdesc lists
                 */
                edma_desc_free(&echan->edesc->vdesc);
                echan->edesc = NULL;
        }

        vchan_get_all_descriptors(&echan->vchan, &head);
        spin_unlock_irqrestore(&echan->vchan.lock, flags);
        vchan_dma_desc_free_list(&echan->vchan, &head);

        return 0;
}

static void edma_synchronize(struct dma_chan *chan)
{
        struct edma_chan *echan = to_edma_chan(chan);

        vchan_synchronize(&echan->vchan);
}

static int edma_slave_config(struct dma_chan *chan,
        struct dma_slave_config *cfg)
{
        struct edma_chan *echan = to_edma_chan(chan);

        if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
            cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
                return -EINVAL;

        memcpy(&echan->cfg, cfg, sizeof(echan->cfg));

        return 0;
}

static int edma_dma_pause(struct dma_chan *chan)
{
        struct edma_chan *echan = to_edma_chan(chan);

        if (!echan->edesc)
                return -EINVAL;

        edma_pause(echan);
        return 0;
}

static int edma_dma_resume(struct dma_chan *chan)
{
        struct edma_chan *echan = to_edma_chan(chan);

        edma_resume(echan);
        return 0;
}

/*
 * A PaRAM set configuration abstraction used by other modes
 * @chan: Channel whose PaRAM set we're configuring
 * @epset: PaRAM set to initialize and set up
 * @src_addr: Source address of the DMA
 * @dst_addr: Destination address of the DMA
 * @burst: In units of acnt, how much to send
 * @acnt: The FIFO (device) width of the transfer
 * @dma_length: Total length of the DMA transfer
 * @direction: Direction of the transfer
 */
static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
                            dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
                            unsigned int acnt, unsigned int dma_length,
                            enum dma_transfer_direction direction)
{
        struct edma_chan *echan = to_edma_chan(chan);
        struct device *dev = chan->device->dev;
        struct edmacc_param *param = &epset->param;
        int bcnt, ccnt, cidx;
        int src_bidx, dst_bidx, src_cidx, dst_cidx;
        int absync;

        /* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
        if (!burst)
                burst = 1;
        /*
         * If the maxburst is equal to the fifo width, use
         * A-synced transfers. This allows for large contiguous
         * buffer transfers using only one PaRAM set.
         */
        if (burst == 1) {
                /*
                 * For the A-sync case, bcnt and ccnt are the remainder
                 * and quotient respectively of the division of
                 * (dma_length / acnt) by (SZ_64K - 1). This is so
                 * that in case bcnt overflows, we have ccnt to use.
                 * Note: bcntrld is used only in A-sync transfers, and it
                 * only applies for sg_dma_len(sg) >= SZ_64K.
                 * In this case, the approach adopted is: bcnt for the
                 * first frame will be the remainder below. Then for
                 * every successive frame, bcnt will be SZ_64K - 1. This
                 * is assured as bcntrld = 0xffff at the end of the function.
                 */
                absync = false;
                ccnt = dma_length / acnt / (SZ_64K - 1);
                bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
                /*
                 * If bcnt is non-zero, we have a remainder and hence an
                 * extra frame to transfer, so increment ccnt.
                 */
                if (bcnt)
                        ccnt++;
                else
                        bcnt = SZ_64K - 1;
                cidx = acnt;
        } else {
                /*
                 * If maxburst is greater than the fifo address_width,
                 * use AB-synced transfers where A count is the fifo
                 * address_width and B count is the maxburst. In this
                 * case, we are limited to transfers of C count frames
                 * of (address_width * maxburst) where C count is limited
                 * to SZ_64K-1. This places an upper bound on the length
                 * of an SG segment that can be handled.
                 */
                absync = true;
                bcnt = burst;
                ccnt = dma_length / (acnt * bcnt);
                if (ccnt > (SZ_64K - 1)) {
                        dev_err(dev, "Exceeded max SG segment size\n");
                        return -EINVAL;
                }
                cidx = acnt * bcnt;
        }
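        /*
         * Worked example for the A-sync case above: dma_length = 200000
         * and acnt = 1 gives ccnt = 200000 / 65535 = 3 and
         * bcnt = 200000 - 3 * 65535 = 3395; bcnt is non-zero, so ccnt
         * becomes 4. The first frame moves 3395 elements and bcntrld
         * reloads 65535 for the remaining three frames:
         * 3395 + 3 * 65535 = 200000.
         */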

        epset->len = dma_length;

        if (direction == DMA_MEM_TO_DEV) {
                src_bidx = acnt;
                src_cidx = cidx;
                dst_bidx = 0;
                dst_cidx = 0;
                epset->addr = src_addr;
        } else if (direction == DMA_DEV_TO_MEM)  {
                src_bidx = 0;
                src_cidx = 0;
                dst_bidx = acnt;
                dst_cidx = cidx;
                epset->addr = dst_addr;
        } else if (direction == DMA_MEM_TO_MEM)  {
                src_bidx = acnt;
                src_cidx = cidx;
                dst_bidx = acnt;
                dst_cidx = cidx;
        } else {
                dev_err(dev, "%s: direction not implemented yet\n", __func__);
                return -EINVAL;
        }

        param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
        /* Configure A or AB synchronized transfers */
        if (absync)
                param->opt |= SYNCDIM;

        param->src = src_addr;
        param->dst = dst_addr;

        param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
        param->src_dst_cidx = (dst_cidx << 16) | src_cidx;

        param->a_b_cnt = bcnt << 16 | acnt;
        param->ccnt = ccnt;
        /*
         * The only time the (bcntrld) auto reload is required is the
         * A-sync case, and there a reload value of exactly SZ_64K - 1
         * is needed. The link field is left unset here and is populated
         * later by edma_execute.
         */
        param->link_bcntrld = 0xffffffff;
        return absync;
}

static struct dma_async_tx_descriptor *edma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl,
        unsigned int sg_len, enum dma_transfer_direction direction,
        unsigned long tx_flags, void *context)
{
        struct edma_chan *echan = to_edma_chan(chan);
        struct device *dev = chan->device->dev;
        struct edma_desc *edesc;
        dma_addr_t src_addr = 0, dst_addr = 0;
        enum dma_slave_buswidth dev_width;
        u32 burst;
        struct scatterlist *sg;
        int i, nslots, ret;

        if (unlikely(!echan || !sgl || !sg_len))
                return NULL;

        if (direction == DMA_DEV_TO_MEM) {
                src_addr = echan->cfg.src_addr;
                dev_width = echan->cfg.src_addr_width;
                burst = echan->cfg.src_maxburst;
        } else if (direction == DMA_MEM_TO_DEV) {
                dst_addr = echan->cfg.dst_addr;
                dev_width = echan->cfg.dst_addr_width;
                burst = echan->cfg.dst_maxburst;
        } else {
                dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
                return NULL;
        }

        if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
                dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
                return NULL;
        }

        edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]),
                        GFP_ATOMIC);
        if (!edesc)
                return NULL;

        edesc->pset_nr = sg_len;
        edesc->residue = 0;
        edesc->direction = direction;
        edesc->echan = echan;

        /* Allocate a PaRAM slot, if needed */
        nslots = min_t(unsigned, MAX_NR_SG, sg_len);

        for (i = 0; i < nslots; i++) {
                if (echan->slot[i] < 0) {
                        echan->slot[i] =
                                edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
                        if (echan->slot[i] < 0) {
                                kfree(edesc);
                                dev_err(dev, "%s: Failed to allocate slot\n",
                                        __func__);
                                return NULL;
                        }
                }
        }

        /* Configure PaRAM sets for each SG */
        for_each_sg(sgl, sg, sg_len, i) {
                /* Get address for each SG */
                if (direction == DMA_DEV_TO_MEM)
                        dst_addr = sg_dma_address(sg);
                else
                        src_addr = sg_dma_address(sg);

                ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
                                       dst_addr, burst, dev_width,
                                       sg_dma_len(sg), direction);
                if (ret < 0) {
                        kfree(edesc);
                        return NULL;
                }

                edesc->absync = ret;
                edesc->residue += sg_dma_len(sg);

                if (i == sg_len - 1)
                        /* Enable completion interrupt */
                        edesc->pset[i].param.opt |= TCINTEN;
                else if (!((i+1) % MAX_NR_SG))
                        /*
                         * Enable early completion interrupt for the
                         * intermediate set. In this case the driver will be
                         * notified when the PaRAM set is submitted to TC.
                         * This allows more time to set up the next set of
                         * slots.
                         */
                        edesc->pset[i].param.opt |= (TCINTEN | TCCMODE);
        }
        edesc->residue_stat = edesc->residue;

        return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
        struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
        size_t len, unsigned long tx_flags)
{
        int ret, nslots;
        struct edma_desc *edesc;
        struct device *dev = chan->device->dev;
        struct edma_chan *echan = to_edma_chan(chan);
        unsigned int width, pset_len;

        if (unlikely(!echan || !len))
                return NULL;

        if (len < SZ_64K) {
                /*
                 * A transfer size of less than 64K can be handled with one
                 * PaRAM slot and with one burst.
                 * ACNT = length
                 */
                width = len;
                pset_len = len;
                nslots = 1;
        } else {
                /*
                 * A transfer size bigger than 64K is handled with at most
                 * two PaRAM slots.
                 * slot1: (full_length / 32767) bursts of 32767 bytes each.
                 *        ACNT = 32767, length1: (full_length / 32767) * 32767
                 * slot2: the remaining amount of data after slot1.
                 *        ACNT = full_length - length1, length2 = ACNT
                 *
                 * When full_length is a multiple of 32767, one slot can be
                 * used to complete the transfer.
                 */
                width = SZ_32K - 1;
                pset_len = rounddown(len, width);
                /* One slot is enough for lengths multiple of (SZ_32K - 1) */
                if (unlikely(pset_len == len))
                        nslots = 1;
                else
                        nslots = 2;
        }
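        /*
         * Example: len = 100000 gives width = 32767 and
         * pset_len = rounddown(100000, 32767) = 98301, so the first
         * slot moves 3 * 32767 bytes and the second slot moves the
         * remaining 1699 bytes.
         */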

        edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
                        GFP_ATOMIC);
        if (!edesc)
                return NULL;

        edesc->pset_nr = nslots;
        edesc->residue = edesc->residue_stat = len;
        edesc->direction = DMA_MEM_TO_MEM;
        edesc->echan = echan;

        ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
                               width, pset_len, DMA_MEM_TO_MEM);
        if (ret < 0) {
                kfree(edesc);
                return NULL;
        }

        edesc->absync = ret;

        edesc->pset[0].param.opt |= ITCCHEN;
        if (nslots == 1) {
                /* Enable transfer complete interrupt */
                edesc->pset[0].param.opt |= TCINTEN;
        } else {
                /* Enable transfer complete chaining for the first slot */
                edesc->pset[0].param.opt |= TCCHEN;

                if (echan->slot[1] < 0) {
                        echan->slot[1] = edma_alloc_slot(echan->ecc,
                                                         EDMA_SLOT_ANY);
                        if (echan->slot[1] < 0) {
                                kfree(edesc);
                                dev_err(dev, "%s: Failed to allocate slot\n",
                                        __func__);
                                return NULL;
                        }
                }
                dest += pset_len;
                src += pset_len;
                pset_len = width = len % (SZ_32K - 1);

                ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
                                       width, pset_len, DMA_MEM_TO_MEM);
                if (ret < 0) {
                        kfree(edesc);
                        return NULL;
                }

                edesc->pset[1].param.opt |= ITCCHEN;
                edesc->pset[1].param.opt |= TCINTEN;
        }

        return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction direction,
        unsigned long tx_flags)
{
        struct edma_chan *echan = to_edma_chan(chan);
        struct device *dev = chan->device->dev;
        struct edma_desc *edesc;
        dma_addr_t src_addr, dst_addr;
        enum dma_slave_buswidth dev_width;
        bool use_intermediate = false;
        u32 burst;
        int i, ret, nslots;

        if (unlikely(!echan || !buf_len || !period_len))
                return NULL;

        if (direction == DMA_DEV_TO_MEM) {
                src_addr = echan->cfg.src_addr;
                dst_addr = buf_addr;
                dev_width = echan->cfg.src_addr_width;
                burst = echan->cfg.src_maxburst;
        } else if (direction == DMA_MEM_TO_DEV) {
                src_addr = buf_addr;
                dst_addr = echan->cfg.dst_addr;
                dev_width = echan->cfg.dst_addr_width;
                burst = echan->cfg.dst_maxburst;
        } else {
                dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
                return NULL;
        }

        if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
                dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
                return NULL;
        }

        if (unlikely(buf_len % period_len)) {
                dev_err(dev, "Buffer length should be a multiple of the period length\n");
                return NULL;
        }

        nslots = (buf_len / period_len) + 1;

        /*
         * Cyclic DMA users such as audio cannot tolerate delays introduced
         * by cases where the number of periods is more than the maximum
         * number of SGs the EDMA driver can handle at a time. For DMA types
         * such as Slave SGs, such delays are tolerable and synchronized,
         * but the synchronization is difficult to achieve with Cyclic and
         * cannot be guaranteed, so we error out early.
         */
        if (nslots > MAX_NR_SG) {
                /*
                 * If the burst and period sizes are the same, we can put
                 * the full buffer into a single period and activate
                 * intermediate interrupts. This will produce interrupts
                 * after each burst, which is also after each desired period.
                 */
                if (burst == period_len) {
                        period_len = buf_len;
                        nslots = 2;
                        use_intermediate = true;
                } else {
                        return NULL;
                }
        }
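        /*
         * Example: buf_len = 8192 with period_len = 2048 gives
         * nslots = 5 (four periods plus the cyclic link slot), while
         * period_len = 256 would need 33 slots and is only accepted
         * via the burst == period_len fallback above.
         */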

        edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
                        GFP_ATOMIC);
        if (!edesc)
                return NULL;

        edesc->cyclic = 1;
        edesc->pset_nr = nslots;
        edesc->residue = edesc->residue_stat = buf_len;
        edesc->direction = direction;
        edesc->echan = echan;

        dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
                __func__, echan->ch_num, nslots, period_len, buf_len);

        for (i = 0; i < nslots; i++) {
                /* Allocate a PaRAM slot, if needed */
                if (echan->slot[i] < 0) {
                        echan->slot[i] =
                                edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
                        if (echan->slot[i] < 0) {
                                kfree(edesc);
                                dev_err(dev, "%s: Failed to allocate slot\n",
                                        __func__);
                                return NULL;
                        }
                }

                if (i == nslots - 1) {
                        memcpy(&edesc->pset[i], &edesc->pset[0],
                               sizeof(edesc->pset[0]));
                        break;
                }

                ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
                                       dst_addr, burst, dev_width, period_len,
                                       direction);
                if (ret < 0) {
                        kfree(edesc);
                        return NULL;
                }

                if (direction == DMA_DEV_TO_MEM)
                        dst_addr += period_len;
                else
                        src_addr += period_len;

                dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
                dev_vdbg(dev,
                        "\n pset[%d]:\n"
                        "  chnum\t%d\n"
                        "  slot\t%d\n"
                        "  opt\t%08x\n"
                        "  src\t%08x\n"
                        "  dst\t%08x\n"
                        "  abcnt\t%08x\n"
                        "  ccnt\t%08x\n"
                        "  bidx\t%08x\n"
                        "  cidx\t%08x\n"
                        "  lkrld\t%08x\n",
                        i, echan->ch_num, echan->slot[i],
                        edesc->pset[i].param.opt,
                        edesc->pset[i].param.src,
                        edesc->pset[i].param.dst,
                        edesc->pset[i].param.a_b_cnt,
                        edesc->pset[i].param.ccnt,
                        edesc->pset[i].param.src_dst_bidx,
                        edesc->pset[i].param.src_dst_cidx,
                        edesc->pset[i].param.link_bcntrld);

                edesc->absync = ret;

                /*
                 * Enable the period interrupt only if it is requested
                 */
                if (tx_flags & DMA_PREP_INTERRUPT) {
                        edesc->pset[i].param.opt |= TCINTEN;

                        /* Also enable intermediate interrupts if necessary */
                        if (use_intermediate)
                                edesc->pset[i].param.opt |= ITCINTEN;
                }
        }

        /* Place the cyclic channel in the highest priority queue */
        if (!echan->tc)
                edma_assign_channel_eventq(echan, EVENTQ_0);

        return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

1392 static void edma_completion_handler(struct edma_chan *echan)
1393 {
1394         struct device *dev = echan->vchan.chan.device->dev;
1395         struct edma_desc *edesc;
1396
1397         spin_lock(&echan->vchan.lock);
1398         edesc = echan->edesc;
1399         if (edesc) {
1400                 if (edesc->cyclic) {
1401                         vchan_cyclic_callback(&edesc->vdesc);
1402                         spin_unlock(&echan->vchan.lock);
1403                         return;
1404                 } else if (edesc->processed == edesc->pset_nr) {
1405                         edesc->residue = 0;
1406                         edma_stop(echan);
1407                         vchan_cookie_complete(&edesc->vdesc);
1408                         echan->edesc = NULL;
1409
1410                         dev_dbg(dev, "Transfer completed on channel %d\n",
1411                                 echan->ch_num);
1412                 } else {
1413                         dev_dbg(dev, "Sub transfer completed on channel %d\n",
1414                                 echan->ch_num);
1415
1416                         edma_pause(echan);
1417
1418                         /* Update statistics for tx_status */
1419                         edesc->residue -= edesc->sg_len;
1420                         edesc->residue_stat = edesc->residue;
1421                         edesc->processed_stat = edesc->processed;
1422                 }
1423                 edma_execute(echan);
1424         }
1425
1426         spin_unlock(&echan->vchan.lock);
1427 }

/* eDMA interrupt handler */
static irqreturn_t dma_irq_handler(int irq, void *data)
{
	struct edma_cc *ecc = data;
	int ctlr;
	u32 sh_ier;
	u32 sh_ipr;
	u32 bank;

	ctlr = ecc->id;
	if (ctlr < 0)
		return IRQ_NONE;

	dev_vdbg(ecc->dev, "dma_irq_handler\n");

	sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0);
	if (!sh_ipr) {
		sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1);
		if (!sh_ipr)
			return IRQ_NONE;
		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1);
		bank = 1;
	} else {
		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0);
		bank = 0;
	}

	do {
		u32 slot;
		u32 channel;

		slot = __ffs(sh_ipr);
		sh_ipr &= ~(BIT(slot));

		if (sh_ier & BIT(slot)) {
			channel = (bank << 5) | slot;
			/* Clear the corresponding IPR bits */
			edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot));
			edma_completion_handler(&ecc->slave_chans[channel]);
		}
	} while (sh_ipr);

	edma_shadow0_write(ecc, SH_IEVAL, 1);
	return IRQ_HANDLED;
}
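
/*
 * Worked example (illustrative): if SH_IPR0 reads as zero but SH_IPR1 has
 * bit 3 set, the handler picks bank = 1 and slot = __ffs(sh_ipr) = 3, so
 * the completed channel is (1 << 5) | 3 = 35.  Writing BIT(3) to SH_ICR1
 * acknowledges exactly that event before the completion handler runs.
 */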

static void edma_error_handler(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	struct device *dev = echan->vchan.chan.device->dev;
	struct edmacc_param p;

	if (!echan->edesc)
		return;

	spin_lock(&echan->vchan.lock);

	edma_read_slot(ecc, echan->slot[0], &p);
	/*
	 * Defer the retrigger via the missed flag; the flag is guaranteed
	 * to be acted upon, because either:
	 * (1) we finished transmitting an intermediate slot and
	 *     edma_execute() is coming up, or
	 * (2) we finished the current transfer and the following issue
	 *     will call edma_execute().
	 *
	 * Important note: issuing can be dangerous here and lead to some
	 * nasty recursion when we are in a null slot. So we avoid doing
	 * so and set the missed flag instead.
	 */
	if (p.a_b_cnt == 0 && p.ccnt == 0) {
		dev_dbg(dev, "Error on null slot, setting miss\n");
		echan->missed = 1;
	} else {
		/*
		 * The slot is already programmed but the event got
		 * missed, so it's safe to issue it here.
		 */
		dev_dbg(dev, "Missed event, TRIGGERING\n");
		edma_clean_channel(echan);
		edma_stop(echan);
		edma_start(echan);
		edma_trigger_channel(echan);
	}
	spin_unlock(&echan->vchan.lock);
}

static inline bool edma_error_pending(struct edma_cc *ecc)
{
	if (edma_read_array(ecc, EDMA_EMR, 0) ||
	    edma_read_array(ecc, EDMA_EMR, 1) ||
	    edma_read(ecc, EDMA_QEMR) || edma_read(ecc, EDMA_CCERR))
		return true;

	return false;
}

/* eDMA error interrupt handler */
static irqreturn_t dma_ccerr_handler(int irq, void *data)
{
	struct edma_cc *ecc = data;
	int i, j;
	int ctlr;
	unsigned int cnt = 0;
	unsigned int val;

	ctlr = ecc->id;
	if (ctlr < 0)
		return IRQ_NONE;

	dev_vdbg(ecc->dev, "dma_ccerr_handler\n");

	if (!edma_error_pending(ecc)) {
		/*
		 * The registers indicate no pending error event but the irq
		 * handler has been called.
		 * Ask eDMA to re-evaluate the error registers.
		 */
		dev_err(ecc->dev, "%s: Error interrupt without error event!\n",
			__func__);
		edma_write(ecc, EDMA_EEVAL, 1);
		return IRQ_NONE;
	}

	while (1) {
		/* Event missed register(s) */
		for (j = 0; j < 2; j++) {
			unsigned long emr;

			val = edma_read_array(ecc, EDMA_EMR, j);
			if (!val)
				continue;

			dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val);
			emr = val;
			for (i = find_next_bit(&emr, 32, 0); i < 32;
			     i = find_next_bit(&emr, 32, i + 1)) {
				int k = (j << 5) + i;

				/* Clear the corresponding EMR bits */
				edma_write_array(ecc, EDMA_EMCR, j, BIT(i));
				/* Clear any SER */
				edma_shadow0_write_array(ecc, SH_SECR, j,
							 BIT(i));
				edma_error_handler(&ecc->slave_chans[k]);
			}
		}

		val = edma_read(ecc, EDMA_QEMR);
		if (val) {
			dev_dbg(ecc->dev, "QEMR 0x%02x\n", val);
			/* Not reported, just clear the interrupt reason. */
			edma_write(ecc, EDMA_QEMCR, val);
			edma_shadow0_write(ecc, SH_QSECR, val);
		}

		val = edma_read(ecc, EDMA_CCERR);
		if (val) {
			dev_warn(ecc->dev, "CCERR 0x%08x\n", val);
			/* Not reported, just clear the interrupt reason. */
			edma_write(ecc, EDMA_CCERRCLR, val);
		}

		if (!edma_error_pending(ecc))
			break;
		cnt++;
		if (cnt > 10)
			break;
	}
	edma_write(ecc, EDMA_EEVAL, 1);
	return IRQ_HANDLED;
}

/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct edma_cc *ecc = echan->ecc;
	struct device *dev = ecc->dev;
	enum dma_event_q eventq_no = EVENTQ_DEFAULT;
	int ret;

	if (echan->tc) {
		eventq_no = echan->tc->id;
	} else if (ecc->tc_list) {
		/* memcpy channel */
		echan->tc = &ecc->tc_list[ecc->info->default_queue];
		eventq_no = echan->tc->id;
	}

	ret = edma_alloc_channel(echan, eventq_no);
	if (ret)
		return ret;

	echan->slot[0] = edma_alloc_slot(ecc, echan->ch_num);
	if (echan->slot[0] < 0) {
		dev_err(dev, "Entry slot allocation failed for channel %u\n",
			EDMA_CHAN_SLOT(echan->ch_num));
		ret = echan->slot[0];
		goto err_slot;
	}

	/* Set up channel -> slot mapping for the entry slot */
	edma_set_chmap(echan, echan->slot[0]);
	echan->alloced = true;

	dev_dbg(dev, "Got eDMA channel %d for virt channel %d (%s trigger)\n",
		EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
		echan->hw_triggered ? "HW" : "SW");

	return 0;

err_slot:
	edma_free_channel(echan);
	return ret;
}

/* Free channel resources */
static void edma_free_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = echan->ecc->dev;
	int i;

	/* Terminate transfers */
	edma_stop(echan);

	vchan_free_chan_resources(&echan->vchan);

	/* Free EDMA PaRAM slots */
	for (i = 0; i < EDMA_MAX_SLOTS; i++) {
		if (echan->slot[i] >= 0) {
			edma_free_slot(echan->ecc, echan->slot[i]);
			echan->slot[i] = -1;
		}
	}

	/* Set entry slot to the dummy slot */
	edma_set_chmap(echan, echan->ecc->dummy_slot);

	/* Free EDMA channel */
	if (echan->alloced) {
		edma_free_channel(echan);
		echan->alloced = false;
	}

	echan->tc = NULL;
	echan->hw_triggered = false;

	dev_dbg(dev, "Free eDMA channel %d for virt channel %d\n",
		EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id);
}

/* Send pending descriptor to hardware */
static void edma_issue_pending(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
		edma_execute(echan);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
}

/*
 * This limit exists to avoid a possible infinite loop when waiting for proof
 * that a particular transfer is completed. This limit can be hit if there
 * are large bursts to/from slow devices or the CPU is never able to catch
 * the DMA hardware idle. On an AM335x transferring 48 bytes from the UART
 * RX-FIFO, as many as 55 loops have been seen.
 */
#define EDMA_MAX_TR_WAIT_LOOPS 1000

static u32 edma_residue(struct edma_desc *edesc)
{
	bool dst = edesc->direction == DMA_DEV_TO_MEM;
	int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
	struct edma_chan *echan = edesc->echan;
	struct edma_pset *pset = edesc->pset;
	dma_addr_t done, pos;
	int i;

	/*
	 * We always read the dst/src position from the first PaRAM
	 * pset. That's the one which is active now.
	 */
	pos = edma_get_position(echan->ecc, echan->slot[0], dst);

	/*
	 * "pos" may represent a transfer request that is still being
	 * processed by the EDMACC or EDMATC. We will busy wait until
	 * any one of the situations occurs:
	 *   1. the DMA hardware is idle
	 *   2. a new transfer request is setup
	 *   3. we hit the loop limit
	 */
	while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) {
		/* check if a new transfer request is setup */
		if (edma_get_position(echan->ecc,
				      echan->slot[0], dst) != pos) {
			break;
		}

		if (!--loop_count) {
			dev_dbg_ratelimited(echan->vchan.chan.device->dev,
				"%s: timeout waiting for PaRAM update\n",
				__func__);
			break;
		}

		cpu_relax();
	}

	/*
	 * Cyclic is simple. Just subtract pset[0].addr from pos.
	 *
	 * We never update edesc->residue in the cyclic case, so we
	 * can report the remaining room to the end of the circular
	 * buffer.
	 */
	if (edesc->cyclic) {
		done = pos - pset->addr;
		edesc->residue_stat = edesc->residue - done;
		return edesc->residue_stat;
	}

	/*
	 * For SG operation we catch up with the last processed
	 * status.
	 */
	pset += edesc->processed_stat;

	for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
		/*
		 * If we are inside this pset address range, we know
		 * this is the active one. Get the current delta and
		 * stop walking the psets.
		 */
		if (pos >= pset->addr && pos < pset->addr + pset->len)
			return edesc->residue_stat - (pos - pset->addr);

		/* Otherwise mark it done and update residue_stat. */
		edesc->processed_stat++;
		edesc->residue_stat -= pset->len;
	}
	return edesc->residue_stat;
}
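
/*
 * Worked example (illustrative numbers) for the SG walk above: with three
 * 100-byte psets and a hardware position 40 bytes into the second one, the
 * first iteration does not match, marks pset 0 done and drops residue_stat
 * by 100; the second iteration matches and returns residue_stat - 40.
 */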

/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
		txstate->residue = edma_residue(echan->edesc);
	else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
		txstate->residue = to_edma_desc(&vdesc->tx)->residue;
	spin_unlock_irqrestore(&echan->vchan.lock, flags);

	return ret;
}

static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels)
{
	if (!memcpy_channels)
		return false;
	while (*memcpy_channels != -1) {
		if (*memcpy_channels == ch_num)
			return true;
		memcpy_channels++;
	}
	return false;
}
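
/*
 * memcpy_channels is a -1 terminated list; e.g. a (hypothetical)
 * { 20, 21, -1 } dedicates channels 20 and 21 to memcpy use.
 */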

#define EDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
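
/*
 * With the generic enum dma_slave_buswidth values (1, 2, 3 and 4 for the
 * widths above), this mask evaluates to BIT(1) | BIT(2) | BIT(3) | BIT(4),
 * i.e. 0x1e.
 */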

static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
{
	struct dma_device *s_ddev = &ecc->dma_slave;
	struct dma_device *m_ddev = NULL;
	s32 *memcpy_channels = ecc->info->memcpy_channels;
	int i, j;

	dma_cap_zero(s_ddev->cap_mask);
	dma_cap_set(DMA_SLAVE, s_ddev->cap_mask);
	dma_cap_set(DMA_CYCLIC, s_ddev->cap_mask);
	if (ecc->legacy_mode && !memcpy_channels) {
		dev_warn(ecc->dev,
			 "Legacy memcpy is enabled, things might not work\n");

		dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask);
		s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
		s_ddev->directions = BIT(DMA_MEM_TO_MEM);
	}

	s_ddev->device_prep_slave_sg = edma_prep_slave_sg;
	s_ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic;
	s_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
	s_ddev->device_free_chan_resources = edma_free_chan_resources;
	s_ddev->device_issue_pending = edma_issue_pending;
	s_ddev->device_tx_status = edma_tx_status;
	s_ddev->device_config = edma_slave_config;
	s_ddev->device_pause = edma_dma_pause;
	s_ddev->device_resume = edma_dma_resume;
	s_ddev->device_terminate_all = edma_terminate_all;
	s_ddev->device_synchronize = edma_synchronize;

	s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
	s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
	s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV));
	s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	s_ddev->dev = ecc->dev;
	INIT_LIST_HEAD(&s_ddev->channels);

	if (memcpy_channels) {
		m_ddev = devm_kzalloc(ecc->dev, sizeof(*m_ddev), GFP_KERNEL);
		if (!m_ddev) {
			dev_warn(ecc->dev, "memcpy is disabled due to OoM\n");
			memcpy_channels = NULL;
			goto ch_setup;
		}
		ecc->dma_memcpy = m_ddev;

		dma_cap_zero(m_ddev->cap_mask);
		dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask);

		m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
		m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
		m_ddev->device_free_chan_resources = edma_free_chan_resources;
		m_ddev->device_issue_pending = edma_issue_pending;
		m_ddev->device_tx_status = edma_tx_status;
		m_ddev->device_config = edma_slave_config;
		m_ddev->device_pause = edma_dma_pause;
		m_ddev->device_resume = edma_dma_resume;
		m_ddev->device_terminate_all = edma_terminate_all;
		m_ddev->device_synchronize = edma_synchronize;

		m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
		m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
		m_ddev->directions = BIT(DMA_MEM_TO_MEM);
		m_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

		m_ddev->dev = ecc->dev;
		INIT_LIST_HEAD(&m_ddev->channels);
	} else if (!ecc->legacy_mode) {
		dev_info(ecc->dev, "memcpy is disabled\n");
	}

ch_setup:
	for (i = 0; i < ecc->num_channels; i++) {
		struct edma_chan *echan = &ecc->slave_chans[i];

		echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i);
		echan->ecc = ecc;
		echan->vchan.desc_free = edma_desc_free;

		if (m_ddev && edma_is_memcpy_channel(i, memcpy_channels))
			vchan_init(&echan->vchan, m_ddev);
		else
			vchan_init(&echan->vchan, s_ddev);

		INIT_LIST_HEAD(&echan->node);
		for (j = 0; j < EDMA_MAX_SLOTS; j++)
			echan->slot[j] = -1;
	}
}

static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
			      struct edma_cc *ecc)
{
	int i;
	u32 value, cccfg;
	s8 (*queue_priority_map)[2];

	/* Decode the eDMA3 configuration from CCCFG register */
	cccfg = edma_read(ecc, EDMA_CCCFG);

	value = GET_NUM_REGN(cccfg);
	ecc->num_region = BIT(value);

	value = GET_NUM_DMACH(cccfg);
	ecc->num_channels = BIT(value + 1);

	value = GET_NUM_QDMACH(cccfg);
	ecc->num_qchannels = value * 2;

	value = GET_NUM_PAENTRY(cccfg);
	ecc->num_slots = BIT(value + 4);

	value = GET_NUM_EVQUE(cccfg);
	ecc->num_tc = value + 1;

	ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false;

	dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg);
	dev_dbg(dev, "num_region: %u\n", ecc->num_region);
	dev_dbg(dev, "num_channels: %u\n", ecc->num_channels);
	dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels);
	dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
	dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);
	dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no");
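
	/*
	 * Illustrative decode: a CCCFG with GET_NUM_DMACH() == 5 and
	 * GET_NUM_PAENTRY() == 4 describes BIT(5 + 1) = 64 DMA channels
	 * and BIT(4 + 4) = 256 PaRAM slots, a common EDMA3 configuration.
	 */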

	/* Nothing needs to be done if the queue priority mapping is provided */
	if (pdata->queue_priority_mapping)
		return 0;

	/*
	 * Configure TC/queue priority as follows:
	 * Q0 - priority 0
	 * Q1 - priority 1
	 * Q2 - priority 2
	 * ...
	 * The meaning of priority numbers: 0 highest priority, 7 lowest
	 * priority. So Q0 is the highest priority queue and the last queue has
	 * the lowest priority.
	 */
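	/*
	 * For example, with num_tc == 3 the generated map below is
	 * { {0, 0}, {1, 1}, {2, 2}, {-1, -1} } and the default queue
	 * becomes Q2, the lowest priority one.
	 */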
	queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1,
					  sizeof(*queue_priority_map),
					  GFP_KERNEL);
	if (!queue_priority_map)
		return -ENOMEM;

	for (i = 0; i < ecc->num_tc; i++) {
		queue_priority_map[i][0] = i;
		queue_priority_map[i][1] = i;
	}
	queue_priority_map[i][0] = -1;
	queue_priority_map[i][1] = -1;

	pdata->queue_priority_mapping = queue_priority_map;
	/* Default queue has the lowest priority */
	pdata->default_queue = i - 1;

	return 0;
}

#if IS_ENABLED(CONFIG_OF)
static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata,
			       size_t sz)
{
	const char pname[] = "ti,edma-xbar-event-map";
	struct resource res;
	void __iomem *xbar;
	s16 (*xbar_chans)[2];
	size_t nelm = sz / sizeof(s16);
	u32 shift, offset, mux;
	int ret, i;

	xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL);
	if (!xbar_chans)
		return -ENOMEM;

	ret = of_address_to_resource(dev->of_node, 1, &res);
	if (ret)
		return -ENOMEM;

	xbar = devm_ioremap(dev, res.start, resource_size(&res));
	if (!xbar)
		return -ENOMEM;

	ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans,
					 nelm);
	if (ret)
		return -EIO;

	/* Invalidate last entry for the other user of this mess */
	nelm >>= 1;
	xbar_chans[nelm][0] = -1;
	xbar_chans[nelm][1] = -1;

	for (i = 0; i < nelm; i++) {
		shift = (xbar_chans[i][1] & 0x03) << 3;
		offset = xbar_chans[i][1] & 0xfffffffc;
		mux = readl(xbar + offset);
		mux &= ~(0xff << shift);
		mux |= xbar_chans[i][0] << shift;
		writel(mux, (xbar + offset));
	}

	pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;
	return 0;
}
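
/*
 * Illustrative mux write: for crossbar event 5 the register offset is
 * 5 & ~3 = 4 and the shift is (5 & 3) << 3 = 8, so the mapped channel
 * number replaces byte 1 of the 32-bit mux register at offset 4.
 */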

static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
						     bool legacy_mode)
{
	struct edma_soc_info *info;
	struct property *prop;
	int sz, ret;

	info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	if (legacy_mode) {
		prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map",
					&sz);
		if (prop) {
			ret = edma_xbar_event_map(dev, info, sz);
			if (ret)
				return ERR_PTR(ret);
		}
		return info;
	}

	/* Get the list of channels allocated to be used for memcpy */
	prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz);
	if (prop) {
		const char pname[] = "ti,edma-memcpy-channels";
		size_t nelm = sz / sizeof(s32);
		s32 *memcpy_ch;

		memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32),
					 GFP_KERNEL);
		if (!memcpy_ch)
			return ERR_PTR(-ENOMEM);

		ret = of_property_read_u32_array(dev->of_node, pname,
						 (u32 *)memcpy_ch, nelm);
		if (ret)
			return ERR_PTR(ret);

		memcpy_ch[nelm] = -1;
		info->memcpy_channels = memcpy_ch;
	}

	prop = of_find_property(dev->of_node, "ti,edma-reserved-slot-ranges",
				&sz);
	if (prop) {
		const char pname[] = "ti,edma-reserved-slot-ranges";
		u32 (*tmp)[2];
		s16 (*rsv_slots)[2];
		size_t nelm = sz / sizeof(*tmp);
		struct edma_rsv_info *rsv_info;
		int i;

		if (!nelm)
			return info;

		tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL);
		if (!tmp)
			return ERR_PTR(-ENOMEM);

		rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL);
		if (!rsv_info) {
			kfree(tmp);
			return ERR_PTR(-ENOMEM);
		}

		rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots),
					 GFP_KERNEL);
		if (!rsv_slots) {
			kfree(tmp);
			return ERR_PTR(-ENOMEM);
		}

		ret = of_property_read_u32_array(dev->of_node, pname,
						 (u32 *)tmp, nelm * 2);
		if (ret) {
			kfree(tmp);
			return ERR_PTR(ret);
		}

		for (i = 0; i < nelm; i++) {
			rsv_slots[i][0] = tmp[i][0];
			rsv_slots[i][1] = tmp[i][1];
		}
		rsv_slots[nelm][0] = -1;
		rsv_slots[nelm][1] = -1;

		info->rsv = rsv_info;
		info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots;

		kfree(tmp);
	}

	return info;
}
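
/*
 * Device tree sketch (property names from the eDMA bindings; the channel
 * and slot numbers are illustrative only):
 *
 *	ti,edma-memcpy-channels = <20 21>;
 *	ti,edma-reserved-slot-ranges = <32 12>, <100 20>;
 *
 * The first property dedicates channels 20 and 21 to memcpy; the second
 * marks slots 32..43 and 100..119 as reserved for other users.
 */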

static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct edma_cc *ecc = ofdma->of_dma_data;
	struct dma_chan *chan = NULL;
	struct edma_chan *echan;
	int i;

	if (!ecc || dma_spec->args_count < 1)
		return NULL;

	for (i = 0; i < ecc->num_channels; i++) {
		echan = &ecc->slave_chans[i];
		if (echan->ch_num == dma_spec->args[0]) {
			chan = &echan->vchan.chan;
			break;
		}
	}

	if (!chan)
		return NULL;

	if (echan->ecc->legacy_mode && dma_spec->args_count == 1)
		goto out;

	if (!echan->ecc->legacy_mode && dma_spec->args_count == 2 &&
	    dma_spec->args[1] < echan->ecc->num_tc) {
		echan->tc = &echan->ecc->tc_list[dma_spec->args[1]];
		goto out;
	}

	return NULL;
out:
	/* The channel is going to be used as HW synchronized */
	echan->hw_triggered = true;
	return dma_get_slave_channel(chan);
}
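
/*
 * Consumer sketch (illustrative numbers): with the non-legacy binding a
 * client may pass two cells, e.g. dmas = <&edma 16 1>;, which selects
 * eDMA channel 16 and routes it to TC1 via the tc_list lookup above.
 */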
#else
static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
						     bool legacy_mode)
{
	return ERR_PTR(-EINVAL);
}

static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	return NULL;
}
#endif

static int edma_probe(struct platform_device *pdev)
{
	struct edma_soc_info	*info = pdev->dev.platform_data;
	s8			(*queue_priority_mapping)[2];
	int			i, off, ln;
	const s16		(*rsv_slots)[2];
	int			irq;
	char			*irq_name;
	struct resource		*mem;
	struct device_node	*node = pdev->dev.of_node;
	struct device		*dev = &pdev->dev;
	struct edma_cc		*ecc;
	bool			legacy_mode = true;
	int ret;

	if (node) {
		const struct of_device_id *match;

		match = of_match_node(edma_of_ids, node);
		if (match && (*(u32 *)match->data) == EDMA_BINDING_TPCC)
			legacy_mode = false;

		info = edma_setup_info_from_dt(dev, legacy_mode);
		if (IS_ERR(info)) {
			dev_err(dev, "failed to get DT data\n");
			return PTR_ERR(info);
		}
	}

	if (!info)
		return -ENODEV;

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync() failed\n");
		pm_runtime_disable(dev);
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc)
		return -ENOMEM;

	ecc->dev = dev;
	ecc->id = pdev->id;
	ecc->legacy_mode = legacy_mode;
	/* When booting with DT the pdev->id is -1 */
	if (ecc->id < 0)
		ecc->id = 0;

	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc");
	if (!mem) {
		dev_dbg(dev, "mem resource not found, using index 0\n");
		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!mem) {
			dev_err(dev, "no mem resource?\n");
			return -ENODEV;
		}
	}
	ecc->base = devm_ioremap_resource(dev, mem);
	if (IS_ERR(ecc->base))
		return PTR_ERR(ecc->base);

	platform_set_drvdata(pdev, ecc);

	/* Get eDMA3 configuration from IP */
	ret = edma_setup_from_hw(dev, info, ecc);
	if (ret)
		return ret;

	/* Allocate memory based on the information we got from the IP */
	ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
					sizeof(*ecc->slave_chans), GFP_KERNEL);
	if (!ecc->slave_chans)
		return -ENOMEM;

	ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
				       sizeof(unsigned long), GFP_KERNEL);
	if (!ecc->slot_inuse)
		return -ENOMEM;

	ecc->default_queue = info->default_queue;

	for (i = 0; i < ecc->num_slots; i++)
		edma_write_slot(ecc, i, &dummy_paramset);

	if (info->rsv) {
		/* Set the reserved slots in inuse list */
		rsv_slots = info->rsv->rsv_slots;
		if (rsv_slots) {
			for (i = 0; rsv_slots[i][0] != -1; i++) {
				off = rsv_slots[i][0];
				ln = rsv_slots[i][1];
				edma_set_bits(off, ln, ecc->slot_inuse);
			}
		}
	}

	irq = platform_get_irq_byname(pdev, "edma3_ccint");
	if (irq < 0 && node)
		irq = irq_of_parse_and_map(node, 0);

	if (irq >= 0) {
		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
					  dev_name(dev));
		ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
				       ecc);
		if (ret) {
			dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
			return ret;
		}
		ecc->ccint = irq;
	}

	irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
	if (irq < 0 && node)
		irq = irq_of_parse_and_map(node, 2);

	if (irq >= 0) {
		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
					  dev_name(dev));
		ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
				       ecc);
		if (ret) {
			dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
			return ret;
		}
		ecc->ccerrint = irq;
	}

	ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
	if (ecc->dummy_slot < 0) {
		dev_err(dev, "Can't allocate PaRAM dummy slot\n");
		return ecc->dummy_slot;
	}

	queue_priority_mapping = info->queue_priority_mapping;

	if (!ecc->legacy_mode) {
		int lowest_priority = 0;
		struct of_phandle_args tc_args;

		ecc->tc_list = devm_kcalloc(dev, ecc->num_tc,
					    sizeof(*ecc->tc_list), GFP_KERNEL);
		if (!ecc->tc_list)
			return -ENOMEM;

		for (i = 0;; i++) {
			ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
							       1, i, &tc_args);
			if (ret || i == ecc->num_tc)
				break;

			ecc->tc_list[i].node = tc_args.np;
			ecc->tc_list[i].id = i;
			queue_priority_mapping[i][1] = tc_args.args[0];
			if (queue_priority_mapping[i][1] > lowest_priority) {
				lowest_priority = queue_priority_mapping[i][1];
				info->default_queue = i;
			}
		}
	}

	/* Event queue priority mapping */
	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
		edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
					      queue_priority_mapping[i][1]);

	for (i = 0; i < ecc->num_region; i++) {
		edma_write_array2(ecc, EDMA_DRAE, i, 0, 0x0);
		edma_write_array2(ecc, EDMA_DRAE, i, 1, 0x0);
		edma_write_array(ecc, EDMA_QRAE, i, 0x0);
	}
	ecc->info = info;

	/* Init the dma device and channels */
	edma_dma_init(ecc, legacy_mode);

	for (i = 0; i < ecc->num_channels; i++) {
		/* Assign all channels to the default queue */
		edma_assign_channel_eventq(&ecc->slave_chans[i],
					   info->default_queue);
		/* Set entry slot to the dummy slot */
		edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot);
	}

	ecc->dma_slave.filter.map = info->slave_map;
	ecc->dma_slave.filter.mapcnt = info->slavecnt;
	ecc->dma_slave.filter.fn = edma_filter_fn;

	ret = dma_async_device_register(&ecc->dma_slave);
	if (ret) {
		dev_err(dev, "slave ddev registration failed (%d)\n", ret);
		goto err_reg1;
	}

	if (ecc->dma_memcpy) {
		ret = dma_async_device_register(ecc->dma_memcpy);
		if (ret) {
			dev_err(dev, "memcpy ddev registration failed (%d)\n",
				ret);
			dma_async_device_unregister(&ecc->dma_slave);
			goto err_reg1;
		}
	}

	if (node)
		of_dma_controller_register(node, of_edma_xlate, ecc);

	dev_info(dev, "TI EDMA DMA engine driver\n");

	return 0;

err_reg1:
	edma_free_slot(ecc, ecc->dummy_slot);
	return ret;
}

static void edma_cleanup_vchan(struct dma_device *dmadev)
{
	struct edma_chan *echan, *_echan;

	list_for_each_entry_safe(echan, _echan,
			&dmadev->channels, vchan.chan.device_node) {
		list_del(&echan->vchan.chan.device_node);
		tasklet_kill(&echan->vchan.task);
	}
}

static int edma_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc = dev_get_drvdata(dev);

	devm_free_irq(dev, ecc->ccint, ecc);
	devm_free_irq(dev, ecc->ccerrint, ecc);

	edma_cleanup_vchan(&ecc->dma_slave);

	if (dev->of_node)
		of_dma_controller_free(dev->of_node);
	dma_async_device_unregister(&ecc->dma_slave);
	if (ecc->dma_memcpy)
		dma_async_device_unregister(ecc->dma_memcpy);
	edma_free_slot(ecc, ecc->dummy_slot);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int edma_pm_suspend(struct device *dev)
{
	struct edma_cc *ecc = dev_get_drvdata(dev);
	struct edma_chan *echan = ecc->slave_chans;
	int i;

	for (i = 0; i < ecc->num_channels; i++) {
		if (echan[i].alloced)
			edma_setup_interrupt(&echan[i], false);
	}

	return 0;
}

static int edma_pm_resume(struct device *dev)
{
	struct edma_cc *ecc = dev_get_drvdata(dev);
	struct edma_chan *echan = ecc->slave_chans;
	int i;
	s8 (*queue_priority_mapping)[2];

	queue_priority_mapping = ecc->info->queue_priority_mapping;

	/* Event queue priority mapping */
	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
		edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
					      queue_priority_mapping[i][1]);

	for (i = 0; i < ecc->num_channels; i++) {
		if (echan[i].alloced) {
			/* ensure access through shadow region 0 */
			edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5,
				       BIT(i & 0x1f));

			edma_setup_interrupt(&echan[i], true);

			/* Set up channel -> slot mapping for the entry slot */
			edma_set_chmap(&echan[i], echan[i].slot[0]);
		}
	}

	return 0;
}
#endif

static const struct dev_pm_ops edma_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(edma_pm_suspend, edma_pm_resume)
};

static struct platform_driver edma_driver = {
	.probe		= edma_probe,
	.remove		= edma_remove,
	.driver = {
		.name	= "edma",
		.pm	= &edma_pm_ops,
		.of_match_table = edma_of_ids,
	},
};

static int edma_tptc_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	return 0;
}

static struct platform_driver edma_tptc_driver = {
	.probe		= edma_tptc_probe,
	.driver = {
		.name	= "edma3-tptc",
		.of_match_table = edma_tptc_of_ids,
	},
};

bool edma_filter_fn(struct dma_chan *chan, void *param)
{
	bool match = false;

	if (chan->device->dev->driver == &edma_driver.driver) {
		struct edma_chan *echan = to_edma_chan(chan);
		unsigned ch_req = *(unsigned *)param;

		if (ch_req == echan->ch_num) {
			/* The channel is going to be used as HW synchronized */
			echan->hw_triggered = true;
			match = true;
		}
	}
	return match;
}
EXPORT_SYMBOL(edma_filter_fn);
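
/*
 * Legacy (non-DT) consumer sketch, with an illustrative channel number:
 *
 *	unsigned int ch_num = 12;
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, edma_filter_fn, &ch_num);
 *
 * edma_filter_fn() accepts only the eDMA channel whose number matches
 * *param and flags it as hardware triggered.
 */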

static int edma_init(void)
{
	int ret;

	ret = platform_driver_register(&edma_tptc_driver);
	if (ret)
		return ret;

	return platform_driver_register(&edma_driver);
}
subsys_initcall(edma_init);

static void __exit edma_exit(void)
{
	platform_driver_unregister(&edma_driver);
	platform_driver_unregister(&edma_tptc_driver);
}
module_exit(edma_exit);

MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");