1 /*
2  * Copyright (c) 2013 - 2015 Linaro Ltd.
3  * Copyright (c) 2013 Hisilicon Limited.
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  */
9 #include <linux/sched.h>
10 #include <linux/device.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/dmapool.h>
13 #include <linux/dmaengine.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/platform_device.h>
19 #include <linux/slab.h>
20 #include <linux/spinlock.h>
21 #include <linux/of_device.h>
22 #include <linux/of.h>
23 #include <linux/clk.h>
24 #include <linux/of_dma.h>
25
26 #include "virt-dma.h"
27
28 #define DRIVER_NAME             "k3-dma"
29 #define DMA_MAX_SIZE            0x1ffc
30 #define DMA_CYCLIC_MAX_PERIOD   0x1000
31 #define LLI_BLOCK_SIZE          (4 * PAGE_SIZE)
32
33 #define INT_STAT                0x00
34 #define INT_TC1                 0x04
35 #define INT_TC2                 0x08
36 #define INT_ERR1                0x0c
37 #define INT_ERR2                0x10
38 #define INT_TC1_MASK            0x18
39 #define INT_TC2_MASK            0x1c
40 #define INT_ERR1_MASK           0x20
41 #define INT_ERR2_MASK           0x24
42 #define INT_TC1_RAW             0x600
43 #define INT_TC2_RAW             0x608
44 #define INT_ERR1_RAW            0x610
45 #define INT_ERR2_RAW            0x618
46 #define CH_PRI                  0x688
47 #define CH_STAT                 0x690
48 #define CX_CUR_CNT              0x704
49 #define CX_LLI                  0x800
50 #define CX_CNT1                 0x80c
51 #define CX_CNT0                 0x810
52 #define CX_SRC                  0x814
53 #define CX_DST                  0x818
54 #define CX_CFG                  0x81c
55
56 #define CX_LLI_CHAIN_EN         0x2
57 #define CX_CFG_EN               0x1
58 #define CX_CFG_NODEIRQ          BIT(1)
59 #define CX_CFG_MEM2PER          (0x1 << 2)
60 #define CX_CFG_PER2MEM          (0x2 << 2)
61 #define CX_CFG_SRCINCR          (0x1 << 31)
62 #define CX_CFG_DSTINCR          (0x1 << 30)
63
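/*
 * Hardware link-list item (LLI) as laid out for the controller in the
 * dma_pool block: 'lli' holds the bus address of the next item (with
 * CX_LLI_CHAIN_EN set in its low bits), 'count' is the byte count written
 * to CX_CNT0, 'saddr'/'daddr' are the source/destination bus addresses
 * and 'config' the per-transfer CX_CFG value.  The 32-byte alignment
 * matches the alignment passed to dmam_pool_create() below.
 */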
64 struct k3_desc_hw {
65         u32 lli;
66         u32 reserved[3];
67         u32 count;
68         u32 saddr;
69         u32 daddr;
70         u32 config;
71 } __aligned(32);
72
73 struct k3_dma_desc_sw {
74         struct virt_dma_desc    vd;
75         dma_addr_t              desc_hw_lli;
76         size_t                  desc_num;
77         size_t                  size;
78         struct k3_desc_hw       *desc_hw;
79 };
80
81 struct k3_dma_phy;
82
83 struct k3_dma_chan {
84         u32                     ccfg;
85         struct virt_dma_chan    vc;
86         struct k3_dma_phy       *phy;
87         struct list_head        node;
88         dma_addr_t              dev_addr;
89         enum dma_status         status;
90         bool                    cyclic;
91         struct dma_slave_config slave_config;
92 };
93
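/*
 * One physical DMA channel.  'vchan' is the virtual channel currently
 * being served (NULL when the channel is free), 'ds_run' the descriptor
 * in flight and 'ds_done' the last completed one; the tasklet uses
 * 'ds_done' to decide when the channel can be handed to another vchan.
 */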
94 struct k3_dma_phy {
95         u32                     idx;
96         void __iomem            *base;
97         struct k3_dma_chan      *vchan;
98         struct k3_dma_desc_sw   *ds_run;
99         struct k3_dma_desc_sw   *ds_done;
100 };
101
102 struct k3_dma_dev {
103         struct dma_device       slave;
104         void __iomem            *base;
105         struct tasklet_struct   task;
106         spinlock_t              lock;
107         struct list_head        chan_pending;
108         struct k3_dma_phy       *phy;
109         struct k3_dma_chan      *chans;
110         struct clk              *clk;
111         struct dma_pool         *pool;
112         u32                     dma_channels;
113         u32                     dma_requests;
114         u32                     dma_channel_mask;
115         unsigned int            irq;
116 };
117
118
119 #define K3_FLAG_NOCLK   BIT(1)
120
121 struct k3dma_soc_data {
122         unsigned long flags;
123 };
124
125
126 #define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)
127
128 static int k3_dma_config_write(struct dma_chan *chan,
129                                enum dma_transfer_direction dir,
130                                struct dma_slave_config *cfg);
131
132 static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
133 {
134         return container_of(chan, struct k3_dma_chan, vc.chan);
135 }
136
137 static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on)
138 {
139         u32 val = 0;
140
141         if (on) {
142                 val = readl_relaxed(phy->base + CX_CFG);
143                 val |= CX_CFG_EN;
144                 writel_relaxed(val, phy->base + CX_CFG);
145         } else {
146                 val = readl_relaxed(phy->base + CX_CFG);
147                 val &= ~CX_CFG_EN;
148                 writel_relaxed(val, phy->base + CX_CFG);
149         }
150 }
151
152 static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
153 {
154         u32 val = 0;
155
156         k3_dma_pause_dma(phy, false);
157
158         val = 0x1 << phy->idx;
159         writel_relaxed(val, d->base + INT_TC1_RAW);
160         writel_relaxed(val, d->base + INT_TC2_RAW);
161         writel_relaxed(val, d->base + INT_ERR1_RAW);
162         writel_relaxed(val, d->base + INT_ERR2_RAW);
163 }
164
165 static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
166 {
167         writel_relaxed(hw->lli, phy->base + CX_LLI);
168         writel_relaxed(hw->count, phy->base + CX_CNT0);
169         writel_relaxed(hw->saddr, phy->base + CX_SRC);
170         writel_relaxed(hw->daddr, phy->base + CX_DST);
171         writel_relaxed(hw->config, phy->base + CX_CFG);
172 }
173
174 static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
175 {
176         u32 cnt = 0;
177
178         cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
179         cnt &= 0xffff;
180         return cnt;
181 }
182
183 static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy)
184 {
185         return readl_relaxed(phy->base + CX_LLI);
186 }
187
188 static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d)
189 {
190         return readl_relaxed(d->base + CH_STAT);
191 }
192
193 static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
194 {
195         if (on) {
196                 /* set same priority */
197                 writel_relaxed(0x0, d->base + CH_PRI);
198
199                 /* unmask irq */
200                 writel_relaxed(0xffff, d->base + INT_TC1_MASK);
201                 writel_relaxed(0xffff, d->base + INT_TC2_MASK);
202                 writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
203                 writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
204         } else {
205                 /* mask irq */
206                 writel_relaxed(0x0, d->base + INT_TC1_MASK);
207                 writel_relaxed(0x0, d->base + INT_TC2_MASK);
208                 writel_relaxed(0x0, d->base + INT_ERR1_MASK);
209                 writel_relaxed(0x0, d->base + INT_ERR2_MASK);
210         }
211 }
212
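/*
 * Interrupt handler: INT_STAT reports which physical channels raised an
 * interrupt.  As used by this driver, a TC1 bit completes the descriptor
 * running on that channel, a TC2 bit (requested per LLI via
 * CX_CFG_NODEIRQ) delivers cyclic period callbacks, and ERR1/ERR2 bits
 * are only warned about.  Handled bits are cleared through the *_RAW
 * registers and the tasklet is scheduled to reuse freed channels.
 */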
213 static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
214 {
215         struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
216         struct k3_dma_phy *p;
217         struct k3_dma_chan *c;
218         u32 stat = readl_relaxed(d->base + INT_STAT);
219         u32 tc1  = readl_relaxed(d->base + INT_TC1);
220         u32 tc2  = readl_relaxed(d->base + INT_TC2);
221         u32 err1 = readl_relaxed(d->base + INT_ERR1);
222         u32 err2 = readl_relaxed(d->base + INT_ERR2);
223         u32 i, irq_chan = 0;
224
225         while (stat) {
226                 i = __ffs(stat);
227                 stat &= ~BIT(i);
228                 if (likely((tc1 & BIT(i)) || (tc2 & BIT(i)))) {
229                         unsigned long flags;
230
231                         p = &d->phy[i];
232                         c = p->vchan;
233                         if (c && (tc1 & BIT(i))) {
234                                 spin_lock_irqsave(&c->vc.lock, flags);
235                                 vchan_cookie_complete(&p->ds_run->vd);
236                                 p->ds_done = p->ds_run;
237                                 p->ds_run = NULL;
238                                 spin_unlock_irqrestore(&c->vc.lock, flags);
239                         }
240                         if (c && (tc2 & BIT(i))) {
241                                 spin_lock_irqsave(&c->vc.lock, flags);
242                                 if (p->ds_run != NULL)
243                                         vchan_cyclic_callback(&p->ds_run->vd);
244                                 spin_unlock_irqrestore(&c->vc.lock, flags);
245                         }
246                         irq_chan |= BIT(i);
247                 }
248                 if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
249                         dev_warn(d->slave.dev, "DMA ERR on phy chan %u\n", i);
250         }
251
252         writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
253         writel_relaxed(irq_chan, d->base + INT_TC2_RAW);
254         writel_relaxed(err1, d->base + INT_ERR1_RAW);
255         writel_relaxed(err2, d->base + INT_ERR2_RAW);
256
257         if (irq_chan)
258                 tasklet_schedule(&d->task);
259
260         if (irq_chan || err1 || err2)
261                 return IRQ_HANDLED;
262
263         return IRQ_NONE;
264 }
265
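/*
 * Move the next issued descriptor of 'c' onto its physical channel and
 * start the hardware.  Must be called with c->vc.lock held.  Returns
 * -EAGAIN when the vchan has no physical channel, the physical channel
 * is still busy according to CH_STAT, or nothing has been issued.
 */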
266 static int k3_dma_start_txd(struct k3_dma_chan *c)
267 {
268         struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
269         struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
270
271         if (!c->phy)
272                 return -EAGAIN;
273
274         if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
275                 return -EAGAIN;
276
277         if (vd) {
278                 struct k3_dma_desc_sw *ds =
279                         container_of(vd, struct k3_dma_desc_sw, vd);
280                 /*
281                  * fetch and remove request from vc->desc_issued
282                  * so vc->desc_issued only contains desc pending
283                  */
284                 list_del(&ds->vd.node);
285
286                 c->phy->ds_run = ds;
287                 c->phy->ds_done = NULL;
288                 /* start dma */
289                 k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
290                 return 0;
291         }
292         c->phy->ds_run = NULL;
293         c->phy->ds_done = NULL;
294         return -EAGAIN;
295 }
296
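/*
 * Deferred work scheduled from the interrupt handler: first restart or
 * release physical channels whose descriptor has completed, then hand
 * free physical channels (honouring dma_channel_mask) to virtual
 * channels waiting on d->chan_pending and start their transfers.
 */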
297 static void k3_dma_tasklet(unsigned long arg)
298 {
299         struct k3_dma_dev *d = (struct k3_dma_dev *)arg;
300         struct k3_dma_phy *p;
301         struct k3_dma_chan *c, *cn;
302         unsigned pch, pch_alloc = 0;
303
304         /* check new dma request of running channel in vc->desc_issued */
305         list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
306                 spin_lock_irq(&c->vc.lock);
307                 p = c->phy;
308                 if (p && p->ds_done) {
309                         if (k3_dma_start_txd(c)) {
310                                 /* No current txd associated with this channel */
311                                 dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
312                                 /* Mark this channel free */
313                                 c->phy = NULL;
314                                 p->vchan = NULL;
315                         }
316                 }
317                 spin_unlock_irq(&c->vc.lock);
318         }
319
320         /* check new channel request in d->chan_pending */
321         spin_lock_irq(&d->lock);
322         for (pch = 0; pch < d->dma_channels; pch++) {
323                 if (!(d->dma_channel_mask & (1 << pch)))
324                         continue;
325
326                 p = &d->phy[pch];
327
328                 if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
329                         c = list_first_entry(&d->chan_pending,
330                                 struct k3_dma_chan, node);
331                         /* remove from d->chan_pending */
332                         list_del_init(&c->node);
333                         pch_alloc |= 1 << pch;
334                         /* Mark this channel allocated */
335                         p->vchan = c;
336                         c->phy = p;
337                         dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
338                 }
339         }
340         spin_unlock_irq(&d->lock);
341
342         for (pch = 0; pch < d->dma_channels; pch++) {
343                 if (!(d->dma_channel_mask & (1 << pch)))
344                         continue;
345
346                 if (pch_alloc & (1 << pch)) {
347                         p = &d->phy[pch];
348                         c = p->vchan;
349                         if (c) {
350                                 spin_lock_irq(&c->vc.lock);
351                                 k3_dma_start_txd(c);
352                                 spin_unlock_irq(&c->vc.lock);
353                         }
354                 }
355         }
356 }
357
358 static void k3_dma_free_chan_resources(struct dma_chan *chan)
359 {
360         struct k3_dma_chan *c = to_k3_chan(chan);
361         struct k3_dma_dev *d = to_k3_dma(chan->device);
362         unsigned long flags;
363
364         spin_lock_irqsave(&d->lock, flags);
365         list_del_init(&c->node);
366         spin_unlock_irqrestore(&d->lock, flags);
367
368         vchan_free_chan_resources(&c->vc);
369         c->ccfg = 0;
370 }
371
372 static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
373         dma_cookie_t cookie, struct dma_tx_state *state)
374 {
375         struct k3_dma_chan *c = to_k3_chan(chan);
376         struct k3_dma_dev *d = to_k3_dma(chan->device);
377         struct k3_dma_phy *p;
378         struct virt_dma_desc *vd;
379         unsigned long flags;
380         enum dma_status ret;
381         size_t bytes = 0;
382
383         ret = dma_cookie_status(&c->vc.chan, cookie, state);
384         if (ret == DMA_COMPLETE)
385                 return ret;
386
387         spin_lock_irqsave(&c->vc.lock, flags);
388         p = c->phy;
389         ret = c->status;
390
391         /*
392          * If the cookie is on our issue queue, then the residue is
393          * its total size.
394          */
395         vd = vchan_find_desc(&c->vc, cookie);
396         if (vd && !c->cyclic) {
397                 bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
398         } else if ((!p) || (!p->ds_run)) {
399                 bytes = 0;
400         } else {
401                 struct k3_dma_desc_sw *ds = p->ds_run;
402                 u32 clli = 0, index = 0;
403
404                 bytes = k3_dma_get_curr_cnt(d, p);
405                 clli = k3_dma_get_curr_lli(p);
406                 index = ((clli - ds->desc_hw_lli) /
407                                 sizeof(struct k3_desc_hw)) + 1;
408                 for (; index < ds->desc_num; index++) {
409                         bytes += ds->desc_hw[index].count;
410                         /* end of lli */
411                         if (!ds->desc_hw[index].lli)
412                                 break;
413                 }
414         }
415         spin_unlock_irqrestore(&c->vc.lock, flags);
416         dma_set_residue(state, bytes);
417         return ret;
418 }
419
420 static void k3_dma_issue_pending(struct dma_chan *chan)
421 {
422         struct k3_dma_chan *c = to_k3_chan(chan);
423         struct k3_dma_dev *d = to_k3_dma(chan->device);
424         unsigned long flags;
425
426         spin_lock_irqsave(&c->vc.lock, flags);
427         /* add request to vc->desc_issued */
428         if (vchan_issue_pending(&c->vc)) {
429                 spin_lock(&d->lock);
430                 if (!c->phy) {
431                         if (list_empty(&c->node)) {
432                                 /* if new channel, add chan_pending */
433                                 list_add_tail(&c->node, &d->chan_pending);
434                                 /* check in tasklet */
435                                 tasklet_schedule(&d->task);
436                                 dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
437                         }
438                 }
439                 spin_unlock(&d->lock);
440         } else
441                 dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
442         spin_unlock_irqrestore(&c->vc.lock, flags);
443 }
444
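/*
 * Fill LLI entry 'num' of the descriptor.  Every entry except the last
 * is chained to the following one in the pool block; callers terminate
 * the chain by clearing the final 'lli' (or, for cyclic transfers, by
 * pointing it back at the first entry).
 */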
445 static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
446                         dma_addr_t src, size_t len, u32 num, u32 ccfg)
447 {
448         if (num != ds->desc_num - 1)
449                 ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
450                         sizeof(struct k3_desc_hw);
451
452         ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
453         ds->desc_hw[num].count = len;
454         ds->desc_hw[num].saddr = src;
455         ds->desc_hw[num].daddr = dst;
456         ds->desc_hw[num].config = ccfg;
457 }
458
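/*
 * Allocate the software descriptor plus one dma_pool block of hardware
 * LLIs.  A block holds LLI_BLOCK_SIZE / sizeof(struct k3_desc_hw)
 * entries, which bounds the number of segments per prepared transfer.
 */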
459 static struct k3_dma_desc_sw *k3_dma_alloc_desc_resource(int num,
460                                                         struct dma_chan *chan)
461 {
462         struct k3_dma_chan *c = to_k3_chan(chan);
463         struct k3_dma_desc_sw *ds;
464         struct k3_dma_dev *d = to_k3_dma(chan->device);
465         int lli_limit = LLI_BLOCK_SIZE / sizeof(struct k3_desc_hw);
466
467         if (num > lli_limit) {
468         dev_dbg(chan->device->dev, "vch %p: sg num %d exceeds max %d\n",
469                         &c->vc, num, lli_limit);
470                 return NULL;
471         }
472
473         ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
474         if (!ds)
475                 return NULL;
476
477         ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
478         if (!ds->desc_hw) {
479                 dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
480                 kfree(ds);
481                 return NULL;
482         }
483         ds->desc_num = num;
484         return ds;
485 }
486
487 static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
488         struct dma_chan *chan,  dma_addr_t dst, dma_addr_t src,
489         size_t len, unsigned long flags)
490 {
491         struct k3_dma_chan *c = to_k3_chan(chan);
492         struct k3_dma_desc_sw *ds;
493         size_t copy = 0;
494         int num = 0;
495
496         if (!len)
497                 return NULL;
498
499         num = DIV_ROUND_UP(len, DMA_MAX_SIZE);
500
501         ds = k3_dma_alloc_desc_resource(num, chan);
502         if (!ds)
503                 return NULL;
504
505         c->cyclic = 0;
506         ds->size = len;
507         num = 0;
508
509         if (!c->ccfg) {
510                 /* default is memtomem, without calling device_config */
511                 c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
512                 c->ccfg |= (0xf << 20) | (0xf << 24);   /* burst = 16 */
513                 c->ccfg |= (0x3 << 12) | (0x3 << 16);   /* width = 64 bit */
514         }
515
516         do {
517                 copy = min_t(size_t, len, DMA_MAX_SIZE);
518                 k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
519
520                 src += copy;
521                 dst += copy;
522                 len -= copy;
523         } while (len);
524
525         ds->desc_hw[num-1].lli = 0;     /* end of link */
526         return vchan_tx_prep(&c->vc, &ds->vd, flags);
527 }
528
529 static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
530         struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
531         enum dma_transfer_direction dir, unsigned long flags, void *context)
532 {
533         struct k3_dma_chan *c = to_k3_chan(chan);
534         struct k3_dma_desc_sw *ds;
535         size_t len, avail, total = 0;
536         struct scatterlist *sg;
537         dma_addr_t addr, src = 0, dst = 0;
538         int num = sglen, i;
539
540         if (sgl == NULL)
541                 return NULL;
542
543         c->cyclic = 0;
544
545         for_each_sg(sgl, sg, sglen, i) {
546                 avail = sg_dma_len(sg);
547                 if (avail > DMA_MAX_SIZE)
548                         num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
549         }
550
551         ds = k3_dma_alloc_desc_resource(num, chan);
552         if (!ds)
553                 return NULL;
554         num = 0;
555         k3_dma_config_write(chan, dir, &c->slave_config);
556
557         for_each_sg(sgl, sg, sglen, i) {
558                 addr = sg_dma_address(sg);
559                 avail = sg_dma_len(sg);
560                 total += avail;
561
562                 do {
563                         len = min_t(size_t, avail, DMA_MAX_SIZE);
564
565                         if (dir == DMA_MEM_TO_DEV) {
566                                 src = addr;
567                                 dst = c->dev_addr;
568                         } else if (dir == DMA_DEV_TO_MEM) {
569                                 src = c->dev_addr;
570                                 dst = addr;
571                         }
572
573                         k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);
574
575                         addr += len;
576                         avail -= len;
577                 } while (avail);
578         }
579
580         ds->desc_hw[num-1].lli = 0;     /* end of link */
581         ds->size = total;
582         return vchan_tx_prep(&c->vc, &ds->vd, flags);
583 }
584
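/*
 * Build a cyclic transfer: the buffer is split into LLIs of at most
 * DMA_CYCLIC_MAX_PERIOD (or period_len, if smaller) bytes, the LLI that
 * completes each period requests a TC2 interrupt for the period
 * callback, and the last LLI links back to the first so the chain
 * never terminates.
 */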
585 static struct dma_async_tx_descriptor *
586 k3_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
587                        size_t buf_len, size_t period_len,
588                        enum dma_transfer_direction dir,
589                        unsigned long flags)
590 {
591         struct k3_dma_chan *c = to_k3_chan(chan);
592         struct k3_dma_desc_sw *ds;
593         size_t len, avail, total = 0;
594         dma_addr_t addr, src = 0, dst = 0;
595         int num = 1, since = 0;
596         size_t modulo = DMA_CYCLIC_MAX_PERIOD;
597         u32 en_tc2 = 0;
598
599         dev_dbg(chan->device->dev, "%s: buf %pad, dev %pad, buf len %zu, period_len = %zu, dir %d\n",
600                __func__, &buf_addr, &c->dev_addr,
601                buf_len, period_len, (int)dir);
602
603         avail = buf_len;
604         if (avail > modulo)
605                 num += DIV_ROUND_UP(avail, modulo) - 1;
606
607         ds = k3_dma_alloc_desc_resource(num, chan);
608         if (!ds)
609                 return NULL;
610
611         c->cyclic = 1;
612         addr = buf_addr;
613         avail = buf_len;
614         total = avail;
615         num = 0;
616         k3_dma_config_write(chan, dir, &c->slave_config);
617
618         if (period_len < modulo)
619                 modulo = period_len;
620
621         do {
622                 len = min_t(size_t, avail, modulo);
623
624                 if (dir == DMA_MEM_TO_DEV) {
625                         src = addr;
626                         dst = c->dev_addr;
627                 } else if (dir == DMA_DEV_TO_MEM) {
628                         src = c->dev_addr;
629                         dst = addr;
630                 }
631                 since += len;
632                 if (since >= period_len) {
633                         /* descriptor asks for TC2 interrupt on completion */
634                         en_tc2 = CX_CFG_NODEIRQ;
635                         since -= period_len;
636                 } else
637                         en_tc2 = 0;
638
639                 k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg | en_tc2);
640
641                 addr += len;
642                 avail -= len;
643         } while (avail);
644
645         /* "Cyclic" == end of link points back to start of link */
646         ds->desc_hw[num - 1].lli |= ds->desc_hw_lli;
647
648         ds->size = total;
649
650         return vchan_tx_prep(&c->vc, &ds->vd, flags);
651 }
652
653 static int k3_dma_config(struct dma_chan *chan,
654                          struct dma_slave_config *cfg)
655 {
656         struct k3_dma_chan *c = to_k3_chan(chan);
657
658         memcpy(&c->slave_config, cfg, sizeof(*cfg));
659
660         return 0;
661 }
662
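/*
 * Translate the cached dma_slave_config into the channel's CX_CFG value
 * for the given direction: __ffs(width), i.e. log2 of the bus width in
 * bytes, goes into the width fields at bits 12 and 16, the burst length
 * (maxburst capped at 16, stored as maxburst - 1) into the fields at
 * bits 20 and 24, and the peripheral request line (taken from the
 * channel id) starts at bit 4.
 */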
663 static int k3_dma_config_write(struct dma_chan *chan,
664                                enum dma_transfer_direction dir,
665                                struct dma_slave_config *cfg)
666 {
667         struct k3_dma_chan *c = to_k3_chan(chan);
668         u32 maxburst = 0, val = 0;
669         enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
670
671         if (dir == DMA_DEV_TO_MEM) {
672                 c->ccfg = CX_CFG_DSTINCR;
673                 c->dev_addr = cfg->src_addr;
674                 maxburst = cfg->src_maxburst;
675                 width = cfg->src_addr_width;
676         } else if (dir == DMA_MEM_TO_DEV) {
677                 c->ccfg = CX_CFG_SRCINCR;
678                 c->dev_addr = cfg->dst_addr;
679                 maxburst = cfg->dst_maxburst;
680                 width = cfg->dst_addr_width;
681         }
682         switch (width) {
683         case DMA_SLAVE_BUSWIDTH_1_BYTE:
684         case DMA_SLAVE_BUSWIDTH_2_BYTES:
685         case DMA_SLAVE_BUSWIDTH_4_BYTES:
686         case DMA_SLAVE_BUSWIDTH_8_BYTES:
687                 val =  __ffs(width);
688                 break;
689         default:
690                 val = 3;
691                 break;
692         }
693         c->ccfg |= (val << 12) | (val << 16);
694
695         if ((maxburst == 0) || (maxburst > 16))
696                 val = 15;
697         else
698                 val = maxburst - 1;
699         c->ccfg |= (val << 20) | (val << 24);
700         c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;
701
702         /* specific request line */
703         c->ccfg |= c->vc.chan.chan_id << 4;
704
705         return 0;
706 }
707
708 static void k3_dma_free_desc(struct virt_dma_desc *vd)
709 {
710         struct k3_dma_desc_sw *ds =
711                 container_of(vd, struct k3_dma_desc_sw, vd);
712         struct k3_dma_dev *d = to_k3_dma(vd->tx.chan->device);
713
714         dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
715         kfree(ds);
716 }
717
718 static int k3_dma_terminate_all(struct dma_chan *chan)
719 {
720         struct k3_dma_chan *c = to_k3_chan(chan);
721         struct k3_dma_dev *d = to_k3_dma(chan->device);
722         struct k3_dma_phy *p = c->phy;
723         unsigned long flags;
724         LIST_HEAD(head);
725
726         dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
727
728         /* Prevent this channel being scheduled */
729         spin_lock(&d->lock);
730         list_del_init(&c->node);
731         spin_unlock(&d->lock);
732
733         /* Clear the tx descriptor lists */
734         spin_lock_irqsave(&c->vc.lock, flags);
735         vchan_get_all_descriptors(&c->vc, &head);
736         if (p) {
737                 /* vchan is assigned to a pchan - stop the channel */
738                 k3_dma_terminate_chan(p, d);
739                 c->phy = NULL;
740                 p->vchan = NULL;
741                 if (p->ds_run) {
742                         vchan_terminate_vdesc(&p->ds_run->vd);
743                         p->ds_run = NULL;
744                 }
745                 p->ds_done = NULL;
746         }
747         spin_unlock_irqrestore(&c->vc.lock, flags);
748         vchan_dma_desc_free_list(&c->vc, &head);
749
750         return 0;
751 }
752
753 static void k3_dma_synchronize(struct dma_chan *chan)
754 {
755         struct k3_dma_chan *c = to_k3_chan(chan);
756
757         vchan_synchronize(&c->vc);
758 }
759
760 static int k3_dma_transfer_pause(struct dma_chan *chan)
761 {
762         struct k3_dma_chan *c = to_k3_chan(chan);
763         struct k3_dma_dev *d = to_k3_dma(chan->device);
764         struct k3_dma_phy *p = c->phy;
765
766         dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
767         if (c->status == DMA_IN_PROGRESS) {
768                 c->status = DMA_PAUSED;
769                 if (p) {
770                         k3_dma_pause_dma(p, false);
771                 } else {
772                         spin_lock(&d->lock);
773                         list_del_init(&c->node);
774                         spin_unlock(&d->lock);
775                 }
776         }
777
778         return 0;
779 }
780
781 static int k3_dma_transfer_resume(struct dma_chan *chan)
782 {
783         struct k3_dma_chan *c = to_k3_chan(chan);
784         struct k3_dma_dev *d = to_k3_dma(chan->device);
785         struct k3_dma_phy *p = c->phy;
786         unsigned long flags;
787
788         dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
789         spin_lock_irqsave(&c->vc.lock, flags);
790         if (c->status == DMA_PAUSED) {
791                 c->status = DMA_IN_PROGRESS;
792                 if (p) {
793                         k3_dma_pause_dma(p, true);
794                 } else if (!list_empty(&c->vc.desc_issued)) {
795                         spin_lock(&d->lock);
796                         list_add_tail(&c->node, &d->chan_pending);
797                         spin_unlock(&d->lock);
798                 }
799         }
800         spin_unlock_irqrestore(&c->vc.lock, flags);
801
802         return 0;
803 }
804
805 static const struct k3dma_soc_data k3_v1_dma_data = {
806         .flags = 0,
807 };
808
809 static const struct k3dma_soc_data asp_v1_dma_data = {
810         .flags = K3_FLAG_NOCLK,
811 };
812
813 static const struct of_device_id k3_pdma_dt_ids[] = {
814         { .compatible = "hisilicon,k3-dma-1.0",
815           .data = &k3_v1_dma_data
816         },
817         { .compatible = "hisilicon,hisi-pcm-asp-dma-1.0",
818           .data = &asp_v1_dma_data
819         },
820         {}
821 };
822 MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);
823
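/*
 * DT translation: the first cell of the "dmas" specifier selects the
 * request line, which maps 1:1 onto the virtual channel of that index.
 */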
824 static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
825                                                 struct of_dma *ofdma)
826 {
827         struct k3_dma_dev *d = ofdma->of_dma_data;
828         unsigned int request = dma_spec->args[0];
829
830         if (request >= d->dma_requests)
831                 return NULL;
832
833         return dma_get_slave_channel(&(d->chans[request].vc.chan));
834 }
835
836 static int k3_dma_probe(struct platform_device *op)
837 {
838         const struct k3dma_soc_data *soc_data;
839         struct k3_dma_dev *d;
840         const struct of_device_id *of_id;
841         struct resource *iores;
842         int i, ret, irq = 0;
843
844         iores = platform_get_resource(op, IORESOURCE_MEM, 0);
845         if (!iores)
846                 return -EINVAL;
847
848         d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
849         if (!d)
850                 return -ENOMEM;
851
852         soc_data = device_get_match_data(&op->dev);
853         if (!soc_data)
854                 return -EINVAL;
855
856         d->base = devm_ioremap_resource(&op->dev, iores);
857         if (IS_ERR(d->base))
858                 return PTR_ERR(d->base);
859
860         of_id = of_match_device(k3_pdma_dt_ids, &op->dev);
861         if (of_id) {
862                 of_property_read_u32((&op->dev)->of_node,
863                                 "dma-channels", &d->dma_channels);
864                 of_property_read_u32((&op->dev)->of_node,
865                                 "dma-requests", &d->dma_requests);
866                 ret = of_property_read_u32((&op->dev)->of_node,
867                                 "dma-channel-mask", &d->dma_channel_mask);
868                 if (ret) {
869                         dev_warn(&op->dev,
870                                  "dma-channel-mask doesn't exist, considering all as available.\n");
871                         d->dma_channel_mask = (u32)~0UL;
872                 }
873         }
874
875         if (!(soc_data->flags & K3_FLAG_NOCLK)) {
876                 d->clk = devm_clk_get(&op->dev, NULL);
877                 if (IS_ERR(d->clk)) {
878                         dev_err(&op->dev, "no dma clk\n");
879                         return PTR_ERR(d->clk);
880                 }
881         }
882
883         irq = platform_get_irq(op, 0);
884         ret = devm_request_irq(&op->dev, irq,
885                         k3_dma_int_handler, 0, DRIVER_NAME, d);
886         if (ret)
887                 return ret;
888
889         d->irq = irq;
890
891         /* A DMA memory pool for LLIs, align on 32-byte boundary */
892         d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
893                                         LLI_BLOCK_SIZE, 32, 0);
894         if (!d->pool)
895                 return -ENOMEM;
896
897         /* init phy channel */
898         d->phy = devm_kcalloc(&op->dev,
899                 d->dma_channels, sizeof(struct k3_dma_phy), GFP_KERNEL);
900         if (d->phy == NULL)
901                 return -ENOMEM;
902
903         for (i = 0; i < d->dma_channels; i++) {
904                 struct k3_dma_phy *p;
905
906                 if (!(d->dma_channel_mask & BIT(i)))
907                         continue;
908
909                 p = &d->phy[i];
910                 p->idx = i;
911                 p->base = d->base + i * 0x40;
912         }
913
914         INIT_LIST_HEAD(&d->slave.channels);
915         dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
916         dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
917         dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
918         d->slave.dev = &op->dev;
919         d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
920         d->slave.device_tx_status = k3_dma_tx_status;
921         d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
922         d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
923         d->slave.device_prep_dma_cyclic = k3_dma_prep_dma_cyclic;
924         d->slave.device_issue_pending = k3_dma_issue_pending;
925         d->slave.device_config = k3_dma_config;
926         d->slave.device_pause = k3_dma_transfer_pause;
927         d->slave.device_resume = k3_dma_transfer_resume;
928         d->slave.device_terminate_all = k3_dma_terminate_all;
929         d->slave.device_synchronize = k3_dma_synchronize;
930         d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;
931
932         /* init virtual channel */
933         d->chans = devm_kcalloc(&op->dev,
934                 d->dma_requests, sizeof(struct k3_dma_chan), GFP_KERNEL);
935         if (d->chans == NULL)
936                 return -ENOMEM;
937
938         for (i = 0; i < d->dma_requests; i++) {
939                 struct k3_dma_chan *c = &d->chans[i];
940
941                 c->status = DMA_IN_PROGRESS;
942                 INIT_LIST_HEAD(&c->node);
943                 c->vc.desc_free = k3_dma_free_desc;
944                 vchan_init(&c->vc, &d->slave);
945         }
946
947         /* Enable clock before accessing registers */
948         ret = clk_prepare_enable(d->clk);
949         if (ret < 0) {
950                 dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
951                 return ret;
952         }
953
954         k3_dma_enable_dma(d, true);
955
956         ret = dma_async_device_register(&d->slave);
957         if (ret)
958                 goto dma_async_register_fail;
959
960         ret = of_dma_controller_register((&op->dev)->of_node,
961                                         k3_of_dma_simple_xlate, d);
962         if (ret)
963                 goto of_dma_register_fail;
964
965         spin_lock_init(&d->lock);
966         INIT_LIST_HEAD(&d->chan_pending);
967         tasklet_init(&d->task, k3_dma_tasklet, (unsigned long)d);
968         platform_set_drvdata(op, d);
969         dev_info(&op->dev, "initialized\n");
970
971         return 0;
972
973 of_dma_register_fail:
974         dma_async_device_unregister(&d->slave);
975 dma_async_register_fail:
976         clk_disable_unprepare(d->clk);
977         return ret;
978 }
979
980 static int k3_dma_remove(struct platform_device *op)
981 {
982         struct k3_dma_chan *c, *cn;
983         struct k3_dma_dev *d = platform_get_drvdata(op);
984
985         dma_async_device_unregister(&d->slave);
986         of_dma_controller_free((&op->dev)->of_node);
987
988         devm_free_irq(&op->dev, d->irq, d);
989
990         list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
991                 list_del(&c->vc.chan.device_node);
992                 tasklet_kill(&c->vc.task);
993         }
994         tasklet_kill(&d->task);
995         clk_disable_unprepare(d->clk);
996         return 0;
997 }
998
999 #ifdef CONFIG_PM_SLEEP
1000 static int k3_dma_suspend_dev(struct device *dev)
1001 {
1002         struct k3_dma_dev *d = dev_get_drvdata(dev);
1003         u32 stat = 0;
1004
1005         stat = k3_dma_get_chan_stat(d);
1006         if (stat) {
1007                 dev_warn(d->slave.dev,
1008                         "chan(s) still running (CH_STAT = 0x%x), failed to suspend\n", stat);
1009                 return -EBUSY;
1010         }
1011         k3_dma_enable_dma(d, false);
1012         clk_disable_unprepare(d->clk);
1013         return 0;
1014 }
1015
1016 static int k3_dma_resume_dev(struct device *dev)
1017 {
1018         struct k3_dma_dev *d = dev_get_drvdata(dev);
1019         int ret = 0;
1020
1021         ret = clk_prepare_enable(d->clk);
1022         if (ret < 0) {
1023                 dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
1024                 return ret;
1025         }
1026         k3_dma_enable_dma(d, true);
1027         return 0;
1028 }
1029 #endif
1030
1031 static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);
1032
1033 static struct platform_driver k3_pdma_driver = {
1034         .driver         = {
1035                 .name   = DRIVER_NAME,
1036                 .pm     = &k3_dma_pmops,
1037                 .of_match_table = k3_pdma_dt_ids,
1038         },
1039         .probe          = k3_dma_probe,
1040         .remove         = k3_dma_remove,
1041 };
1042
1043 module_platform_driver(k3_pdma_driver);
1044
1045 MODULE_DESCRIPTION("Hisilicon k3 DMA Driver");
1046 MODULE_ALIAS("platform:k3dma");
1047 MODULE_LICENSE("GPL v2");