/*
 * DMA driver for STMicroelectronics STi FDMA controller
 *
 * Copyright (C) 2014 STMicroelectronics
 *
 * Author: Ludovic Barre <Ludovic.barre@st.com>
 *         Peter Griffin <peter.griffin@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/remoteproc.h>

#include "st_fdma.h"

static inline struct st_fdma_chan *to_st_fdma_chan(struct dma_chan *c)
{
        return container_of(c, struct st_fdma_chan, vchan.chan);
}

static struct st_fdma_desc *to_st_fdma_desc(struct virt_dma_desc *vd)
{
        return container_of(vd, struct st_fdma_desc, vdesc);
}

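/*
 * Reserve the request line named in the channel config. Returns the
 * dreq line number on success, or -EINVAL if the line is out of range,
 * already claimed, or no lines are left.
 */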
static int st_fdma_dreq_get(struct st_fdma_chan *fchan)
{
        struct st_fdma_dev *fdev = fchan->fdev;
        u32 req_line_cfg = fchan->cfg.req_line;
        u32 dreq_line;
        int try = 0;

        /*
         * dreq_mask is shared by the n channels of the fdma, so all
         * accesses must be atomic. If dreq_mask changes between the
         * check and test_and_set_bit(), we retry.
         */
        do {
                if (fdev->dreq_mask == ~0L) {
                        dev_err(fdev->dev, "No req lines available\n");
                        return -EINVAL;
                }

                if (try || req_line_cfg >= ST_FDMA_NR_DREQS) {
                        dev_err(fdev->dev, "Invalid or used req line\n");
                        return -EINVAL;
                } else {
                        dreq_line = req_line_cfg;
                }

                try++;
        } while (test_and_set_bit(dreq_line, &fdev->dreq_mask));

        dev_dbg(fdev->dev, "get dreq_line:%d mask:%#lx\n",
                dreq_line, fdev->dreq_mask);

        return dreq_line;
}

static void st_fdma_dreq_put(struct st_fdma_chan *fchan)
{
        struct st_fdma_dev *fdev = fchan->fdev;

        dev_dbg(fdev->dev, "put dreq_line:%#x\n", fchan->dreq_line);
        clear_bit(fchan->dreq_line, &fdev->dreq_mask);
}

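/*
 * Program the first node of the next queued descriptor into the
 * hardware and start the channel. Caller must hold the vchan lock.
 */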
static void st_fdma_xfer_desc(struct st_fdma_chan *fchan)
{
        struct virt_dma_desc *vdesc;
        unsigned long nbytes, ch_cmd, cmd;

        vdesc = vchan_next_desc(&fchan->vchan);
        if (!vdesc)
                return;

        fchan->fdesc = to_st_fdma_desc(vdesc);
        nbytes = fchan->fdesc->node[0].desc->nbytes;
        cmd = FDMA_CMD_START(fchan->vchan.chan.chan_id);
        ch_cmd = fchan->fdesc->node[0].pdesc | FDMA_CH_CMD_STA_START;

        /* start the channel for the descriptor */
        fnode_write(fchan, nbytes, FDMA_CNTN_OFST);
        fchan_write(fchan, ch_cmd, FDMA_CH_CMD_OFST);
        writel(cmd,
                fchan->fdev->slim_rproc->peri + FDMA_CMD_SET_OFST);

        dev_dbg(fchan->fdev->dev, "start chan:%d\n", fchan->vchan.chan.chan_id);
}

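/*
 * Map the hardware channel state onto the generic dmaengine status
 * values held in fchan->status.
 */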
static void st_fdma_ch_sta_update(struct st_fdma_chan *fchan,
                                  unsigned long int_sta)
{
        unsigned long ch_sta, ch_err;
        int ch_id = fchan->vchan.chan.chan_id;
        struct st_fdma_dev *fdev = fchan->fdev;

        ch_sta = fchan_read(fchan, FDMA_CH_CMD_OFST);
        ch_err = ch_sta & FDMA_CH_CMD_ERR_MASK;
        ch_sta &= FDMA_CH_CMD_STA_MASK;

        if (int_sta & FDMA_INT_STA_ERR) {
                dev_warn(fdev->dev, "chan:%d, error:%ld\n", ch_id, ch_err);
                fchan->status = DMA_ERROR;
                return;
        }

        switch (ch_sta) {
        case FDMA_CH_CMD_STA_PAUSED:
                fchan->status = DMA_PAUSED;
                break;

        case FDMA_CH_CMD_STA_RUNNING:
                fchan->status = DMA_IN_PROGRESS;
                break;
        }
}

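/*
 * A single interrupt is shared by all channels. The status register
 * carries two bits per channel (completion and error), hence the
 * shift by two per loop iteration below.
 */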
static irqreturn_t st_fdma_irq_handler(int irq, void *dev_id)
{
        struct st_fdma_dev *fdev = dev_id;
        irqreturn_t ret = IRQ_NONE;
        struct st_fdma_chan *fchan = &fdev->chans[0];
        unsigned long int_sta, clr;

        int_sta = fdma_read(fdev, FDMA_INT_STA_OFST);
        clr = int_sta;

        for (; int_sta != 0; int_sta >>= 2, fchan++) {
                if (!(int_sta & (FDMA_INT_STA_CH | FDMA_INT_STA_ERR)))
                        continue;

                spin_lock(&fchan->vchan.lock);
                st_fdma_ch_sta_update(fchan, int_sta);

                if (fchan->fdesc) {
                        if (!fchan->fdesc->iscyclic) {
                                list_del(&fchan->fdesc->vdesc.node);
                                vchan_cookie_complete(&fchan->fdesc->vdesc);
                                fchan->fdesc = NULL;
                                fchan->status = DMA_COMPLETE;
                        } else {
                                vchan_cyclic_callback(&fchan->fdesc->vdesc);
                        }

                        /* Start the next descriptor (if available) */
                        if (!fchan->fdesc)
                                st_fdma_xfer_desc(fchan);
                }

                spin_unlock(&fchan->vchan.lock);
                ret = IRQ_HANDLED;
        }

        fdma_write(fdev, clr, FDMA_INT_CLR_OFST);

        return ret;
}

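/*
 * Translate a DT dma-spec into a channel: boot the SLIM core running
 * the FDMA firmware, grab any free channel, and record the request
 * line, request control bits and transfer type from the phandle args.
 */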
static struct dma_chan *st_fdma_of_xlate(struct of_phandle_args *dma_spec,
                                         struct of_dma *ofdma)
{
        struct st_fdma_dev *fdev = ofdma->of_dma_data;
        struct dma_chan *chan;
        struct st_fdma_chan *fchan;
        int ret;

        if (dma_spec->args_count < 1)
                return ERR_PTR(-EINVAL);

        if (fdev->dma_device.dev->of_node != dma_spec->np)
                return ERR_PTR(-EINVAL);

        ret = rproc_boot(fdev->slim_rproc->rproc);
        if (ret == -ENOENT)
                return ERR_PTR(-EPROBE_DEFER);
        else if (ret)
                return ERR_PTR(ret);

        chan = dma_get_any_slave_channel(&fdev->dma_device);
        if (!chan)
                goto err_chan;

        fchan = to_st_fdma_chan(chan);

        fchan->cfg.of_node = dma_spec->np;
        fchan->cfg.req_line = dma_spec->args[0];
        fchan->cfg.req_ctrl = 0;
        fchan->cfg.type = ST_FDMA_TYPE_FREE_RUN;

        if (dma_spec->args_count > 1)
                fchan->cfg.req_ctrl = dma_spec->args[1]
                        & FDMA_REQ_CTRL_CFG_MASK;

        if (dma_spec->args_count > 2)
                fchan->cfg.type = dma_spec->args[2];

        if (fchan->cfg.type == ST_FDMA_TYPE_FREE_RUN) {
                fchan->dreq_line = 0;
        } else {
                fchan->dreq_line = st_fdma_dreq_get(fchan);
                if (IS_ERR_VALUE(fchan->dreq_line)) {
                        chan = ERR_PTR(fchan->dreq_line);
                        goto err_chan;
                }
        }

        dev_dbg(fdev->dev, "xlate req_line:%d type:%d req_ctrl:%#lx\n",
                fchan->cfg.req_line, fchan->cfg.type, fchan->cfg.req_ctrl);

        return chan;

err_chan:
        rproc_shutdown(fdev->slim_rproc->rproc);
        return chan;
}

static void st_fdma_free_desc(struct virt_dma_desc *vdesc)
{
        struct st_fdma_desc *fdesc;
        int i;

        fdesc = to_st_fdma_desc(vdesc);
        for (i = 0; i < fdesc->n_nodes; i++)
                dma_pool_free(fdesc->fchan->node_pool, fdesc->node[i].desc,
                              fdesc->node[i].pdesc);
        kfree(fdesc);
}

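/*
 * Allocate a software descriptor backed by sg_len hardware nodes from
 * the channel's DMA pool. Runs from prep callbacks, so only GFP_NOWAIT
 * allocations are used.
 */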
static struct st_fdma_desc *st_fdma_alloc_desc(struct st_fdma_chan *fchan,
                                               int sg_len)
{
        struct st_fdma_desc *fdesc;
        int i;

        fdesc = kzalloc(sizeof(*fdesc) +
                        sizeof(struct st_fdma_sw_node) * sg_len, GFP_NOWAIT);
        if (!fdesc)
                return NULL;

        fdesc->fchan = fchan;
        fdesc->n_nodes = sg_len;
        for (i = 0; i < sg_len; i++) {
                fdesc->node[i].desc = dma_pool_alloc(fchan->node_pool,
                                GFP_NOWAIT, &fdesc->node[i].pdesc);
                if (!fdesc->node[i].desc)
                        goto err;
        }
        return fdesc;

err:
        while (--i >= 0)
                dma_pool_free(fchan->node_pool, fdesc->node[i].desc,
                              fdesc->node[i].pdesc);
        kfree(fdesc);
        return NULL;
}

static int st_fdma_alloc_chan_res(struct dma_chan *chan)
{
        struct st_fdma_chan *fchan = to_st_fdma_chan(chan);

        /* Create the dma pool for descriptor allocation */
        fchan->node_pool = dma_pool_create(dev_name(&chan->dev->device),
                                            fchan->fdev->dev,
                                            sizeof(struct st_fdma_hw_node),
                                            __alignof__(struct st_fdma_hw_node),
                                            0);

        if (!fchan->node_pool) {
                dev_err(fchan->fdev->dev, "unable to allocate desc pool\n");
                return -ENOMEM;
        }

        dev_dbg(fchan->fdev->dev, "alloc ch_id:%d type:%d\n",
                fchan->vchan.chan.chan_id, fchan->cfg.type);

        return 0;
}

static void st_fdma_free_chan_res(struct dma_chan *chan)
{
        struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
        struct rproc *rproc = fchan->fdev->slim_rproc->rproc;
        unsigned long flags;

        LIST_HEAD(head);

        dev_dbg(fchan->fdev->dev, "%s: freeing chan:%d\n",
                __func__, fchan->vchan.chan.chan_id);

        if (fchan->cfg.type != ST_FDMA_TYPE_FREE_RUN)
                st_fdma_dreq_put(fchan);

        spin_lock_irqsave(&fchan->vchan.lock, flags);
        fchan->fdesc = NULL;
        spin_unlock_irqrestore(&fchan->vchan.lock, flags);

        dma_pool_destroy(fchan->node_pool);
        fchan->node_pool = NULL;
        memset(&fchan->cfg, 0, sizeof(struct st_fdma_cfg));

        rproc_shutdown(rproc);
}

static struct dma_async_tx_descriptor *st_fdma_prep_dma_memcpy(
        struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
        size_t len, unsigned long flags)
{
        struct st_fdma_chan *fchan;
        struct st_fdma_desc *fdesc;
        struct st_fdma_hw_node *hw_node;

        if (!len)
                return NULL;

        fchan = to_st_fdma_chan(chan);

        /* We only require a single descriptor */
        fdesc = st_fdma_alloc_desc(fchan, 1);
        if (!fdesc) {
                dev_err(fchan->fdev->dev, "no memory for desc\n");
                return NULL;
        }

        hw_node = fdesc->node[0].desc;
        hw_node->next = 0;
        hw_node->control = FDMA_NODE_CTRL_REQ_MAP_FREE_RUN;
        hw_node->control |= FDMA_NODE_CTRL_SRC_INCR;
        hw_node->control |= FDMA_NODE_CTRL_DST_INCR;
        hw_node->control |= FDMA_NODE_CTRL_INT_EON;
        hw_node->nbytes = len;
        hw_node->saddr = src;
        hw_node->daddr = dst;
        hw_node->generic.length = len;
        hw_node->generic.sstride = 0;
        hw_node->generic.dstride = 0;

        return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}

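/*
 * Program the request control register for a slave transfer: transfer
 * direction, the load/store opcode matching the slave bus width, and
 * the number of operations per request derived from maxburst.
 */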
static int config_reqctrl(struct st_fdma_chan *fchan,
                          enum dma_transfer_direction direction)
{
        u32 maxburst = 0, addr = 0;
        enum dma_slave_buswidth width;
        int ch_id = fchan->vchan.chan.chan_id;
        struct st_fdma_dev *fdev = fchan->fdev;

        switch (direction) {
        case DMA_DEV_TO_MEM:
                fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_WNR;
                maxburst = fchan->scfg.src_maxburst;
                width = fchan->scfg.src_addr_width;
                addr = fchan->scfg.src_addr;
                break;

        case DMA_MEM_TO_DEV:
                fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_WNR;
                maxburst = fchan->scfg.dst_maxburst;
                width = fchan->scfg.dst_addr_width;
                addr = fchan->scfg.dst_addr;
                break;

        default:
                return -EINVAL;
        }

        fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_OPCODE_MASK;

        switch (width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
                fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST1;
                break;

        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST2;
                break;

        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST4;
                break;

        case DMA_SLAVE_BUSWIDTH_8_BYTES:
                fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_OPCODE_LD_ST8;
                break;

        default:
                return -EINVAL;
        }

        fchan->cfg.req_ctrl &= ~FDMA_REQ_CTRL_NUM_OPS_MASK;
        fchan->cfg.req_ctrl |= FDMA_REQ_CTRL_NUM_OPS(maxburst - 1);
        dreq_write(fchan, fchan->cfg.req_ctrl, FDMA_REQ_CTRL_OFST);

        fchan->cfg.dev_addr = addr;
        fchan->cfg.dir = direction;

        dev_dbg(fdev->dev, "chan:%d config_reqctrl:%#x req_ctrl:%#lx\n",
                ch_id, addr, fchan->cfg.req_ctrl);

        return 0;
}

static void fill_hw_node(struct st_fdma_hw_node *hw_node,
                        struct st_fdma_chan *fchan,
                        enum dma_transfer_direction direction)
{
        if (direction == DMA_MEM_TO_DEV) {
                hw_node->control |= FDMA_NODE_CTRL_SRC_INCR;
                hw_node->control |= FDMA_NODE_CTRL_DST_STATIC;
                hw_node->daddr = fchan->cfg.dev_addr;
        } else {
                hw_node->control |= FDMA_NODE_CTRL_SRC_STATIC;
                hw_node->control |= FDMA_NODE_CTRL_DST_INCR;
                hw_node->saddr = fchan->cfg.dev_addr;
        }

        hw_node->generic.sstride = 0;
        hw_node->generic.dstride = 0;
}

static inline struct st_fdma_chan *st_fdma_prep_common(struct dma_chan *chan,
                size_t len, enum dma_transfer_direction direction)
{
        struct st_fdma_chan *fchan;

        if (!chan || !len)
                return NULL;

        fchan = to_st_fdma_chan(chan);

        if (!is_slave_direction(direction)) {
                dev_err(fchan->fdev->dev, "bad direction?\n");
                return NULL;
        }

        return fchan;
}

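/*
 * Build a cyclic transfer as a ring of nodes, one per period: each
 * node points to the next, the last wraps back to the first, and every
 * node raises an interrupt on completion.
 */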
static struct dma_async_tx_descriptor *st_fdma_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t buf_addr, size_t len,
                size_t period_len, enum dma_transfer_direction direction,
                unsigned long flags)
{
        struct st_fdma_chan *fchan;
        struct st_fdma_desc *fdesc;
        int sg_len, i;

        fchan = st_fdma_prep_common(chan, len, direction);
        if (!fchan)
                return NULL;

        if (!period_len)
                return NULL;

        if (config_reqctrl(fchan, direction)) {
                dev_err(fchan->fdev->dev, "bad width or direction\n");
                return NULL;
        }

        /* the buffer length must be a multiple of period_len */
        if (len % period_len != 0) {
                dev_err(fchan->fdev->dev, "len is not multiple of period\n");
                return NULL;
        }

        sg_len = len / period_len;
        fdesc = st_fdma_alloc_desc(fchan, sg_len);
        if (!fdesc) {
                dev_err(fchan->fdev->dev, "no memory for desc\n");
                return NULL;
        }

        fdesc->iscyclic = true;

        for (i = 0; i < sg_len; i++) {
                struct st_fdma_hw_node *hw_node = fdesc->node[i].desc;

                hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;

                hw_node->control =
                        FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);
                hw_node->control |= FDMA_NODE_CTRL_INT_EON;

                fill_hw_node(hw_node, fchan, direction);

                if (direction == DMA_MEM_TO_DEV)
                        hw_node->saddr = buf_addr + (i * period_len);
                else
                        hw_node->daddr = buf_addr + (i * period_len);

                hw_node->nbytes = period_len;
                hw_node->generic.length = period_len;
        }

        return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}

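/*
 * Build a slave scatter-gather transfer with one hardware node per sg
 * entry; only the last node raises a completion interrupt.
 */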
static struct dma_async_tx_descriptor *st_fdma_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context)
{
        struct st_fdma_chan *fchan;
        struct st_fdma_desc *fdesc;
        struct st_fdma_hw_node *hw_node;
        struct scatterlist *sg;
        int i;

        fchan = st_fdma_prep_common(chan, sg_len, direction);
        if (!fchan)
                return NULL;

        if (!sgl)
                return NULL;

        fdesc = st_fdma_alloc_desc(fchan, sg_len);
        if (!fdesc) {
                dev_err(fchan->fdev->dev, "no memory for desc\n");
                return NULL;
        }

        fdesc->iscyclic = false;

        for_each_sg(sgl, sg, sg_len, i) {
                hw_node = fdesc->node[i].desc;

                hw_node->next = fdesc->node[(i + 1) % sg_len].pdesc;
                hw_node->control = FDMA_NODE_CTRL_REQ_MAP_DREQ(fchan->dreq_line);

                fill_hw_node(hw_node, fchan, direction);

                if (direction == DMA_MEM_TO_DEV)
                        hw_node->saddr = sg_dma_address(sg);
                else
                        hw_node->daddr = sg_dma_address(sg);

                hw_node->nbytes = sg_dma_len(sg);
                hw_node->generic.length = sg_dma_len(sg);
        }

        /* interrupt at end of last node */
        hw_node->control |= FDMA_NODE_CTRL_INT_EON;

        return vchan_tx_prep(&fchan->vchan, &fdesc->vdesc, flags);
}

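/*
 * Sum the bytes left to transfer: walk the node list backwards, adding
 * whole nodes until reaching the one the hardware is processing, whose
 * live byte count is read from the CNTN register.
 */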
static size_t st_fdma_desc_residue(struct st_fdma_chan *fchan,
                                   struct virt_dma_desc *vdesc,
                                   bool in_progress)
{
        struct st_fdma_desc *fdesc = fchan->fdesc;
        size_t residue = 0;
        dma_addr_t cur_addr = 0;
        int i;

        if (in_progress) {
                cur_addr = fchan_read(fchan, FDMA_CH_CMD_OFST);
                cur_addr &= FDMA_CH_CMD_DATA_MASK;
        }

        for (i = fchan->fdesc->n_nodes - 1; i >= 0; i--) {
                if (cur_addr == fdesc->node[i].pdesc) {
                        residue += fnode_read(fchan, FDMA_CNTN_OFST);
                        break;
                }
                residue += fdesc->node[i].desc->nbytes;
        }

        return residue;
}

static enum dma_status st_fdma_tx_status(struct dma_chan *chan,
                                         dma_cookie_t cookie,
                                         struct dma_tx_state *txstate)
{
        struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
        struct virt_dma_desc *vd;
        enum dma_status ret;
        unsigned long flags;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE || !txstate)
                return ret;

        spin_lock_irqsave(&fchan->vchan.lock, flags);
        vd = vchan_find_desc(&fchan->vchan, cookie);
        if (fchan->fdesc && cookie == fchan->fdesc->vdesc.tx.cookie)
                txstate->residue = st_fdma_desc_residue(fchan, vd, true);
        else if (vd)
                txstate->residue = st_fdma_desc_residue(fchan, vd, false);
        else
                txstate->residue = 0;

        spin_unlock_irqrestore(&fchan->vchan.lock, flags);

        return ret;
}

static void st_fdma_issue_pending(struct dma_chan *chan)
{
        struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&fchan->vchan.lock, flags);

        if (vchan_issue_pending(&fchan->vchan) && !fchan->fdesc)
                st_fdma_xfer_desc(fchan);

        spin_unlock_irqrestore(&fchan->vchan.lock, flags);
}

static int st_fdma_pause(struct dma_chan *chan)
{
        unsigned long flags;
        struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
        int ch_id = fchan->vchan.chan.chan_id;
        unsigned long cmd = FDMA_CMD_PAUSE(ch_id);

        dev_dbg(fchan->fdev->dev, "pause chan:%d\n", ch_id);

        spin_lock_irqsave(&fchan->vchan.lock, flags);
        if (fchan->fdesc)
                fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST);
        spin_unlock_irqrestore(&fchan->vchan.lock, flags);

        return 0;
}

static int st_fdma_resume(struct dma_chan *chan)
{
        unsigned long flags;
        unsigned long val;
        struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
        int ch_id = fchan->vchan.chan.chan_id;

        dev_dbg(fchan->fdev->dev, "resume chan:%d\n", ch_id);

        spin_lock_irqsave(&fchan->vchan.lock, flags);
        if (fchan->fdesc) {
                val = fchan_read(fchan, FDMA_CH_CMD_OFST);
                val &= FDMA_CH_CMD_DATA_MASK;
                fchan_write(fchan, val, FDMA_CH_CMD_OFST);
        }
        spin_unlock_irqrestore(&fchan->vchan.lock, flags);

        return 0;
}

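/*
 * Abort the channel: pause the hardware, drop the in-flight descriptor
 * and free everything still queued on the virtual channel.
 */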
static int st_fdma_terminate_all(struct dma_chan *chan)
{
        unsigned long flags;
        LIST_HEAD(head);
        struct st_fdma_chan *fchan = to_st_fdma_chan(chan);
        int ch_id = fchan->vchan.chan.chan_id;
        unsigned long cmd = FDMA_CMD_PAUSE(ch_id);

        dev_dbg(fchan->fdev->dev, "terminate chan:%d\n", ch_id);

        spin_lock_irqsave(&fchan->vchan.lock, flags);
        fdma_write(fchan->fdev, cmd, FDMA_CMD_SET_OFST);
        fchan->fdesc = NULL;
        vchan_get_all_descriptors(&fchan->vchan, &head);
        spin_unlock_irqrestore(&fchan->vchan.lock, flags);
        vchan_dma_desc_free_list(&fchan->vchan, &head);

        return 0;
}

static int st_fdma_slave_config(struct dma_chan *chan,
                                struct dma_slave_config *slave_cfg)
{
        struct st_fdma_chan *fchan = to_st_fdma_chan(chan);

        memcpy(&fchan->scfg, slave_cfg, sizeof(fchan->scfg));
        return 0;
}

static const struct st_fdma_driverdata fdma_mpe31_stih407_11 = {
        .name = "STiH407",
        .id = 0,
};

static const struct st_fdma_driverdata fdma_mpe31_stih407_12 = {
        .name = "STiH407",
        .id = 1,
};

static const struct st_fdma_driverdata fdma_mpe31_stih407_13 = {
        .name = "STiH407",
        .id = 2,
};

static const struct of_device_id st_fdma_match[] = {
        { .compatible = "st,stih407-fdma-mpe31-11",
          .data = &fdma_mpe31_stih407_11 },
        { .compatible = "st,stih407-fdma-mpe31-12",
          .data = &fdma_mpe31_stih407_12 },
        { .compatible = "st,stih407-fdma-mpe31-13",
          .data = &fdma_mpe31_stih407_13 },
        {},
};
MODULE_DEVICE_TABLE(of, st_fdma_match);

static int st_fdma_parse_dt(struct platform_device *pdev,
                        const struct st_fdma_driverdata *drvdata,
                        struct st_fdma_dev *fdev)
{
        snprintf(fdev->fw_name, FW_NAME_SIZE, "fdma_%s_%d.elf",
                drvdata->name, drvdata->id);

        return of_property_read_u32(pdev->dev.of_node, "dma-channels",
                                    &fdev->nr_channels);
}

#define FDMA_DMA_BUSWIDTHS      (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
                                 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
                                 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
                                 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

static void st_fdma_free(struct st_fdma_dev *fdev)
{
        struct st_fdma_chan *fchan;
        int i;

        for (i = 0; i < fdev->nr_channels; i++) {
                fchan = &fdev->chans[i];
                list_del(&fchan->vchan.chan.device_node);
                tasklet_kill(&fchan->vchan.task);
        }
}

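/*
 * Probe: parse DT for the channel count and firmware name, hook up the
 * shared interrupt, allocate the SLIM rproc that executes the FDMA
 * firmware, then register with the dmaengine core and expose the
 * controller to DT clients via st_fdma_of_xlate().
 */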
static int st_fdma_probe(struct platform_device *pdev)
{
        struct st_fdma_dev *fdev;
        const struct of_device_id *match;
        struct device_node *np = pdev->dev.of_node;
        const struct st_fdma_driverdata *drvdata;
        int ret, i;

        match = of_match_device(st_fdma_match, &pdev->dev);
        if (!match || !match->data) {
                dev_err(&pdev->dev, "No device match found\n");
                return -ENODEV;
        }

        drvdata = match->data;

        fdev = devm_kzalloc(&pdev->dev, sizeof(*fdev), GFP_KERNEL);
        if (!fdev)
                return -ENOMEM;

        ret = st_fdma_parse_dt(pdev, drvdata, fdev);
        if (ret) {
                dev_err(&pdev->dev, "unable to find platform data\n");
                goto err;
        }

        fdev->chans = devm_kcalloc(&pdev->dev, fdev->nr_channels,
                                   sizeof(struct st_fdma_chan), GFP_KERNEL);
        if (!fdev->chans)
                return -ENOMEM;

        fdev->dev = &pdev->dev;
        fdev->drvdata = drvdata;
        platform_set_drvdata(pdev, fdev);

        fdev->irq = platform_get_irq(pdev, 0);
        if (fdev->irq < 0) {
                dev_err(&pdev->dev, "Failed to get irq resource\n");
                return -EINVAL;
        }

        ret = devm_request_irq(&pdev->dev, fdev->irq, st_fdma_irq_handler, 0,
                               dev_name(&pdev->dev), fdev);
        if (ret) {
                dev_err(&pdev->dev, "Failed to request irq (%d)\n", ret);
                goto err;
        }

        fdev->slim_rproc = st_slim_rproc_alloc(pdev, fdev->fw_name);
        if (IS_ERR(fdev->slim_rproc)) {
                ret = PTR_ERR(fdev->slim_rproc);
                dev_err(&pdev->dev, "slim_rproc_alloc failed (%d)\n", ret);
                goto err;
        }

        /* Initialise list of FDMA channels */
        INIT_LIST_HEAD(&fdev->dma_device.channels);
        for (i = 0; i < fdev->nr_channels; i++) {
                struct st_fdma_chan *fchan = &fdev->chans[i];

                fchan->fdev = fdev;
                fchan->vchan.desc_free = st_fdma_free_desc;
                vchan_init(&fchan->vchan, &fdev->dma_device);
        }

        /* Initialise the FDMA dreq (reserve 0 & 31 for FDMA use) */
        fdev->dreq_mask = BIT(0) | BIT(31);

        dma_cap_set(DMA_SLAVE, fdev->dma_device.cap_mask);
        dma_cap_set(DMA_CYCLIC, fdev->dma_device.cap_mask);
        dma_cap_set(DMA_MEMCPY, fdev->dma_device.cap_mask);

        fdev->dma_device.dev = &pdev->dev;
        fdev->dma_device.device_alloc_chan_resources = st_fdma_alloc_chan_res;
        fdev->dma_device.device_free_chan_resources = st_fdma_free_chan_res;
        fdev->dma_device.device_prep_dma_cyclic = st_fdma_prep_dma_cyclic;
        fdev->dma_device.device_prep_slave_sg = st_fdma_prep_slave_sg;
        fdev->dma_device.device_prep_dma_memcpy = st_fdma_prep_dma_memcpy;
        fdev->dma_device.device_tx_status = st_fdma_tx_status;
        fdev->dma_device.device_issue_pending = st_fdma_issue_pending;
        fdev->dma_device.device_terminate_all = st_fdma_terminate_all;
        fdev->dma_device.device_config = st_fdma_slave_config;
        fdev->dma_device.device_pause = st_fdma_pause;
        fdev->dma_device.device_resume = st_fdma_resume;

        fdev->dma_device.src_addr_widths = FDMA_DMA_BUSWIDTHS;
        fdev->dma_device.dst_addr_widths = FDMA_DMA_BUSWIDTHS;
        fdev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        fdev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

        ret = dma_async_device_register(&fdev->dma_device);
        if (ret) {
                dev_err(&pdev->dev,
                        "Failed to register DMA device (%d)\n", ret);
                goto err_rproc;
        }

        ret = of_dma_controller_register(np, st_fdma_of_xlate, fdev);
        if (ret) {
                dev_err(&pdev->dev,
                        "Failed to register controller (%d)\n", ret);
                goto err_dma_dev;
        }

        dev_info(&pdev->dev, "ST FDMA engine driver, irq:%d\n", fdev->irq);

        return 0;

err_dma_dev:
        dma_async_device_unregister(&fdev->dma_device);
err_rproc:
        st_fdma_free(fdev);
        st_slim_rproc_put(fdev->slim_rproc);
err:
        return ret;
}

static int st_fdma_remove(struct platform_device *pdev)
{
        struct st_fdma_dev *fdev = platform_get_drvdata(pdev);

        devm_free_irq(&pdev->dev, fdev->irq, fdev);
        st_slim_rproc_put(fdev->slim_rproc);
        of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(&fdev->dma_device);

        return 0;
}

static struct platform_driver st_fdma_platform_driver = {
        .driver = {
                .name = DRIVER_NAME,
                .of_match_table = st_fdma_match,
        },
        .probe = st_fdma_probe,
        .remove = st_fdma_remove,
};
module_platform_driver(st_fdma_platform_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver");
MODULE_AUTHOR("Ludovic Barre <Ludovic.barre@st.com>");
MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
MODULE_ALIAS("platform:" DRIVER_NAME);