// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA core driver
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/dma/edma.h>
#include <linux/dma-mapping.h>

#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "dw-hdma-v0-core.h"
#include "../dmaengine.h"
#include "../virt-dma.h"
static inline
struct device *dchan2dev(struct dma_chan *dchan)
{
	return &dchan->dev->device;
}

static inline
struct device *chan2dev(struct dw_edma_chan *chan)
{
	return &chan->vc.chan.dev->device;
}

static inline
struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct dw_edma_desc, vd);
}
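/*
 * Translate a CPU physical address into a PCI bus address. Where the two
 * address spaces differ, the chip glue supplies a pci_address() callback;
 * otherwise the CPU address is used unchanged.
 */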
static inline
u64 dw_edma_get_pci_address(struct dw_edma_chan *chan, phys_addr_t cpu_addr)
{
	struct dw_edma_chip *chip = chan->dw->chip;

	if (chip->ops->pci_address)
		return chip->ops->pci_address(chip->dev, cpu_addr);

	return cpu_addr;
}
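/*
 * A transfer descriptor (dw_edma_desc) is built out of chunks, each of
 * which maps onto one hardware linked list; every chunk in turn carries a
 * list of bursts, one per contiguous block to be moved.
 */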
static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *burst;

	burst = kzalloc(sizeof(*burst), GFP_NOWAIT);
	if (unlikely(!burst))
		return NULL;

	INIT_LIST_HEAD(&burst->list);
	if (chunk->burst) {
		/* Create and add new element into the linked list */
		chunk->bursts_alloc++;
		list_add_tail(&burst->list, &chunk->burst->list);
	} else {
		/* List head */
		chunk->bursts_alloc = 0;
		chunk->burst = burst;
	}

	return burst;
}
static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chip *chip = desc->chan->dw->chip;
	struct dw_edma_chan *chan = desc->chan;
	struct dw_edma_chunk *chunk;

	chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
	if (unlikely(!chunk))
		return NULL;

	INIT_LIST_HEAD(&chunk->list);
	chunk->chan = chan;
	/* Toggling change bit (CB) in each chunk, this is a mechanism to
	 * inform the eDMA HW block that this is a new linked list ready
	 * to be consumed.
	 *  - Odd chunks originate CB equal to 0
	 *  - Even chunks originate CB equal to 1
	 */
	chunk->cb = !(desc->chunks_alloc % 2);
	if (chan->dir == EDMA_DIR_WRITE) {
		chunk->ll_region.paddr = chip->ll_region_wr[chan->id].paddr;
		chunk->ll_region.vaddr = chip->ll_region_wr[chan->id].vaddr;
	} else {
		chunk->ll_region.paddr = chip->ll_region_rd[chan->id].paddr;
		chunk->ll_region.vaddr = chip->ll_region_rd[chan->id].vaddr;
	}

	if (desc->chunk) {
		/* Create and add new element into the linked list */
		if (!dw_edma_alloc_burst(chunk)) {
			kfree(chunk);
			return NULL;
		}
		desc->chunks_alloc++;
		list_add_tail(&chunk->list, &desc->chunk->list);
	} else {
		/* List head */
		chunk->burst = NULL;
		desc->chunks_alloc = 0;
		desc->chunk = chunk;
	}

	return chunk;
}
static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (unlikely(!desc))
		return NULL;

	desc->chan = chan;
	if (!dw_edma_alloc_chunk(desc)) {
		kfree(desc);
		return NULL;
	}

	return desc;
}
static void dw_edma_free_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *child, *_next;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &chunk->burst->list, list) {
		list_del(&child->list);
		kfree(child);
		chunk->bursts_alloc--;
	}

	/* Remove the list head */
	kfree(child);
	chunk->burst = NULL;
}
static void dw_edma_free_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chunk *child, *_next;

	if (!desc->chunk)
		return;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &desc->chunk->list, list) {
		dw_edma_free_burst(child);
		list_del(&child->list);
		kfree(child);
		desc->chunks_alloc--;
	}

	/* Remove the list head */
	kfree(child);
	desc->chunk = NULL;
}
static void dw_edma_free_desc(struct dw_edma_desc *desc)
{
	dw_edma_free_chunk(desc);
	kfree(desc);
}

static void vchan_free_desc(struct virt_dma_desc *vdesc)
{
	dw_edma_free_desc(vd2dw_edma_desc(vdesc));
}
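/*
 * Program the next chunk of the descriptor at the head of the issued list
 * into the hardware. Returns 1 if a chunk was started, 0 if there is
 * nothing left to transfer.
 */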
static int dw_edma_start_transfer(struct dw_edma_chan *chan)
{
	struct dw_edma *dw = chan->dw;
	struct dw_edma_chunk *child;
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return 0;

	desc = vd2dw_edma_desc(vd);
	if (!desc)
		return 0;

	child = list_first_entry_or_null(&desc->chunk->list,
					 struct dw_edma_chunk, list);
	if (!child)
		return 0;

	dw_edma_core_start(dw, child, !desc->xfer_sz);
	desc->xfer_sz += child->ll_region.sz;
	dw_edma_free_burst(child);
	list_del(&child->list);
	kfree(child);
	desc->chunks_alloc--;

	return 1;
}
static void dw_edma_device_caps(struct dma_chan *dchan,
				struct dma_slave_caps *caps)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
		if (chan->dir == EDMA_DIR_READ)
			caps->directions = BIT(DMA_DEV_TO_MEM);
		else
			caps->directions = BIT(DMA_MEM_TO_DEV);
	} else {
		if (chan->dir == EDMA_DIR_WRITE)
			caps->directions = BIT(DMA_DEV_TO_MEM);
		else
			caps->directions = BIT(DMA_MEM_TO_DEV);
	}
}
static int dw_edma_device_config(struct dma_chan *dchan,
				 struct dma_slave_config *config)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	memcpy(&chan->config, config, sizeof(*config));
	chan->configured = true;

	return 0;
}
static int dw_edma_device_pause(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured)
		err = -EPERM;
	else if (chan->status != EDMA_ST_BUSY)
		err = -EPERM;
	else if (chan->request != EDMA_REQ_NONE)
		err = -EPERM;
	else
		chan->request = EDMA_REQ_PAUSE;

	return err;
}
static int dw_edma_device_resume(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured) {
		err = -EPERM;
	} else if (chan->status != EDMA_ST_PAUSE) {
		err = -EPERM;
	} else if (chan->request != EDMA_REQ_NONE) {
		err = -EPERM;
	} else {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}

	return err;
}
static int dw_edma_device_terminate_all(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured) {
		/* Do nothing */
	} else if (chan->status == EDMA_ST_PAUSE) {
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->status == EDMA_ST_IDLE) {
		chan->configured = false;
	} else if (dw_edma_core_ch_status(chan) == DMA_COMPLETE) {
		/*
		 * The channel is in a false BUSY state, probably didn't
		 * receive or lost an interrupt
		 */
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->request > EDMA_REQ_PAUSE) {
		err = -EPERM;
	} else {
		chan->request = EDMA_REQ_STOP;
	}

	return err;
}
static void dw_edma_device_issue_pending(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	unsigned long flags;

	if (!chan->configured)
		return;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (vchan_issue_pending(&chan->vc) && chan->request == EDMA_REQ_NONE &&
	    chan->status == EDMA_ST_IDLE) {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
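/*
 * Residue is reported at descriptor granularity: the difference between
 * the descriptor's total allocated size and the amount already handed to
 * the hardware (alloc_sz - xfer_sz).
 */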
static enum dma_status
dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
			 struct dma_tx_state *txstate)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE)
		ret = DMA_PAUSED;

	if (!txstate)
		goto ret_residue;

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_find_desc(&chan->vc, cookie);
	if (vd) {
		desc = vd2dw_edma_desc(vd);
		if (desc)
			residue = desc->alloc_sz - desc->xfer_sz;
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);

ret_residue:
	dma_set_residue(txstate, residue);

	return ret;
}
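/*
 * Common preparation path for the slave SG, cyclic, and interleaved prep
 * callbacks: validate the dmaengine direction against the channel
 * direction, then build the chunk/burst lists describing the transfer.
 */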
static struct dma_async_tx_descriptor *
dw_edma_device_transfer(struct dw_edma_transfer *xfer)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
	enum dma_transfer_direction dir = xfer->direction;
	struct scatterlist *sg = NULL;
	struct dw_edma_chunk *chunk;
	struct dw_edma_burst *burst;
	struct dw_edma_desc *desc;
	u64 src_addr, dst_addr;
	size_t fsz = 0;
	u32 cnt = 0;
	int i;

	if (!chan->configured)
		return NULL;

	/*
	 * Local Root Port/End-point              Remote End-point
	 * +-----------------------+ PCIe bus +----------------------+
	 * |                       |    +-+   |                      |
	 * |    DEV_TO_MEM   Rx Ch <----+ +---+ Tx Ch  DEV_TO_MEM    |
	 * |                       |    | |   |                      |
	 * |    MEM_TO_DEV   Tx Ch +----+ +---> Rx Ch  MEM_TO_DEV    |
	 * |                       |    +-+   |                      |
	 * +-----------------------+          +----------------------+
	 *
	 * 1. Normal logic:
	 * If eDMA is embedded into the DW PCIe RP/EP and controlled from the
	 * CPU/Application side, the Rx channel (EDMA_DIR_READ) will be used
	 * for the device read operations (DEV_TO_MEM) and the Tx channel
	 * (EDMA_DIR_WRITE) - for the write operations (MEM_TO_DEV).
	 *
	 * 2. Inverted logic:
	 * If eDMA is embedded into a Remote PCIe EP and is controlled by the
	 * MWr/MRd TLPs sent from the CPU's PCIe host controller, the Tx
	 * channel (EDMA_DIR_WRITE) will be used for the device read operations
	 * (DEV_TO_MEM) and the Rx channel (EDMA_DIR_READ) - for the write
	 * operations (MEM_TO_DEV).
	 *
	 * It is the client driver responsibility to choose a proper channel
	 * for the DMA transfers.
	 */
	if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
		if ((chan->dir == EDMA_DIR_READ && dir != DMA_DEV_TO_MEM) ||
		    (chan->dir == EDMA_DIR_WRITE && dir != DMA_MEM_TO_DEV))
			return NULL;
	} else {
		if ((chan->dir == EDMA_DIR_WRITE && dir != DMA_DEV_TO_MEM) ||
		    (chan->dir == EDMA_DIR_READ && dir != DMA_MEM_TO_DEV))
			return NULL;
	}

	if (xfer->type == EDMA_XFER_CYCLIC) {
		if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
			return NULL;
	} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
		if (xfer->xfer.sg.len < 1)
			return NULL;
	} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
		if (!xfer->xfer.il->numf || xfer->xfer.il->frame_size < 1)
			return NULL;
		if (!xfer->xfer.il->src_inc || !xfer->xfer.il->dst_inc)
			return NULL;
	} else {
		return NULL;
	}

	desc = dw_edma_alloc_desc(chan);
	if (unlikely(!desc))
		goto err_alloc;

	chunk = dw_edma_alloc_chunk(desc);
	if (unlikely(!chunk))
		goto err_alloc;

	if (xfer->type == EDMA_XFER_INTERLEAVED) {
		src_addr = xfer->xfer.il->src_start;
		dst_addr = xfer->xfer.il->dst_start;
	} else {
		src_addr = chan->config.src_addr;
		dst_addr = chan->config.dst_addr;
	}

	if (dir == DMA_DEV_TO_MEM)
		src_addr = dw_edma_get_pci_address(chan, (phys_addr_t)src_addr);
	else
		dst_addr = dw_edma_get_pci_address(chan, (phys_addr_t)dst_addr);

	if (xfer->type == EDMA_XFER_CYCLIC) {
		cnt = xfer->xfer.cyclic.cnt;
	} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
		cnt = xfer->xfer.sg.len;
		sg = xfer->xfer.sg.sgl;
	} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
		cnt = xfer->xfer.il->numf * xfer->xfer.il->frame_size;
		fsz = xfer->xfer.il->frame_size;
	}

	for (i = 0; i < cnt; i++) {
		if (xfer->type == EDMA_XFER_SCATTER_GATHER && !sg)
			break;

		if (chunk->bursts_alloc == chan->ll_max) {
			chunk = dw_edma_alloc_chunk(desc);
			if (unlikely(!chunk))
				goto err_alloc;
		}

		burst = dw_edma_alloc_burst(chunk);
		if (unlikely(!burst))
			goto err_alloc;

		if (xfer->type == EDMA_XFER_CYCLIC)
			burst->sz = xfer->xfer.cyclic.len;
		else if (xfer->type == EDMA_XFER_SCATTER_GATHER)
			burst->sz = sg_dma_len(sg);
		else if (xfer->type == EDMA_XFER_INTERLEAVED)
			burst->sz = xfer->xfer.il->sgl[i % fsz].size;

		chunk->ll_region.sz += burst->sz;
		desc->alloc_sz += burst->sz;

		if (dir == DMA_DEV_TO_MEM) {
			burst->sar = src_addr;
			if (xfer->type == EDMA_XFER_CYCLIC) {
				burst->dar = xfer->xfer.cyclic.paddr;
			} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
				src_addr += sg_dma_len(sg);
				burst->dar = sg_dma_address(sg);
				/* Unlike the typical assumption by other
				 * drivers/IPs the peripheral memory isn't
				 * a FIFO memory, in this case, it's a
				 * linear memory and that's why the source
				 * and destination addresses are increased
				 * by the same portion (data length)
				 */
			} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
				burst->dar = dst_addr;
			}
		} else {
			burst->dar = dst_addr;
			if (xfer->type == EDMA_XFER_CYCLIC) {
				burst->sar = xfer->xfer.cyclic.paddr;
			} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
				dst_addr += sg_dma_len(sg);
				burst->sar = sg_dma_address(sg);
				/* Unlike the typical assumption by other
				 * drivers/IPs the peripheral memory isn't
				 * a FIFO memory, in this case, it's a
				 * linear memory and that's why the source
				 * and destination addresses are increased
				 * by the same portion (data length)
				 */
			} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
				burst->sar = src_addr;
			}
		}

		if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
			sg = sg_next(sg);
		} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
			struct dma_interleaved_template *il = xfer->xfer.il;
			struct data_chunk *dc = &il->sgl[i % fsz];

			src_addr += burst->sz;
			if (il->src_sgl)
				src_addr += dmaengine_get_src_icg(il, dc);

			dst_addr += burst->sz;
			if (il->dst_sgl)
				dst_addr += dmaengine_get_dst_icg(il, dc);
		}
	}

	return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags);

err_alloc:
	if (desc)
		dw_edma_free_desc(desc);

	return NULL;
}
static struct dma_async_tx_descriptor *
dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			     unsigned int len,
			     enum dma_transfer_direction direction,
			     unsigned long flags, void *context)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.sg.sgl = sgl;
	xfer.xfer.sg.len = len;
	xfer.flags = flags;
	xfer.type = EDMA_XFER_SCATTER_GATHER;

	return dw_edma_device_transfer(&xfer);
}
static struct dma_async_tx_descriptor *
dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr,
			       size_t len, size_t count,
			       enum dma_transfer_direction direction,
			       unsigned long flags)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.cyclic.paddr = paddr;
	xfer.xfer.cyclic.len = len;
	xfer.xfer.cyclic.cnt = count;
	xfer.flags = flags;
	xfer.type = EDMA_XFER_CYCLIC;

	return dw_edma_device_transfer(&xfer);
}
static struct dma_async_tx_descriptor *
dw_edma_device_prep_interleaved_dma(struct dma_chan *dchan,
				    struct dma_interleaved_template *ilt,
				    unsigned long flags)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = ilt->dir;
	xfer.xfer.il = ilt;
	xfer.flags = flags;
	xfer.type = EDMA_XFER_INTERLEAVED;

	return dw_edma_device_transfer(&xfer);
}
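/*
 * Completion handler, called from the core interrupt dispatcher when a
 * chunk finishes: depending on the pending request it starts the next
 * chunk, completes the descriptor, or moves the channel to the paused or
 * idle state.
 */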
static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		switch (chan->request) {
		case EDMA_REQ_NONE:
			desc = vd2dw_edma_desc(vd);
			if (!desc->chunks_alloc) {
				list_del(&vd->node);
				vchan_cookie_complete(vd);
			}

			/* Continue transferring if there are remaining chunks
			 * or issued requests.
			 */
			chan->status = dw_edma_start_transfer(chan) ?
						EDMA_ST_BUSY : EDMA_ST_IDLE;
			break;

		case EDMA_REQ_STOP:
			list_del(&vd->node);
			vchan_cookie_complete(vd);
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_IDLE;
			break;

		case EDMA_REQ_PAUSE:
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_PAUSE;
			break;

		default:
			break;
		}
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}
static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		list_del(&vd->node);
		vchan_cookie_complete(vd);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	chan->request = EDMA_REQ_NONE;
	chan->status = EDMA_ST_IDLE;
}
static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data)
{
	struct dw_edma_irq *dw_irq = data;

	return dw_edma_core_handle_int(dw_irq, EDMA_DIR_WRITE,
				       dw_edma_done_interrupt,
				       dw_edma_abort_interrupt);
}

static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data)
{
	struct dw_edma_irq *dw_irq = data;

	return dw_edma_core_handle_int(dw_irq, EDMA_DIR_READ,
				       dw_edma_done_interrupt,
				       dw_edma_abort_interrupt);
}

static irqreturn_t dw_edma_interrupt_common(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;

	ret |= dw_edma_interrupt_write(irq, data);
	ret |= dw_edma_interrupt_read(irq, data);

	return ret;
}
static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	if (chan->status != EDMA_ST_IDLE)
		return -EBUSY;

	return 0;
}
static void dw_edma_free_chan_resources(struct dma_chan *dchan)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
	int ret;

	while (time_before(jiffies, timeout)) {
		ret = dw_edma_device_terminate_all(dchan);
		if (!ret)
			break;

		if (time_after_eq(jiffies, timeout))
			return;

		cpu_relax();
	}
}
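/*
 * Initialize every write and read channel, bind each one to its interrupt
 * vector, then register the resulting dmaengine device.
 */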
static int dw_edma_channel_setup(struct dw_edma *dw, u32 wr_alloc, u32 rd_alloc)
{
	struct dw_edma_chip *chip = dw->chip;
	struct device *dev = chip->dev;
	struct dw_edma_chan *chan;
	struct dw_edma_irq *irq;
	struct dma_device *dma;
	u32 i, ch_cnt;
	u32 pos;

	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
	dma = &dw->dma;

	INIT_LIST_HEAD(&dma->channels);

	for (i = 0; i < ch_cnt; i++) {
		chan = &dw->chan[i];

		chan->dw = dw;

		if (i < dw->wr_ch_cnt) {
			chan->id = i;
			chan->dir = EDMA_DIR_WRITE;
		} else {
			chan->id = i - dw->wr_ch_cnt;
			chan->dir = EDMA_DIR_READ;
		}

		chan->configured = false;
		chan->request = EDMA_REQ_NONE;
		chan->status = EDMA_ST_IDLE;

		if (chan->dir == EDMA_DIR_WRITE)
			chan->ll_max = (chip->ll_region_wr[chan->id].sz / EDMA_LL_SZ);
		else
			chan->ll_max = (chip->ll_region_rd[chan->id].sz / EDMA_LL_SZ);
		chan->ll_max -= 1;

		dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n",
			 chan->dir == EDMA_DIR_WRITE ? "write" : "read",
			 chan->id, chan->ll_max);

		if (dw->nr_irqs == 1)
			pos = 0;
		else if (chan->dir == EDMA_DIR_WRITE)
			pos = chan->id % wr_alloc;
		else
			pos = wr_alloc + chan->id % rd_alloc;

		irq = &dw->irq[pos];

		if (chan->dir == EDMA_DIR_WRITE)
			irq->wr_mask |= BIT(chan->id);
		else
			irq->rd_mask |= BIT(chan->id);

		irq->dw = dw;
		memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));

		dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n",
			 chan->dir == EDMA_DIR_WRITE ? "write" : "read", chan->id,
			 chan->msi.address_hi, chan->msi.address_lo,
			 chan->msi.data);

		chan->vc.desc_free = vchan_free_desc;
		chan->vc.chan.private = chan->dir == EDMA_DIR_WRITE ?
					&dw->chip->dt_region_wr[chan->id] :
					&dw->chip->dt_region_rd[chan->id];

		vchan_init(&chan->vc, dma);

		dw_edma_core_ch_config(chan);
	}

	/* Set DMA channel capabilities */
	dma_cap_zero(dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
	dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	/* Set DMA channel callbacks */
	dma->dev = chip->dev;
	dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources;
	dma->device_free_chan_resources = dw_edma_free_chan_resources;
	dma->device_caps = dw_edma_device_caps;
	dma->device_config = dw_edma_device_config;
	dma->device_pause = dw_edma_device_pause;
	dma->device_resume = dw_edma_device_resume;
	dma->device_terminate_all = dw_edma_device_terminate_all;
	dma->device_issue_pending = dw_edma_device_issue_pending;
	dma->device_tx_status = dw_edma_device_tx_status;
	dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg;
	dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic;
	dma->device_prep_interleaved_dma = dw_edma_device_prep_interleaved_dma;

	dma_set_max_seg_size(dma->dev, U32_MAX);

	/* Register DMA device */
	return dma_async_device_register(dma);
}
static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt)
{
	if (*nr_irqs && *alloc < cnt) {
		(*alloc)++;
		(*nr_irqs)--;
	}
}
static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt)
{
	while (*mask * alloc < cnt)
		(*mask)++;
}
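/*
 * Worked example of the distribution above (hypothetical numbers): with
 * 4 IRQ vectors, 4 write channels and 2 read channels, the round-robin
 * loop in dw_edma_irq_request() yields wr_alloc = 2 and rd_alloc = 2,
 * and dw_edma_add_irq_mask() then computes wr_mask = 2 (two write
 * channels share each write vector) and rd_mask = 1.
 */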
static int dw_edma_irq_request(struct dw_edma *dw,
			       u32 *wr_alloc, u32 *rd_alloc)
{
	struct dw_edma_chip *chip = dw->chip;
	struct device *dev = dw->chip->dev;
	u32 wr_mask = 1;
	u32 rd_mask = 1;
	int i, err = 0;
	u32 ch_cnt;
	int irq;

	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;

	if (chip->nr_irqs < 1 || !chip->ops->irq_vector)
		return -EINVAL;

	dw->irq = devm_kcalloc(dev, chip->nr_irqs, sizeof(*dw->irq), GFP_KERNEL);
	if (!dw->irq)
		return -ENOMEM;

	if (chip->nr_irqs == 1) {
		/* Common IRQ shared among all channels */
		irq = chip->ops->irq_vector(dev, 0);
		err = request_irq(irq, dw_edma_interrupt_common,
				  IRQF_SHARED, dw->name, &dw->irq[0]);
		if (err) {
			dw->nr_irqs = 0;
			return err;
		}

		if (irq_get_msi_desc(irq))
			get_cached_msi_msg(irq, &dw->irq[0].msi);

		dw->nr_irqs = 1;
	} else {
		/* Distribute IRQs equally among all channels */
		int tmp = chip->nr_irqs;

		while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) {
			dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt);
			dw_edma_dec_irq_alloc(&tmp, rd_alloc, dw->rd_ch_cnt);
		}

		dw_edma_add_irq_mask(&wr_mask, *wr_alloc, dw->wr_ch_cnt);
		dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);

		for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
			irq = chip->ops->irq_vector(dev, i);
			err = request_irq(irq,
					  i < *wr_alloc ?
						dw_edma_interrupt_write :
						dw_edma_interrupt_read,
					  IRQF_SHARED, dw->name,
					  &dw->irq[i]);
			if (err)
				goto err_irq_free;

			if (irq_get_msi_desc(irq))
				get_cached_msi_msg(irq, &dw->irq[i].msi);
		}

		dw->nr_irqs = i;
	}

	return 0;

err_irq_free:
	for (i--; i >= 0; i--) {
		irq = chip->ops->irq_vector(dev, i);
		free_irq(irq, &dw->irq[i]);
	}

	return err;
}
int dw_edma_probe(struct dw_edma_chip *chip)
{
	struct device *dev;
	struct dw_edma *dw;
	u32 wr_alloc = 0;
	u32 rd_alloc = 0;
	int i, err;

	if (!chip)
		return -EINVAL;

	dev = chip->dev;
	if (!dev || !chip->ops)
		return -EINVAL;

	dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	dw->chip = chip;

	if (dw->chip->mf == EDMA_MF_HDMA_NATIVE)
		dw_hdma_v0_core_register(dw);
	else
		dw_edma_v0_core_register(dw);

	raw_spin_lock_init(&dw->lock);

	dw->wr_ch_cnt = min_t(u16, chip->ll_wr_cnt,
			      dw_edma_core_ch_count(dw, EDMA_DIR_WRITE));
	dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, EDMA_MAX_WR_CH);

	dw->rd_ch_cnt = min_t(u16, chip->ll_rd_cnt,
			      dw_edma_core_ch_count(dw, EDMA_DIR_READ));
	dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, EDMA_MAX_RD_CH);

	if (!dw->wr_ch_cnt && !dw->rd_ch_cnt)
		return -EINVAL;

	dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n",
		 dw->wr_ch_cnt, dw->rd_ch_cnt);

	/* Allocate channels */
	dw->chan = devm_kcalloc(dev, dw->wr_ch_cnt + dw->rd_ch_cnt,
				sizeof(*dw->chan), GFP_KERNEL);
	if (!dw->chan)
		return -ENOMEM;

	snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%s",
		 dev_name(chip->dev));

	/* Disable eDMA, only to establish the ideal initial conditions */
	dw_edma_core_off(dw);

	/* Request IRQs */
	err = dw_edma_irq_request(dw, &wr_alloc, &rd_alloc);
	if (err)
		return err;

	/* Setup write/read channels */
	err = dw_edma_channel_setup(dw, wr_alloc, rd_alloc);
	if (err)
		goto err_irq_free;

	/* Turn debugfs on */
	dw_edma_core_debugfs_on(dw);

	chip->dw = dw;

	return 0;

err_irq_free:
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);

	return err;
}
EXPORT_SYMBOL_GPL(dw_edma_probe);
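/*
 * Typical registration from a chip glue driver (a minimal illustrative
 * sketch, not code from this file; the pci_irq_vector() mapping and the
 * field values shown are assumptions that depend on the actual platform):
 *
 *	static int my_irq_vector(struct device *dev, unsigned int nr)
 *	{
 *		return pci_irq_vector(to_pci_dev(dev), nr);
 *	}
 *
 *	static const struct dw_edma_plat_ops my_ops = {
 *		.irq_vector = my_irq_vector,
 *	};
 *
 *	chip->dev = &pdev->dev;
 *	chip->ops = &my_ops;
 *	chip->nr_irqs = nr_vecs;
 *	...fill chip->mf, chip->reg_base, chip->ll_wr_cnt/ll_rd_cnt and
 *	the ll/dt regions, then call dw_edma_probe(chip)...
 */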
int dw_edma_remove(struct dw_edma_chip *chip)
{
	struct dw_edma_chan *chan, *_chan;
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	int i;

	/* Skip removal if no private data found */
	if (!dw)
		return -ENODEV;

	/* Disable eDMA */
	dw_edma_core_off(dw);

	/* Free irqs */
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);

	/* Deregister eDMA device */
	dma_async_device_unregister(&dw->dma);
	list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
				 vc.chan.device_node) {
		tasklet_kill(&chan->vc.task);
		list_del(&chan->vc.chan.device_node);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dw_edma_remove);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare eDMA controller core driver");
MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>");