// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016 Broadcom
 */

/*
 * Broadcom PDC Mailbox Driver
 * The PDC provides a ring based programming interface to one or more hardware
 * offload engines. For example, the PDC driver works with both SPU-M and SPU2
 * cryptographic offload hardware. In some chips the PDC is referred to as MDE,
 * and in others the FA2/FA+ hardware is used with this PDC driver.
 *
 * The PDC driver registers with the Linux mailbox framework as a mailbox
 * controller, once for each PDC instance. Ring 0 for each PDC is registered as
 * a mailbox channel. The PDC driver uses interrupts to determine when data
 * transfers to and from an offload engine are complete. The PDC driver
 * schedules a tasklet so that response messages are handled outside of
 * interrupt context.
 *
 * The PDC driver allows multiple messages to be pending in the descriptor
 * rings. The tx_msg_start descriptor index indicates where the last message
 * starts. The txin_numd value at this index indicates how many descriptor
 * indexes make up the message. Similar state is kept on the receive side. When
 * an rx interrupt indicates a response is ready, the PDC driver processes numd
 * descriptors from the tx and rx ring, thus processing one response at a time.
 */
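
/*
 * Reading aid (summary of the round trip implemented below): pdc_send_data()
 * DMA-maps the client's src/dst scatterlists and writes rx then tx
 * descriptors; pdc_tx_list_final() writes the tx/rx ptr registers to start
 * the DMA; the hardware raises the ring 0 receive interrupt;
 * pdc_irq_handler() masks and clears the interrupt and schedules rx_tasklet;
 * pdc_tasklet_cb() calls pdc_receive(), which reclaims descriptors one
 * message at a time and hands each reply back to the mailbox client via
 * mbox_chan_received_data().
 */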

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/scatterlist.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#define PDC_SUCCESS  0

#define RING_ENTRY_SIZE   sizeof(struct dma64dd)

/* # entries in PDC dma ring */
#define PDC_RING_ENTRIES  512
/*
 * Minimum number of ring descriptor entries that must be free to tell mailbox
 * framework that it can submit another request
 */
#define PDC_RING_SPACE_MIN  15

#define PDC_RING_SIZE    (PDC_RING_ENTRIES * RING_ENTRY_SIZE)
/* Rings are 8k aligned */
#define RING_ALIGN_ORDER  13
#define RING_ALIGN        BIT(RING_ALIGN_ORDER)

#define RX_BUF_ALIGN_ORDER  5
#define RX_BUF_ALIGN        BIT(RX_BUF_ALIGN_ORDER)

/* descriptor bumping macros */
#define XXD(x, max_mask)              ((x) & (max_mask))
#define TXD(x, max_mask)              XXD((x), (max_mask))
#define RXD(x, max_mask)              XXD((x), (max_mask))
#define NEXTTXD(i, max_mask)          TXD((i) + 1, (max_mask))
#define PREVTXD(i, max_mask)          TXD((i) - 1, (max_mask))
#define NEXTRXD(i, max_mask)          RXD((i) + 1, (max_mask))
#define PREVRXD(i, max_mask)          RXD((i) - 1, (max_mask))
#define NTXDACTIVE(h, t, max_mask)    TXD((t) - (h), (max_mask))
#define NRXDACTIVE(h, t, max_mask)    RXD((t) - (h), (max_mask))
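
/*
 * Example of the ring arithmetic above, with the PDC_RING_ENTRIES = 512
 * configuration used by this driver (so max_mask = 511): NEXTTXD(511, 511)
 * wraps to 0, and NTXDACTIVE(510, 2, 511) = (2 - 510) & 511 = 4. The
 * subtract-and-mask handles ring wraparound without a branch, which is why
 * pdc_hw_init() sets ntxpost/nrxpost to PDC_RING_ENTRIES - 1.
 */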

/* Length of BCM header at start of SPU msg, in bytes */
#define BCM_HDR_LEN  8

/*
 * PDC driver reserves ringset 0 on each SPU for its own use. The driver does
 * not currently support use of multiple ringsets on a single PDC engine.
 */
#define PDC_RINGSET  0

/*
 * Interrupt mask and status definitions. Enable interrupts for tx and rx on
 * ring 0
 */
#define PDC_RCVINT_0         (16 + PDC_RINGSET)
#define PDC_RCVINTEN_0       BIT(PDC_RCVINT_0)
#define PDC_INTMASK          (PDC_RCVINTEN_0)
#define PDC_LAZY_FRAMECOUNT  1
#define PDC_LAZY_TIMEOUT     10000
#define PDC_LAZY_INT  (PDC_LAZY_TIMEOUT | (PDC_LAZY_FRAMECOUNT << 24))
#define PDC_INTMASK_OFFSET   0x24
#define PDC_INTSTATUS_OFFSET 0x20
#define PDC_RCVLAZY0_OFFSET  (0x30 + 4 * PDC_RINGSET)
#define FA_RCVLAZY0_OFFSET   0x100
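
/*
 * Worked example of the lazy interrupt value: with PDC_LAZY_FRAMECOUNT = 1
 * and PDC_LAZY_TIMEOUT = 10000 (0x2710), PDC_LAZY_INT evaluates to
 * 0x01002710. The frame count sits in bits 31:24 and the timeout in the
 * low-order bits, so the hardware interrupts after a single received frame
 * rather than coalescing several.
 */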

/*
 * For SPU2, configure MDE_CKSUM_CONTROL to write 17 bytes of metadata
 * before the response data.
 */
#define PDC_SPU2_RESP_HDR_LEN  17
#define PDC_CKSUM_CTRL         BIT(27)
#define PDC_CKSUM_CTRL_OFFSET  0x400

#define PDC_SPUM_RESP_HDR_LEN  32

/*
 * Sets the following bits for write to transmit control reg:
 * 11    - PtyChkDisable - parity check is disabled
 * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
 */
#define PDC_TX_CTL		0x000C0800

/* Bit in tx control reg to enable tx channel */
#define PDC_TX_ENABLE		0x1

/*
 * Sets the following bits for write to receive control reg:
 * 7:1   - RcvOffset - size in bytes of status region at start of rx frame buf
 * 9     - SepRxHdrDescEn - place start of new frames only in descriptors
 *         that have StartOfFrame set
 * 10    - OflowContinue - on rx FIFO overflow, clear rx fifo, discard all
 *         remaining bytes in current frame, report error
 *         in rx frame status for current frame
 * 11    - PtyChkDisable - parity check is disabled
 * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
 */
#define PDC_RX_CTL		0x000C0E00

/* Bit in rx control reg to enable rx channel */
#define PDC_RX_ENABLE		0x1

#define CRYPTO_D64_RS0_CD_MASK   ((PDC_RING_ENTRIES * RING_ENTRY_SIZE) - 1)
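
/*
 * Reading aid: with 512 entries of 16-byte descriptors the ring is 8192
 * bytes, so the mask above is 0x1FFF. ANDing the rcv status0 register with
 * it yields the byte offset of the current descriptor within the ring;
 * dividing by RING_ENTRY_SIZE (see pdc_receive()) converts the offset to a
 * descriptor index.
 */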

/* descriptor flags */
#define D64_CTRL1_EOT   BIT(28)	/* end of descriptor table */
#define D64_CTRL1_IOC   BIT(29)	/* interrupt on complete */
#define D64_CTRL1_EOF   BIT(30)	/* end of frame */
#define D64_CTRL1_SOF   BIT(31)	/* start of frame */

#define RX_STATUS_OVERFLOW       0x00800000
#define RX_STATUS_LEN            0x0000FFFF
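
/*
 * Example decode of the SPU-M rx status word read in pdc_receive_one(): a
 * value of 0x00000120 is a 288-byte response with no overflow, while a set
 * bit 23 means the rx FIFO overflowed and the frame must be discarded.
 */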

#define PDC_TXREGS_OFFSET  0x200
#define PDC_RXREGS_OFFSET  0x220

/* Maximum size buffer the DMA engine can handle */
#define PDC_DMA_BUF_MAX 16384

enum pdc_hw {
	FA_HW,		/* FA2/FA+ hardware (i.e. Northstar Plus) */
	PDC_HW		/* PDC/MDE hardware (i.e. Northstar 2, Pegasus) */
};

struct pdc_dma_map {
	void *ctx;          /* opaque context associated with frame */
};

/* dma descriptor */
struct dma64dd {
	u32 ctrl1;      /* misc control bits */
	u32 ctrl2;      /* buffer count and address extension */
	u32 addrlow;    /* memory address of the data buffer, bits 31:0 */
	u32 addrhigh;   /* memory address of the data buffer, bits 63:32 */
};

/* dma registers per channel(xmt or rcv) */
struct dma64_regs {
	u32 control;   /* enable, et al */
	u32 ptr;       /* last descriptor posted to chip */
	u32 addrlow;   /* descriptor ring base address low 32-bits */
	u32 addrhigh;  /* descriptor ring base address bits 63:32 */
	u32 status0;   /* last rx descriptor written by hw */
	u32 status1;   /* driver does not use */
};

/* cpp contortions to concatenate w/arg prescan */
#ifndef PAD
#define _PADLINE(line)  pad ## line
#define _XSTR(line)     _PADLINE(line)
#define PAD             _XSTR(__LINE__)
#endif  /* PAD */
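
/*
 * Example of the PAD expansion: a "u32 PAD;" member declared below becomes,
 * e.g., "u32 pad231;" - the line number makes each otherwise-identical
 * padding field a unique struct member name.
 */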

/* dma registers. matches hw layout. */
struct dma64 {
	struct dma64_regs dmaxmt;  /* dma tx */
	u32          PAD[2];
	struct dma64_regs dmarcv;  /* dma rx */
	u32          PAD[2];
};

/* PDC registers */
struct pdc_regs {
	u32 devcontrol;             /* 0x000 */
	u32 devstatus;              /* 0x004 */
	u32 PAD;
	u32 biststatus;             /* 0x00c */
	u32 PAD[4];
	u32 intstatus;              /* 0x020 */
	u32 intmask;                /* 0x024 */
	u32 gptimer;                /* 0x028 */

	u32 PAD;
	u32 intrcvlazy_0;           /* 0x030 (Only in PDC, not FA2) */
	u32 intrcvlazy_1;           /* 0x034 (Only in PDC, not FA2) */
	u32 intrcvlazy_2;           /* 0x038 (Only in PDC, not FA2) */
	u32 intrcvlazy_3;           /* 0x03c (Only in PDC, not FA2) */

	u32 PAD[48];
	u32 fa_intrecvlazy;         /* 0x100 (Only in FA2, not PDC) */
	u32 flowctlthresh;          /* 0x104 */
	u32 wrrthresh;              /* 0x108 */
	u32 gmac_idle_cnt_thresh;   /* 0x10c */

	u32 PAD[4];
	u32 ifioaccessaddr;         /* 0x120 */
	u32 ifioaccessbyte;         /* 0x124 */
	u32 ifioaccessdata;         /* 0x128 */

	u32 PAD[21];
	u32 phyaccess;              /* 0x180 */
	u32 PAD;
	u32 phycontrol;             /* 0x188 */
	u32 txqctl;                 /* 0x18c */
	u32 rxqctl;                 /* 0x190 */
	u32 gpioselect;             /* 0x194 */
	u32 gpio_output_en;         /* 0x198 */
	u32 PAD;                    /* 0x19c */
	u32 txq_rxq_mem_ctl;        /* 0x1a0 */
	u32 memory_ecc_status;      /* 0x1a4 */
	u32 serdes_ctl;             /* 0x1a8 */
	u32 serdes_status0;         /* 0x1ac */
	u32 serdes_status1;         /* 0x1b0 */
	u32 PAD[11];                /* 0x1b4-1dc */
	u32 clk_ctl_st;             /* 0x1e0 */
	u32 hw_war;                 /* 0x1e4 (Only in PDC, not FA2) */
	u32 pwrctl;                 /* 0x1e8 */
	u32 PAD[5];

#define PDC_NUM_DMA_RINGS   4
	struct dma64 dmaregs[PDC_NUM_DMA_RINGS];  /* 0x0200 - 0x2fc */

	/* more registers follow, but we don't use them */
};

/* structure for allocating/freeing DMA rings */
struct pdc_ring_alloc {
	dma_addr_t  dmabase;	/* DMA address of start of ring */
	void	   *vbase;	/* base kernel virtual address of ring */
	u32	    size;	/* ring allocation size in bytes */
};

/*
 * context associated with a receive descriptor.
 * @rxp_ctx: opaque context associated with frame that starts at each
 *           rx ring index.
 * @dst_sg:  Scatterlist used to form reply frames beginning at a given ring
 *           index. Retained in order to unmap each sg after reply is
 *           processed.
 * @rxin_numd: Number of rx descriptors associated with the message that starts
 *             at a descriptor index. Not set for every index. For example,
 *             if descriptor index i points to a scatterlist with 4 entries,
 *             then the next three descriptor indexes don't have a value set.
 * @resp_hdr: Virtual address of buffer used to catch DMA rx status
 * @resp_hdr_daddr: physical address of DMA rx status buffer
 */
struct pdc_rx_ctx {
	void *rxp_ctx;
	struct scatterlist *dst_sg;
	u32  rxin_numd;
	u8 *resp_hdr;
	dma_addr_t resp_hdr_daddr;
};

/* PDC state structure */
struct pdc_state {
	/* Index of the PDC whose state is in this structure instance */
	u8 pdc_idx;

	/* Platform device for this PDC instance */
	struct platform_device *pdev;

	/*
	 * Each PDC instance has a mailbox controller. PDC receives request
	 * messages through mailboxes, and sends response messages through the
	 * mailbox framework.
	 */
	struct mbox_controller mbc;

	unsigned int pdc_irq;

	/* tasklet for deferred processing after DMA rx interrupt */
	struct tasklet_struct rx_tasklet;

	/* Number of bytes of receive status prior to each rx frame */
	u32 rx_status_len;
	/* Whether a BCM header is prepended to each frame */
	bool use_bcm_hdr;
	/* Sum of length of BCM header and rx status header */
	u32 pdc_resp_hdr_len;

	/* The base virtual address of DMA hw registers */
	void __iomem *pdc_reg_vbase;

	/* Pool for allocation of DMA rings */
	struct dma_pool *ring_pool;

	/* Pool for allocation of metadata buffers for response messages */
	struct dma_pool *rx_buf_pool;

	/*
	 * The base virtual address of DMA tx/rx descriptor rings. Corresponding
	 * DMA address and size of ring allocation.
	 */
	struct pdc_ring_alloc tx_ring_alloc;
	struct pdc_ring_alloc rx_ring_alloc;

	struct pdc_regs *regs;    /* start of PDC registers */

	struct dma64_regs *txregs_64; /* dma tx engine registers */
	struct dma64_regs *rxregs_64; /* dma rx engine registers */

	/*
	 * Arrays of PDC_RING_ENTRIES descriptors
	 * To use multiple ringsets, this needs to be extended
	 */
	struct dma64dd   *txd_64;  /* tx descriptor ring */
	struct dma64dd   *rxd_64;  /* rx descriptor ring */

	/* descriptor ring sizes */
	u32      ntxd;       /* # tx descriptors */
	u32      nrxd;       /* # rx descriptors */
	u32      nrxpost;    /* # rx buffers to keep posted */
	u32      ntxpost;    /* max number of tx buffers that can be posted */

	/*
	 * Index of next tx descriptor to reclaim. That is, the descriptor
	 * index of the oldest tx buffer for which the host has yet to process
	 * the corresponding response.
	 */
	u32  txin;

	/*
	 * Index of the first transmit descriptor for the sequence of
	 * message fragments currently under construction. Used to build up
	 * the txin_numd count for a message. Updated to txout when the host
	 * starts a new sequence of tx buffers for a new message.
	 */
	u32  tx_msg_start;

	/* Index of next tx descriptor to post. */
	u32  txout;

	/*
	 * Number of tx descriptors associated with the message that starts
	 * at this tx descriptor index.
	 */
	u32      txin_numd[PDC_RING_ENTRIES];

	/*
	 * Index of next rx descriptor to reclaim. This is the index of
	 * the next descriptor whose data has yet to be processed by the host.
	 */
	u32  rxin;

	/*
	 * Index of the first receive descriptor for the sequence of
	 * message fragments currently under construction. Used to build up
	 * the rxin_numd count for a message. Updated to rxout when the host
	 * starts a new sequence of rx buffers for a new message.
	 */
	u32  rx_msg_start;

	/*
	 * Saved value of current hardware rx descriptor index.
	 * The last rx buffer written by the hw is the index previous to
	 * this one.
	 */
	u32  last_rx_curr;

	/* Index of next rx descriptor to post. */
	u32  rxout;

	struct pdc_rx_ctx rx_ctx[PDC_RING_ENTRIES];

	/*
	 * Scatterlists used to form request and reply frames beginning at a
	 * given ring index. Retained in order to unmap each sg after reply
	 * is processed
	 */
	struct scatterlist *src_sg[PDC_RING_ENTRIES];

	/* counters */
	u32  pdc_requests;     /* number of request messages submitted */
	u32  pdc_replies;      /* number of reply messages received */
	u32  last_tx_not_done; /* too few tx descriptors to indicate done */
	u32  tx_ring_full;     /* unable to accept msg because tx ring full */
	u32  rx_ring_full;     /* unable to accept msg because rx ring full */
	u32  txnobuf;          /* unable to create tx descriptor */
	u32  rxnobuf;          /* unable to create rx descriptor */
	u32  rx_oflow;         /* count of rx overflows */

	/* hardware type - FA2 or PDC/MDE */
	enum pdc_hw hw_type;
};

/* Global variables */

struct pdc_globals {
	/* Actual number of SPUs in hardware, as reported by device tree */
	u32 num_spu;
};

static struct pdc_globals pdcg;

/* top level debug FS directory for PDC driver */
static struct dentry *debugfs_dir;

static ssize_t pdc_debugfs_read(struct file *filp, char __user *ubuf,
				size_t count, loff_t *offp)
{
	struct pdc_state *pdcs;
	char *buf;
	ssize_t ret, out_offset, out_count;

	out_count = 512;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pdcs = filp->private_data;
	out_offset = 0;
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"SPU %u stats:\n", pdcs->pdc_idx);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"PDC requests....................%u\n",
				pdcs->pdc_requests);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"PDC responses...................%u\n",
				pdcs->pdc_replies);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Tx not done.....................%u\n",
				pdcs->last_tx_not_done);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Tx ring full....................%u\n",
				pdcs->tx_ring_full);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Rx ring full....................%u\n",
				pdcs->rx_ring_full);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Tx desc write fail. Ring full...%u\n",
				pdcs->txnobuf);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Rx desc write fail. Ring full...%u\n",
				pdcs->rxnobuf);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Receive overflow................%u\n",
				pdcs->rx_oflow);
	out_offset += scnprintf(buf + out_offset, out_count - out_offset,
				"Num frags in rx ring............%u\n",
				NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
					   pdcs->nrxpost));

	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations pdc_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = pdc_debugfs_read,
};

/**
 * pdc_setup_debugfs() - Create the debug FS directories. If the top-level
 * directory has not yet been created, create it now. Create a stats file in
 * this directory for a SPU.
 * @pdcs: PDC state structure
 */
static void pdc_setup_debugfs(struct pdc_state *pdcs)
{
	char spu_stats_name[16];

	if (!debugfs_initialized())
		return;

	snprintf(spu_stats_name, 16, "pdc%d_stats", pdcs->pdc_idx);
	if (!debugfs_dir)
		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	/* S_IRUSR == 0400 */
	debugfs_create_file(spu_stats_name, 0400, debugfs_dir, pdcs,
			    &pdc_debugfs_stats);
}

static void pdc_free_debugfs(void)
{
	debugfs_remove_recursive(debugfs_dir);
	debugfs_dir = NULL;
}

/**
 * pdc_build_rxd() - Build DMA descriptor to receive SPU result.
 * @pdcs:      PDC state for SPU that will generate result
 * @dma_addr:  DMA address of buffer that descriptor is being built for
 * @buf_len:   Length of the receive buffer, in bytes
 * @flags:     Flags to be stored in descriptor
 */
static inline void
pdc_build_rxd(struct pdc_state *pdcs, dma_addr_t dma_addr,
	      u32 buf_len, u32 flags)
{
	struct device *dev = &pdcs->pdev->dev;
	struct dma64dd *rxd = &pdcs->rxd_64[pdcs->rxout];

	dev_dbg(dev,
		"Writing rx descriptor for PDC %u at index %u with length %u. flags %#x\n",
		pdcs->pdc_idx, pdcs->rxout, buf_len, flags);

	rxd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
	rxd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
	rxd->ctrl1 = cpu_to_le32(flags);
	rxd->ctrl2 = cpu_to_le32(buf_len);

	/* bump ring index and return */
	pdcs->rxout = NEXTRXD(pdcs->rxout, pdcs->nrxpost);
}

/**
 * pdc_build_txd() - Build a DMA descriptor to transmit a SPU request to
 * hardware.
 * @pdcs:      PDC state for the SPU that will process this request
 * @dma_addr:  DMA address of packet to be transmitted
 * @buf_len:   Length of tx buffer, in bytes
 * @flags:     Flags to be stored in descriptor
 */
static inline void
pdc_build_txd(struct pdc_state *pdcs, dma_addr_t dma_addr, u32 buf_len,
	      u32 flags)
{
	struct device *dev = &pdcs->pdev->dev;
	struct dma64dd *txd = &pdcs->txd_64[pdcs->txout];

	dev_dbg(dev,
		"Writing tx descriptor for PDC %u at index %u with length %u, flags %#x\n",
		pdcs->pdc_idx, pdcs->txout, buf_len, flags);

	txd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
	txd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
	txd->ctrl1 = cpu_to_le32(flags);
	txd->ctrl2 = cpu_to_le32(buf_len);

	/* bump ring index and return */
	pdcs->txout = NEXTTXD(pdcs->txout, pdcs->ntxpost);
}

/**
 * pdc_receive_one() - Receive a response message from a given SPU.
 * @pdcs:  PDC state for the SPU to receive from
 *
 * When the return code indicates success, the response message is available in
 * the receive buffers provided prior to submission of the request.
 *
 * Return: PDC_SUCCESS if one or more receive descriptors was processed
 *         -EAGAIN indicates that no response message is available
 *         -EIO an error occurred
 */
static int
pdc_receive_one(struct pdc_state *pdcs)
{
	struct device *dev = &pdcs->pdev->dev;
	struct mbox_controller *mbc;
	struct mbox_chan *chan;
	struct brcm_message mssg;
	u32 len, rx_status;
	u32 num_frags;
	u8 *resp_hdr;    /* virtual addr of start of resp message DMA header */
	u32 frags_rdy;   /* number of fragments ready to read */
	u32 rx_idx;      /* ring index of start of receive frame */
	dma_addr_t resp_hdr_daddr;
	struct pdc_rx_ctx *rx_ctx;

	mbc = &pdcs->mbc;
	chan = &mbc->chans[0];
	mssg.type = BRCM_MESSAGE_SPU;

	/*
	 * return if a complete response message is not yet ready.
	 * rxin_numd[rxin] is the number of fragments in the next msg
	 * to read.
	 */
	frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr, pdcs->nrxpost);
	if ((frags_rdy == 0) ||
	    (frags_rdy < pdcs->rx_ctx[pdcs->rxin].rxin_numd))
		/* No response ready */
		return -EAGAIN;

	num_frags = pdcs->txin_numd[pdcs->txin];
	WARN_ON(num_frags == 0);

	dma_unmap_sg(dev, pdcs->src_sg[pdcs->txin],
		     sg_nents(pdcs->src_sg[pdcs->txin]), DMA_TO_DEVICE);

	pdcs->txin = (pdcs->txin + num_frags) & pdcs->ntxpost;

	dev_dbg(dev, "PDC %u reclaimed %d tx descriptors",
		pdcs->pdc_idx, num_frags);

	rx_idx = pdcs->rxin;
	rx_ctx = &pdcs->rx_ctx[rx_idx];
	num_frags = rx_ctx->rxin_numd;
	/* Return opaque context with result */
	mssg.ctx = rx_ctx->rxp_ctx;
	rx_ctx->rxp_ctx = NULL;
	resp_hdr = rx_ctx->resp_hdr;
	resp_hdr_daddr = rx_ctx->resp_hdr_daddr;
	dma_unmap_sg(dev, rx_ctx->dst_sg, sg_nents(rx_ctx->dst_sg),
		     DMA_FROM_DEVICE);

	pdcs->rxin = (pdcs->rxin + num_frags) & pdcs->nrxpost;

	dev_dbg(dev, "PDC %u reclaimed %d rx descriptors",
		pdcs->pdc_idx, num_frags);
	dev_dbg(dev,
		"PDC %u txin %u, txout %u, rxin %u, rxout %u, last_rx_curr %u\n",
		pdcs->pdc_idx, pdcs->txin, pdcs->txout, pdcs->rxin,
		pdcs->rxout, pdcs->last_rx_curr);

	if (pdcs->pdc_resp_hdr_len == PDC_SPUM_RESP_HDR_LEN) {
		/*
		 * For SPU-M, get length of response msg and rx overflow status.
		 */
		rx_status = *((u32 *)resp_hdr);
		len = rx_status & RX_STATUS_LEN;
		dev_dbg(dev,
			"SPU response length %u bytes", len);
		if (unlikely(((rx_status & RX_STATUS_OVERFLOW) || (!len)))) {
			if (rx_status & RX_STATUS_OVERFLOW) {
				dev_err_ratelimited(dev,
						    "crypto receive overflow");
				pdcs->rx_oflow++;
			} else {
				dev_info_ratelimited(dev, "crypto rx len = 0");
			}
			return -EIO;
		}
	}

	dma_pool_free(pdcs->rx_buf_pool, resp_hdr, resp_hdr_daddr);

	mbox_chan_received_data(chan, &mssg);

	pdcs->pdc_replies++;
	return PDC_SUCCESS;
}

/**
 * pdc_receive() - Process as many responses as are available in the rx ring.
 * @pdcs:  PDC state
 *
 * Called from the rx tasklet scheduled by the receive interrupt.
 * Return: 0
 */
static int
pdc_receive(struct pdc_state *pdcs)
{
	int rx_status;

	/* read last_rx_curr from register once */
	pdcs->last_rx_curr =
	    (ioread32((const void __iomem *)&pdcs->rxregs_64->status0) &
	     CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;

	do {
		/* Could be many frames ready */
		rx_status = pdc_receive_one(pdcs);
	} while (rx_status == PDC_SUCCESS);

	return 0;
}

/**
 * pdc_tx_list_sg_add() - Add the buffers in a scatterlist to the transmit
 * descriptors for a given SPU. The scatterlist buffers contain the data for a
 * SPU request message.
 * @pdcs:  PDC state for the SPU that will process this request
 * @sg:    Scatterlist whose buffers contain part of the SPU request
 *
 * If a scatterlist buffer is larger than PDC_DMA_BUF_MAX, multiple descriptors
 * are written for that buffer, each <= PDC_DMA_BUF_MAX bytes in length.
 *
 * Return: PDC_SUCCESS if successful
 *         < 0 otherwise
 */
static int pdc_tx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
{
	u32 flags = 0;
	u32 eot;
	u32 tx_avail;

	/*
	 * Num descriptors needed. Conservatively assume we need a descriptor
	 * for every entry in sg.
	 */
	u32 num_desc;
	u32 desc_w = 0;	/* Number of tx descriptors written */
	u32 bufcnt;	/* Number of bytes of buffer pointed to by descriptor */
	dma_addr_t databufptr;	/* DMA address to put in descriptor */

	num_desc = (u32)sg_nents(sg);

	/* check whether enough tx descriptors are available */
	tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
					      pdcs->ntxpost);
	if (unlikely(num_desc > tx_avail)) {
		pdcs->txnobuf++;
		return -ENOSPC;
	}

	/* build tx descriptors */
	if (pdcs->tx_msg_start == pdcs->txout) {
		/* Start of frame */
		pdcs->txin_numd[pdcs->tx_msg_start] = 0;
		pdcs->src_sg[pdcs->txout] = sg;
		flags = D64_CTRL1_SOF;
	}

	while (sg) {
		if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
			eot = D64_CTRL1_EOT;
		else
			eot = 0;

		/*
		 * If sg buffer larger than PDC limit, split across
		 * multiple descriptors
		 */
		bufcnt = sg_dma_len(sg);
		databufptr = sg_dma_address(sg);
		while (bufcnt > PDC_DMA_BUF_MAX) {
			pdc_build_txd(pdcs, databufptr, PDC_DMA_BUF_MAX,
				      flags | eot);
			desc_w++;
			bufcnt -= PDC_DMA_BUF_MAX;
			databufptr += PDC_DMA_BUF_MAX;
			if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
				eot = D64_CTRL1_EOT;
			else
				eot = 0;
		}
		sg = sg_next(sg);
		if (!sg)
			/* Writing last descriptor for frame */
			flags |= (D64_CTRL1_EOF | D64_CTRL1_IOC);
		pdc_build_txd(pdcs, databufptr, bufcnt, flags | eot);
		desc_w++;
		/* Clear start of frame after first descriptor */
		flags &= ~D64_CTRL1_SOF;
	}
	pdcs->txin_numd[pdcs->tx_msg_start] += desc_w;

	return PDC_SUCCESS;
}
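
/*
 * Illustration of the flag sequence the function above produces for a
 * request mapping to three descriptors: descriptor 0 carries D64_CTRL1_SOF,
 * descriptor 2 carries D64_CTRL1_EOF | D64_CTRL1_IOC, and any of the three
 * additionally carries D64_CTRL1_EOT only if it happens to occupy the last
 * slot in the ring.
 */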

/**
 * pdc_tx_list_final() - Initiate DMA transfer of last frame written to tx
 * ring.
 * @pdcs:  PDC state for SPU to process the request
 *
 * Sets the index of the last descriptor written in both the rx and tx ring.
 *
 * Return: PDC_SUCCESS
 */
static int pdc_tx_list_final(struct pdc_state *pdcs)
{
	/*
	 * write barrier to ensure all register writes are complete
	 * before chip starts to process new request
	 */
	wmb();
	iowrite32(pdcs->rxout << 4, &pdcs->rxregs_64->ptr);
	iowrite32(pdcs->txout << 4, &pdcs->txregs_64->ptr);
	pdcs->pdc_requests++;

	return PDC_SUCCESS;
}

/**
 * pdc_rx_list_init() - Start a new receive descriptor list for a given PDC.
 * @pdcs:    PDC state for SPU handling request
 * @dst_sg:  scatterlist providing rx buffers for response to be returned to
 *	     mailbox client
 * @ctx:     Opaque context for this request
 *
 * Posts a single receive descriptor to hold the metadata that precedes a
 * response. For example, with SPU-M, the metadata is a 32-byte DMA header and
 * an 8-byte BCM header. Moves the msg_start descriptor indexes for both tx and
 * rx to indicate the start of a new message.
 *
 * Return: PDC_SUCCESS if successful
 *         < 0 if an error (e.g., rx ring is full)
 */
static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
			    void *ctx)
{
	u32 flags = 0;
	u32 rx_avail;
	u32 rx_pkt_cnt = 1;	/* Adding a single rx buffer */
	dma_addr_t daddr;
	void *vaddr;
	struct pdc_rx_ctx *rx_ctx;

	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(rx_pkt_cnt > rx_avail)) {
		pdcs->rxnobuf++;
		return -ENOSPC;
	}

	/* allocate a buffer for the dma rx status */
	vaddr = dma_pool_zalloc(pdcs->rx_buf_pool, GFP_ATOMIC, &daddr);
	if (unlikely(!vaddr))
		return -ENOMEM;

	/*
	 * Update msg_start indexes for both tx and rx to indicate the start
	 * of a new sequence of descriptor indexes that contain the fragments
	 * of the same message.
	 */
	pdcs->rx_msg_start = pdcs->rxout;
	pdcs->tx_msg_start = pdcs->txout;

	/* This is always the first descriptor in the receive sequence */
	flags = D64_CTRL1_SOF;
	pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd = 1;

	if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
		flags |= D64_CTRL1_EOT;

	rx_ctx = &pdcs->rx_ctx[pdcs->rxout];
	rx_ctx->rxp_ctx = ctx;
	rx_ctx->dst_sg = dst_sg;
	rx_ctx->resp_hdr = vaddr;
	rx_ctx->resp_hdr_daddr = daddr;
	pdc_build_rxd(pdcs, daddr, pdcs->pdc_resp_hdr_len, flags);
	return PDC_SUCCESS;
}

/**
 * pdc_rx_list_sg_add() - Add the buffers in a scatterlist to the receive
 * descriptors for a given SPU. The caller must have already DMA mapped the
 * scatterlist.
 * @pdcs:  PDC state for the SPU that will process this request
 * @sg:    Scatterlist whose buffers are added to the receive ring
 *
 * If a receive buffer in the scatterlist is larger than PDC_DMA_BUF_MAX,
 * multiple receive descriptors are written, each with a buffer <=
 * PDC_DMA_BUF_MAX.
 *
 * Return: PDC_SUCCESS if successful
 *         < 0 otherwise (e.g., receive ring is full)
 */
static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
{
	u32 flags = 0;
	u32 rx_avail;

	/*
	 * Num descriptors needed. Conservatively assume we need a descriptor
	 * for every entry from our starting point in the scatterlist.
	 */
	u32 num_desc;
	u32 desc_w = 0;	/* Number of rx descriptors written */
	u32 bufcnt;	/* Number of bytes of buffer pointed to by descriptor */
	dma_addr_t databufptr;	/* DMA address to put in descriptor */

	num_desc = (u32)sg_nents(sg);

	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(num_desc > rx_avail)) {
		pdcs->rxnobuf++;
		return -ENOSPC;
	}

	while (sg) {
		if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
			flags = D64_CTRL1_EOT;
		else
			flags = 0;

		/*
		 * If sg buffer larger than PDC limit, split across
		 * multiple descriptors
		 */
		bufcnt = sg_dma_len(sg);
		databufptr = sg_dma_address(sg);
		while (bufcnt > PDC_DMA_BUF_MAX) {
			pdc_build_rxd(pdcs, databufptr, PDC_DMA_BUF_MAX, flags);
			desc_w++;
			bufcnt -= PDC_DMA_BUF_MAX;
			databufptr += PDC_DMA_BUF_MAX;
			if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
				flags = D64_CTRL1_EOT;
			else
				flags = 0;
		}
		pdc_build_rxd(pdcs, databufptr, bufcnt, flags);
		desc_w++;
		sg = sg_next(sg);
	}
	pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd += desc_w;

	return PDC_SUCCESS;
}

/**
 * pdc_irq_handler() - Interrupt handler called in interrupt context.
 * @irq:   Interrupt number that has fired
 * @data:  device struct for DMA engine that generated the interrupt
 *
 * We have to clear the device interrupt status flags here. Other than that,
 * just disable further interrupts and schedule the tasklet that does the
 * deferred processing.
 *
 * Return: IRQ_HANDLED if interrupt is ours
 *         IRQ_NONE otherwise
 */
static irqreturn_t pdc_irq_handler(int irq, void *data)
{
	struct device *dev = (struct device *)data;
	struct pdc_state *pdcs = dev_get_drvdata(dev);
	u32 intstatus = ioread32(pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);

	if (unlikely(intstatus == 0))
		return IRQ_NONE;

	/* Disable interrupts until soft handler runs */
	iowrite32(0, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);

	/* Clear interrupt flags in device */
	iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);

	/* Wake up the rx tasklet */
	tasklet_schedule(&pdcs->rx_tasklet);

	return IRQ_HANDLED;
}

/**
 * pdc_tasklet_cb() - Tasklet callback that runs the deferred processing after
 * a DMA receive interrupt. Reenables the receive interrupt.
 * @t: Pointer to the rx_tasklet embedded in the pdc_state structure
 */
static void pdc_tasklet_cb(struct tasklet_struct *t)
{
	struct pdc_state *pdcs = from_tasklet(pdcs, t, rx_tasklet);

	pdc_receive(pdcs);

	/* reenable interrupts */
	iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
}

/**
 * pdc_ring_init() - Allocate DMA rings and initialize constant fields of
 * descriptors in one ringset.
 * @pdcs:     PDC instance state
 * @ringset:  index of ringset being used
 *
 * Return: PDC_SUCCESS if ring initialized
 *         < 0 otherwise
 */
static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
{
	int i;
	int err = PDC_SUCCESS;
	struct dma64 *dma_reg;
	struct device *dev = &pdcs->pdev->dev;
	struct pdc_ring_alloc tx;
	struct pdc_ring_alloc rx;

	/* Allocate tx ring */
	tx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &tx.dmabase);
	if (unlikely(!tx.vbase)) {
		err = -ENOMEM;
		goto done;
	}

	/* Allocate rx ring */
	rx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &rx.dmabase);
	if (unlikely(!rx.vbase)) {
		err = -ENOMEM;
		goto fail_dealloc;
	}

	dev_dbg(dev, " - base DMA addr of tx ring      %pad", &tx.dmabase);
	dev_dbg(dev, " - base virtual addr of tx ring  %p", tx.vbase);
	dev_dbg(dev, " - base DMA addr of rx ring      %pad", &rx.dmabase);
	dev_dbg(dev, " - base virtual addr of rx ring  %p", rx.vbase);

	memcpy(&pdcs->tx_ring_alloc, &tx, sizeof(tx));
	memcpy(&pdcs->rx_ring_alloc, &rx, sizeof(rx));

	pdcs->rxin = 0;
	pdcs->rx_msg_start = 0;
	pdcs->last_rx_curr = 0;
	pdcs->rxout = 0;
	pdcs->txin = 0;
	pdcs->tx_msg_start = 0;
	pdcs->txout = 0;

	/* Set descriptor array base addresses */
	pdcs->txd_64 = (struct dma64dd *)pdcs->tx_ring_alloc.vbase;
	pdcs->rxd_64 = (struct dma64dd *)pdcs->rx_ring_alloc.vbase;

	/* Tell device the base DMA address of each ring */
	dma_reg = &pdcs->regs->dmaregs[ringset];

	/* But first disable DMA and set curptr to 0 for both TX & RX */
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
	iowrite32((PDC_RX_CTL + (pdcs->rx_status_len << 1)),
		  &dma_reg->dmarcv.control);
	iowrite32(0, &dma_reg->dmaxmt.ptr);
	iowrite32(0, &dma_reg->dmarcv.ptr);

	/* Set base DMA addresses */
	iowrite32(lower_32_bits(pdcs->tx_ring_alloc.dmabase),
		  &dma_reg->dmaxmt.addrlow);
	iowrite32(upper_32_bits(pdcs->tx_ring_alloc.dmabase),
		  &dma_reg->dmaxmt.addrhigh);

	iowrite32(lower_32_bits(pdcs->rx_ring_alloc.dmabase),
		  &dma_reg->dmarcv.addrlow);
	iowrite32(upper_32_bits(pdcs->rx_ring_alloc.dmabase),
		  &dma_reg->dmarcv.addrhigh);

	/* Re-enable DMA */
	iowrite32(PDC_TX_CTL | PDC_TX_ENABLE, &dma_reg->dmaxmt.control);
	iowrite32((PDC_RX_CTL | PDC_RX_ENABLE | (pdcs->rx_status_len << 1)),
		  &dma_reg->dmarcv.control);

	/* Initialize descriptors */
	for (i = 0; i < PDC_RING_ENTRIES; i++) {
		/* Every tx descriptor can be used for start of frame. */
		if (i != pdcs->ntxpost) {
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF,
				  &pdcs->txd_64[i].ctrl1);
		} else {
			/* Last descriptor in ringset. Set End of Table. */
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF |
				  D64_CTRL1_EOT, &pdcs->txd_64[i].ctrl1);
		}

		/* Every rx descriptor can be used for start of frame */
		if (i != pdcs->nrxpost) {
			iowrite32(D64_CTRL1_SOF,
				  &pdcs->rxd_64[i].ctrl1);
		} else {
			/* Last descriptor in ringset. Set End of Table. */
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOT,
				  &pdcs->rxd_64[i].ctrl1);
		}
	}
	return PDC_SUCCESS;

fail_dealloc:
	dma_pool_free(pdcs->ring_pool, tx.vbase, tx.dmabase);
done:
	return err;
}

static void pdc_ring_free(struct pdc_state *pdcs)
{
	if (pdcs->tx_ring_alloc.vbase) {
		dma_pool_free(pdcs->ring_pool, pdcs->tx_ring_alloc.vbase,
			      pdcs->tx_ring_alloc.dmabase);
		pdcs->tx_ring_alloc.vbase = NULL;
	}

	if (pdcs->rx_ring_alloc.vbase) {
		dma_pool_free(pdcs->ring_pool, pdcs->rx_ring_alloc.vbase,
			      pdcs->rx_ring_alloc.dmabase);
		pdcs->rx_ring_alloc.vbase = NULL;
	}
}

/**
 * pdc_desc_count() - Count the number of DMA descriptors that will be required
 * for a given scatterlist. Account for the max length of a DMA buffer.
 * @sg:  Scatterlist to be DMA'd
 * Return: Number of descriptors required
 */
static u32 pdc_desc_count(struct scatterlist *sg)
{
	u32 cnt = 0;

	while (sg) {
		cnt += ((sg->length / PDC_DMA_BUF_MAX) + 1);
		sg = sg_next(sg);
	}
	return cnt;
}
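
/*
 * Note the count above is deliberately conservative: a scatterlist entry of
 * exactly PDC_DMA_BUF_MAX (16384) bytes counts as two descriptors even
 * though the sg-add functions write only one for it. The result is used
 * only for ring-space checks, so overestimating is safe and keeps the
 * arithmetic simple.
 */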

/**
 * pdc_rings_full() - Check whether the tx ring has room for tx_cnt descriptors
 * and the rx ring has room for rx_cnt descriptors.
 * @pdcs:    PDC state
 * @tx_cnt:  The number of descriptors required in the tx ring
 * @rx_cnt:  The number of descriptors required in the rx ring
 *
 * Return: true if one of the rings does not have enough space
 *         false if sufficient space is available in both rings
 */
static bool pdc_rings_full(struct pdc_state *pdcs, int tx_cnt, int rx_cnt)
{
	u32 rx_avail;
	u32 tx_avail;
	bool full = false;

	/* Check if the tx and rx rings are likely to have enough space */
	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(rx_cnt > rx_avail)) {
		pdcs->rx_ring_full++;
		full = true;
	}

	if (likely(!full)) {
		tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
						      pdcs->ntxpost);
		if (unlikely(tx_cnt > tx_avail)) {
			pdcs->tx_ring_full++;
			full = true;
		}
	}
	return full;
}

/**
 * pdc_last_tx_done() - If both the tx and rx rings have at least
 * PDC_RING_SPACE_MIN descriptors available, then indicate that the mailbox
 * framework can submit another message.
 * @chan:  mailbox channel to check
 * Return: true if PDC can accept another message on this channel
 */
static bool pdc_last_tx_done(struct mbox_chan *chan)
{
	struct pdc_state *pdcs = chan->con_priv;
	bool ret;

	if (unlikely(pdc_rings_full(pdcs, PDC_RING_SPACE_MIN,
				    PDC_RING_SPACE_MIN))) {
		pdcs->last_tx_not_done++;
		ret = false;
	} else {
		ret = true;
	}
	return ret;
}

/**
 * pdc_send_data() - mailbox send_data function
 * @chan:	The mailbox channel on which the data is sent. The channel
 *              corresponds to a DMA ringset.
 * @data:	The mailbox message to be sent. The message must be a
 *              brcm_message structure.
 *
 * This function is registered as the send_data function for the mailbox
 * controller. From the destination scatterlist in the mailbox message, it
 * creates a sequence of receive descriptors in the rx ring. From the source
 * scatterlist, it creates a sequence of transmit descriptors in the tx ring.
 * After creating the descriptors, it writes the rx ptr and tx ptr registers to
 * initiate the DMA transfer.
 *
 * This function does the DMA map and unmap of the src and dst scatterlists in
 * the mailbox message.
 *
 * Return: 0 if successful
 *	   -ENOTSUPP if the mailbox message is a type this driver does not
 *			support
 *	   < 0 if an error
 */
static int pdc_send_data(struct mbox_chan *chan, void *data)
{
	struct pdc_state *pdcs = chan->con_priv;
	struct device *dev = &pdcs->pdev->dev;
	struct brcm_message *mssg = data;
	int err = PDC_SUCCESS;
	int src_nent;
	int dst_nent;
	int nent;
	u32 tx_desc_req;
	u32 rx_desc_req;

	if (unlikely(mssg->type != BRCM_MESSAGE_SPU))
		return -ENOTSUPP;

	src_nent = sg_nents(mssg->spu.src);
	if (likely(src_nent)) {
		nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE);
		if (unlikely(nent == 0))
			return -EIO;
	}

	dst_nent = sg_nents(mssg->spu.dst);
	if (likely(dst_nent)) {
		nent = dma_map_sg(dev, mssg->spu.dst, dst_nent,
				  DMA_FROM_DEVICE);
		if (unlikely(nent == 0)) {
			dma_unmap_sg(dev, mssg->spu.src, src_nent,
				     DMA_TO_DEVICE);
			return -EIO;
		}
	}

	/*
	 * Check if the tx and rx rings have enough space. Do this prior to
	 * writing any tx or rx descriptors. Need to ensure that we do not write
	 * a partial set of descriptors, or write just rx descriptors but
	 * corresponding tx descriptors don't fit. Note that we want this check
	 * and the entire sequence of descriptor writes to happen without
	 * another thread getting in. The channel spin lock in the mailbox
	 * framework ensures this.
	 */
	tx_desc_req = pdc_desc_count(mssg->spu.src);
	rx_desc_req = pdc_desc_count(mssg->spu.dst);
	if (unlikely(pdc_rings_full(pdcs, tx_desc_req, rx_desc_req + 1)))
		return -ENOSPC;

	/* Create rx descriptors to catch the SPU response */
	err = pdc_rx_list_init(pdcs, mssg->spu.dst, mssg->ctx);
	err |= pdc_rx_list_sg_add(pdcs, mssg->spu.dst);

	/* Create tx descriptors to submit SPU request */
	err |= pdc_tx_list_sg_add(pdcs, mssg->spu.src);
	err |= pdc_tx_list_final(pdcs);	/* initiate transfer */

	if (unlikely(err))
		dev_err(&pdcs->pdev->dev,
			"%s failed with error %d", __func__, err);

	return err;
}
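
/*
 * For reference, a mailbox client submits work to this driver roughly as
 * follows. This is a hedged sketch, not code from the actual SPU client
 * (drivers/crypto/bcm); acquiring the channel with mbox_request_channel()
 * and building the src/dst scatterlists are assumed to happen elsewhere.
 *
 *	struct brcm_message mssg;
 *
 *	mssg.type = BRCM_MESSAGE_SPU;
 *	mssg.ctx = my_request;		// returned in the rx callback
 *	mssg.spu.src = src_sg;		// SPU request frame
 *	mssg.spu.dst = dst_sg;		// buffers to catch the response
 *	err = mbox_send_message(chan, &mssg);	// lands in pdc_send_data()
 *
 * Because this controller sets txdone_poll, the framework decides when the
 * next message may be submitted by polling pdc_last_tx_done().
 */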

static int pdc_startup(struct mbox_chan *chan)
{
	return pdc_ring_init(chan->con_priv, PDC_RINGSET);
}

static void pdc_shutdown(struct mbox_chan *chan)
{
	struct pdc_state *pdcs = chan->con_priv;

	if (!pdcs)
		return;

	dev_dbg(&pdcs->pdev->dev,
		"Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
	pdc_ring_free(pdcs);
}

/**
 * pdc_hw_init() - Use the given initialization parameters to initialize the
 * state for one of the PDCs.
 * @pdcs:  state of the PDC
 */
static
void pdc_hw_init(struct pdc_state *pdcs)
{
	struct platform_device *pdev;
	struct device *dev;
	struct dma64 *dma_reg;
	int ringset = PDC_RINGSET;

	pdev = pdcs->pdev;
	dev = &pdev->dev;

	dev_dbg(dev, "PDC %u initial values:", pdcs->pdc_idx);
	dev_dbg(dev, "state structure:                   %p",
		pdcs);
	dev_dbg(dev, " - base virtual addr of hw regs    %p",
		pdcs->pdc_reg_vbase);

	/* initialize data structures */
	pdcs->regs = (struct pdc_regs *)pdcs->pdc_reg_vbase;
	pdcs->txregs_64 = (struct dma64_regs *)
	    (((u8 *)pdcs->pdc_reg_vbase) +
		     PDC_TXREGS_OFFSET + (sizeof(struct dma64) * ringset));
	pdcs->rxregs_64 = (struct dma64_regs *)
	    (((u8 *)pdcs->pdc_reg_vbase) +
	     PDC_RXREGS_OFFSET + (sizeof(struct dma64) * ringset));

	pdcs->ntxd = PDC_RING_ENTRIES;
	pdcs->nrxd = PDC_RING_ENTRIES;
	pdcs->ntxpost = PDC_RING_ENTRIES - 1;
	pdcs->nrxpost = PDC_RING_ENTRIES - 1;
	iowrite32(0, &pdcs->regs->intmask);

	dma_reg = &pdcs->regs->dmaregs[ringset];

	/* Configure DMA but will enable later in pdc_ring_init() */
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);

	iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
		  &dma_reg->dmarcv.control);

	/* Reset current index pointers after making sure DMA is disabled */
	iowrite32(0, &dma_reg->dmaxmt.ptr);
	iowrite32(0, &dma_reg->dmarcv.ptr);

	if (pdcs->pdc_resp_hdr_len == PDC_SPU2_RESP_HDR_LEN)
		iowrite32(PDC_CKSUM_CTRL,
			  pdcs->pdc_reg_vbase + PDC_CKSUM_CTRL_OFFSET);
}

/**
 * pdc_hw_disable() - Disable the tx and rx control in the hw.
 * @pdcs: PDC state structure
 */
static void pdc_hw_disable(struct pdc_state *pdcs)
{
	struct dma64 *dma_reg;

	dma_reg = &pdcs->regs->dmaregs[PDC_RINGSET];
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
	iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
		  &dma_reg->dmarcv.control);
}

/**
 * pdc_rx_buf_pool_create() - Pool of receive buffers used to catch the metadata
 * header returned with each response message.
 * @pdcs: PDC state structure
 *
 * The metadata is not returned to the mailbox client. So the PDC driver
 * manages these buffers.
 *
 * Return: PDC_SUCCESS
 *         -ENOMEM if pool creation fails
 */
static int pdc_rx_buf_pool_create(struct pdc_state *pdcs)
{
	struct platform_device *pdev;
	struct device *dev;

	pdev = pdcs->pdev;
	dev = &pdev->dev;

	pdcs->pdc_resp_hdr_len = pdcs->rx_status_len;
	if (pdcs->use_bcm_hdr)
		pdcs->pdc_resp_hdr_len += BCM_HDR_LEN;

	pdcs->rx_buf_pool = dma_pool_create("pdc rx bufs", dev,
					    pdcs->pdc_resp_hdr_len,
					    RX_BUF_ALIGN, 0);
	if (!pdcs->rx_buf_pool)
		return -ENOMEM;

	return PDC_SUCCESS;
}

/**
 * pdc_interrupts_init() - Initialize the interrupt configuration for a PDC and
 * register the interrupt handler, which schedules a tasklet for deferred
 * handling of interrupts outside of interrupt context.
 * @pdcs:  PDC state
 *
 * Set the interrupt mask for transmit and receive done.
 * Set the lazy interrupt frame count to generate an interrupt for just one pkt.
 *
 * Return: PDC_SUCCESS
 *         <0 if the IRQ request fails
 */
static int pdc_interrupts_init(struct pdc_state *pdcs)
{
	struct platform_device *pdev = pdcs->pdev;
	struct device *dev = &pdev->dev;
	struct device_node *dn = pdev->dev.of_node;
	int err;

	/* interrupt configuration */
	iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);

	if (pdcs->hw_type == FA_HW)
		iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase +
			  FA_RCVLAZY0_OFFSET);
	else
		iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase +
			  PDC_RCVLAZY0_OFFSET);

	/* read irq from device tree */
	pdcs->pdc_irq = irq_of_parse_and_map(dn, 0);
	dev_dbg(dev, "pdc device %s irq %u for pdcs %p",
		dev_name(dev), pdcs->pdc_irq, pdcs);

	err = devm_request_irq(dev, pdcs->pdc_irq, pdc_irq_handler, 0,
			       dev_name(dev), dev);
	if (err) {
		dev_err(dev, "IRQ %u request failed with err %d\n",
			pdcs->pdc_irq, err);
		return err;
	}
	return PDC_SUCCESS;
}

static const struct mbox_chan_ops pdc_mbox_chan_ops = {
	.send_data = pdc_send_data,
	.last_tx_done = pdc_last_tx_done,
	.startup = pdc_startup,
	.shutdown = pdc_shutdown
};

/**
 * pdc_mb_init() - Initialize the mailbox controller.
 * @pdcs:  PDC state
 *
 * Each PDC is a mailbox controller. Each ringset is a mailbox channel. Kernel
 * driver only uses one ringset and thus one mb channel. PDC uses transmit
 * done polling (pdc_last_tx_done()) to determine when a mailbox message has
 * successfully been transmitted.
 *
 * Return: 0 on success
 *         < 0 if there is an allocation or registration failure
 */
static int pdc_mb_init(struct pdc_state *pdcs)
{
	struct device *dev = &pdcs->pdev->dev;
	struct mbox_controller *mbc;
	int chan_index;
	int err;

	mbc = &pdcs->mbc;
	mbc->dev = dev;
	mbc->ops = &pdc_mbox_chan_ops;
	mbc->num_chans = 1;
	mbc->chans = devm_kcalloc(dev, mbc->num_chans, sizeof(*mbc->chans),
				  GFP_KERNEL);
	if (!mbc->chans)
		return -ENOMEM;

	mbc->txdone_irq = false;
	mbc->txdone_poll = true;
	mbc->txpoll_period = 1;
	for (chan_index = 0; chan_index < mbc->num_chans; chan_index++)
		mbc->chans[chan_index].con_priv = pdcs;

	/* Register mailbox controller */
	err = devm_mbox_controller_register(dev, mbc);
	if (err) {
		dev_crit(dev,
			 "Failed to register PDC mailbox controller. Error %d.",
			 err);
		return err;
	}
	return 0;
}

/* Device tree API */
static const int pdc_hw = PDC_HW;
static const int fa_hw = FA_HW;

static const struct of_device_id pdc_mbox_of_match[] = {
	{.compatible = "brcm,iproc-pdc-mbox", .data = &pdc_hw},
	{.compatible = "brcm,iproc-fa2-mbox", .data = &fa_hw},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pdc_mbox_of_match);

/**
 * pdc_dt_read() - Read application-specific data from device tree.
 * @pdev:  Platform device
 * @pdcs:  PDC state
 *
 * Reads the number of bytes of receive status that precede each received frame.
 * Reads whether transmit and received frames should be preceded by an 8-byte
 * BCM header.
 *
 * Return: 0 if successful
 *         -ENODEV if device not available
 */
static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs)
{
	struct device *dev = &pdev->dev;
	struct device_node *dn = pdev->dev.of_node;
	const int *hw_type;
	int err;

	err = of_property_read_u32(dn, "brcm,rx-status-len",
				   &pdcs->rx_status_len);
	if (err < 0)
		dev_err(dev,
			"%s failed to get DMA receive status length from device tree",
			__func__);

	pdcs->use_bcm_hdr = of_property_read_bool(dn, "brcm,use-bcm-hdr");

	pdcs->hw_type = PDC_HW;

	hw_type = device_get_match_data(dev);
	if (hw_type)
		pdcs->hw_type = *hw_type;

	return 0;
}
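
/*
 * For reference, a device tree node this driver binds to looks roughly like
 * the sketch below. The unit address, reg size, and interrupt specifier are
 * illustrative assumptions; only the compatible string and the two brcm,*
 * properties are consumed by pdc_dt_read().
 *
 *	pdc0: iproc-pdc0@612c0000 {
 *		compatible = "brcm,iproc-pdc-mbox";
 *		reg = <0x612c0000 0x445>;
 *		interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
 *		#mbox-cells = <1>;
 *		brcm,rx-status-len = <32>;
 *		brcm,use-bcm-hdr;
 *	};
 */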

/**
 * pdc_probe() - Probe function for PDC driver.
 * @pdev:   PDC platform device
 *
 * Reserve and map register regions defined in device tree.
 * Allocate and initialize tx and rx DMA rings.
 * Initialize a mailbox controller for each PDC.
 *
 * Return: 0 if successful
 *         < 0 if an error
 */
static int pdc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct device *dev = &pdev->dev;
	struct resource *pdc_regs;
	struct pdc_state *pdcs;

	/* PDC state for one SPU */
	pdcs = devm_kzalloc(dev, sizeof(*pdcs), GFP_KERNEL);
	if (!pdcs) {
		err = -ENOMEM;
		goto cleanup;
	}

	pdcs->pdev = pdev;
	platform_set_drvdata(pdev, pdcs);
	pdcs->pdc_idx = pdcg.num_spu;
	pdcg.num_spu++;

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39));
	if (err) {
		dev_warn(dev, "PDC device cannot perform DMA. Error %d.", err);
		goto cleanup;
	}

	/* Create DMA pool for tx ring */
	pdcs->ring_pool = dma_pool_create("pdc rings", dev, PDC_RING_SIZE,
					  RING_ALIGN, 0);
	if (!pdcs->ring_pool) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = pdc_dt_read(pdev, pdcs);
	if (err)
		goto cleanup_ring_pool;

	pdcs->pdc_reg_vbase = devm_platform_get_and_ioremap_resource(pdev, 0, &pdc_regs);
	if (IS_ERR(pdcs->pdc_reg_vbase)) {
		err = PTR_ERR(pdcs->pdc_reg_vbase);
		goto cleanup_ring_pool;
	}
	dev_dbg(dev, "PDC register region res.start = %pa, res.end = %pa",
		&pdc_regs->start, &pdc_regs->end);

	/* create rx buffer pool after dt read to know how big buffers are */
	err = pdc_rx_buf_pool_create(pdcs);
	if (err)
		goto cleanup_ring_pool;

	pdc_hw_init(pdcs);

	/* Init tasklet for deferred DMA rx processing */
	tasklet_setup(&pdcs->rx_tasklet, pdc_tasklet_cb);

	err = pdc_interrupts_init(pdcs);
	if (err)
		goto cleanup_buf_pool;

	/* Initialize mailbox controller */
	err = pdc_mb_init(pdcs);
	if (err)
		goto cleanup_buf_pool;

	pdc_setup_debugfs(pdcs);

	dev_dbg(dev, "pdc_probe() successful");
	return PDC_SUCCESS;

cleanup_buf_pool:
	tasklet_kill(&pdcs->rx_tasklet);
	dma_pool_destroy(pdcs->rx_buf_pool);

cleanup_ring_pool:
	dma_pool_destroy(pdcs->ring_pool);

cleanup:
	return err;
}

static int pdc_remove(struct platform_device *pdev)
{
	struct pdc_state *pdcs = platform_get_drvdata(pdev);

	pdc_free_debugfs();

	tasklet_kill(&pdcs->rx_tasklet);

	pdc_hw_disable(pdcs);

	dma_pool_destroy(pdcs->rx_buf_pool);
	dma_pool_destroy(pdcs->ring_pool);
	return 0;
}

static struct platform_driver pdc_mbox_driver = {
	.probe = pdc_probe,
	.remove = pdc_remove,
	.driver = {
		   .name = "brcm-iproc-pdc-mbox",
		   .of_match_table = pdc_mbox_of_match,
		   },
};
module_platform_driver(pdc_mbox_driver);

MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
MODULE_DESCRIPTION("Broadcom PDC mailbox driver");
MODULE_LICENSE("GPL v2");