// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */

#include <linux/bitfield.h>
#include <linux/dmapool.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#include "prestera_dsa.h"
#include "prestera.h"
#include "prestera_hw.h"
#include "prestera_rxtx.h"
#include "prestera_devlink.h"

#define PRESTERA_SDMA_WAIT_MUL		10

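/* Hardware SDMA descriptor. Going by the accessor macros below: word1
 * carries the ownership/status bits (bit 31 = owner), word2 the buffer
 * size and packet length fields, buff the packet buffer address and next
 * the address of the following descriptor, both in the device's address
 * space.
 */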
struct prestera_sdma_desc {
	__le32 word1;
	__le32 word2;
	__le32 buff;
	__le32 next;
} __packed __aligned(16);

#define PRESTERA_SDMA_BUFF_SIZE_MAX	1544

#define PRESTERA_SDMA_RX_DESC_PKT_LEN(desc) \
	((le32_to_cpu((desc)->word2) >> 16) & GENMASK(13, 0))

#define PRESTERA_SDMA_RX_DESC_OWNER(desc) \
	((le32_to_cpu((desc)->word1) & BIT(31)) >> 31)

#define PRESTERA_SDMA_RX_DESC_IS_RCVD(desc) \
	(PRESTERA_SDMA_RX_DESC_OWNER(desc) == PRESTERA_SDMA_RX_DESC_CPU_OWN)

#define PRESTERA_SDMA_RX_DESC_CPU_OWN	0
#define PRESTERA_SDMA_RX_DESC_DMA_OWN	1

#define PRESTERA_SDMA_RX_QUEUE_NUM	8

#define PRESTERA_SDMA_RX_DESC_PER_Q	1000

#define PRESTERA_SDMA_TX_DESC_PER_Q	1000
#define PRESTERA_SDMA_TX_MAX_BURST	64

#define PRESTERA_SDMA_TX_DESC_OWNER(desc) \
	((le32_to_cpu((desc)->word1) & BIT(31)) >> 31)

#define PRESTERA_SDMA_TX_DESC_CPU_OWN	0
#define PRESTERA_SDMA_TX_DESC_DMA_OWN	1

#define PRESTERA_SDMA_TX_DESC_IS_SENT(desc) \
	(PRESTERA_SDMA_TX_DESC_OWNER(desc) == PRESTERA_SDMA_TX_DESC_CPU_OWN)

#define PRESTERA_SDMA_TX_DESC_LAST	BIT(20)
#define PRESTERA_SDMA_TX_DESC_FIRST	BIT(21)
#define PRESTERA_SDMA_TX_DESC_CALC_CRC	BIT(12)

#define PRESTERA_SDMA_TX_DESC_SINGLE \
	(PRESTERA_SDMA_TX_DESC_FIRST | PRESTERA_SDMA_TX_DESC_LAST)

#define PRESTERA_SDMA_TX_DESC_INIT \
	(PRESTERA_SDMA_TX_DESC_SINGLE | PRESTERA_SDMA_TX_DESC_CALC_CRC)

#define PRESTERA_SDMA_RX_INTR_MASK_REG		0x2814
#define PRESTERA_SDMA_RX_QUEUE_STATUS_REG	0x2680
#define PRESTERA_SDMA_RX_QUEUE_DESC_REG(n)	(0x260C + (n) * 16)

#define PRESTERA_SDMA_TX_QUEUE_DESC_REG		0x26C0
#define PRESTERA_SDMA_TX_QUEUE_START_REG	0x2868

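/* Host-side bookkeeping: one prestera_sdma_buf shadows each hardware
 * descriptor, pairing it with its skb and with the DMA addresses of both
 * the descriptor and the packet buffer.
 */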
struct prestera_sdma_buf {
	struct prestera_sdma_desc *desc;
	dma_addr_t desc_dma;
	struct sk_buff *skb;
	dma_addr_t buf_dma;
	bool is_used;
};

struct prestera_rx_ring {
	struct prestera_sdma_buf *bufs;
	int next_rx;
};

struct prestera_tx_ring {
	struct prestera_sdma_buf *bufs;
	int next_tx;
	int max_burst;
	int burst;
};

struct prestera_sdma {
	struct prestera_rx_ring rx_ring[PRESTERA_SDMA_RX_QUEUE_NUM];
	struct prestera_tx_ring tx_ring;
	struct prestera_switch *sw;
	struct dma_pool *desc_pool;
	struct work_struct tx_work;
	struct napi_struct rx_napi;
	struct net_device napi_dev;
	u32 map_addr;
	u64 dma_mask;
	/* protect SDMA from concurrent access by multiple CPUs */
	spinlock_t tx_lock;
};

struct prestera_rxtx {
	struct prestera_sdma sdma;
};

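/* Take a descriptor from the DMA pool and reset the buffer state;
 * buf_dma is parked at DMA_MAPPING_ERROR so that teardown can tell
 * never-mapped buffers apart from mapped ones.
 */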
static int prestera_sdma_buf_init(struct prestera_sdma *sdma,
				  struct prestera_sdma_buf *buf)
{
	struct prestera_sdma_desc *desc;
	dma_addr_t dma;

	desc = dma_pool_alloc(sdma->desc_pool, GFP_DMA | GFP_KERNEL, &dma);
	if (!desc)
		return -ENOMEM;

	buf->buf_dma = DMA_MAPPING_ERROR;
	buf->desc_dma = dma;
	buf->desc = desc;
	buf->skb = NULL;

	return 0;
}

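/* Translate a host DMA address into the device's address space; map_addr
 * is the base offset obtained from prestera_hw_rxtx_init().
 */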
static u32 prestera_sdma_map(struct prestera_sdma *sdma, dma_addr_t pa)
{
	return sdma->map_addr + pa;
}

static void prestera_sdma_rx_desc_init(struct prestera_sdma *sdma,
				       struct prestera_sdma_desc *desc,
				       dma_addr_t buf)
{
	u32 word = le32_to_cpu(desc->word2);

	u32p_replace_bits(&word, PRESTERA_SDMA_BUFF_SIZE_MAX, GENMASK(15, 0));
	desc->word2 = cpu_to_le32(word);

	desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf));

	/* make sure buffer is set before resetting the descriptor */
	wmb();

	desc->word1 = cpu_to_le32(0xA0000000);
}

static void prestera_sdma_rx_desc_set_next(struct prestera_sdma *sdma,
					   struct prestera_sdma_desc *desc,
					   dma_addr_t next)
{
	desc->next = cpu_to_le32(prestera_sdma_map(sdma, next));
}

static int prestera_sdma_rx_skb_alloc(struct prestera_sdma *sdma,
				      struct prestera_sdma_buf *buf)
{
	struct device *dev = sdma->sw->dev->dev;
	struct sk_buff *skb;
	dma_addr_t dma;

	skb = alloc_skb(PRESTERA_SDMA_BUFF_SIZE_MAX, GFP_DMA | GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	dma = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto err_dma_map;

	if (buf->skb)
		dma_unmap_single(dev, buf->buf_dma, buf->skb->len,
				 DMA_FROM_DEVICE);

	buf->buf_dma = dma;
	buf->skb = skb;

	return 0;

err_dma_map:
	kfree_skb(skb);

	return -ENOMEM;
}

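/* Detach the received skb from the ring slot and refill the slot with a
 * fresh buffer. If the refill allocation fails, keep the original buffer
 * in the ring and hand the stack a copy of the packet instead.
 */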
static struct sk_buff *prestera_sdma_rx_skb_get(struct prestera_sdma *sdma,
						struct prestera_sdma_buf *buf)
{
	dma_addr_t buf_dma = buf->buf_dma;
	struct sk_buff *skb = buf->skb;
	u32 len = skb->len;
	int err;

	err = prestera_sdma_rx_skb_alloc(sdma, buf);
	if (err) {
		buf->buf_dma = buf_dma;
		buf->skb = skb;

		skb = alloc_skb(skb->len, GFP_ATOMIC);
		if (skb) {
			skb_put(skb, len);
			skb_copy_from_linear_data(buf->skb, skb->data, len);
		}
	}

	prestera_sdma_rx_desc_init(sdma, buf->desc, buf->buf_dma);

	return skb;
}

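/* Strip the DSA tag, which overlays the EtherType field of the received
 * frame, resolve the ingress port from the tag and restore any VLAN
 * information via the hw-accel path before handing the skb up.
 */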
static int prestera_rxtx_process_skb(struct prestera_sdma *sdma,
				     struct sk_buff *skb)
{
	struct prestera_port *port;
	struct prestera_dsa dsa;
	u32 hw_port, dev_id;
	u8 cpu_code;
	int err;

	skb_pull(skb, ETH_HLEN);

	/* ethertype field is part of the dsa header */
	err = prestera_dsa_parse(&dsa, skb->data - ETH_TLEN);
	if (err)
		return err;

	dev_id = dsa.hw_dev_num;
	hw_port = dsa.port_num;

	port = prestera_port_find_by_hwid(sdma->sw, dev_id, hw_port);
	if (unlikely(!port)) {
		dev_warn_ratelimited(prestera_dev(sdma->sw), "received pkt for non-existent port(%u, %u)\n",
				     dev_id, hw_port);
		return -ENOENT;
	}

	if (unlikely(!pskb_may_pull(skb, PRESTERA_DSA_HLEN)))
		return -EINVAL;

	/* remove DSA tag and update checksum */
	skb_pull_rcsum(skb, PRESTERA_DSA_HLEN);

	memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - PRESTERA_DSA_HLEN,
		ETH_ALEN * 2);

	skb_push(skb, ETH_HLEN);

	skb->protocol = eth_type_trans(skb, port->dev);

	if (dsa.vlan.is_tagged) {
		u16 tci = dsa.vlan.vid & VLAN_VID_MASK;

		tci |= dsa.vlan.vpt << VLAN_PRIO_SHIFT;
		if (dsa.vlan.cfi_bit)
			tci |= VLAN_CFI_MASK;

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
	}

	cpu_code = dsa.cpu_code;
	prestera_devlink_trap_report(port, skb, cpu_code);

	return 0;
}

static int prestera_sdma_next_rx_buf_idx(int buf_idx)
{
	return (buf_idx + 1) % PRESTERA_SDMA_RX_DESC_PER_Q;
}

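/* NAPI poll: service the Rx rings round-robin, one packet per ring per
 * pass, until the budget is spent or every ring reports empty
 * (rxq_done_map == qmask). The Rx interrupt is unmasked again only after
 * a clean napi_complete_done().
 */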
static int prestera_sdma_rx_poll(struct napi_struct *napi, int budget)
{
	int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
	unsigned int rxq_done_map = 0;
	struct prestera_sdma *sdma;
	struct list_head rx_list;
	unsigned int qmask;
	int pkts_done = 0;
	int q;

	qmask = GENMASK(qnum - 1, 0);

	INIT_LIST_HEAD(&rx_list);

	sdma = container_of(napi, struct prestera_sdma, rx_napi);

	while (pkts_done < budget && rxq_done_map != qmask) {
		for (q = 0; q < qnum && pkts_done < budget; q++) {
			struct prestera_rx_ring *ring = &sdma->rx_ring[q];
			struct prestera_sdma_desc *desc;
			struct prestera_sdma_buf *buf;
			int buf_idx = ring->next_rx;
			struct sk_buff *skb;

			buf = &ring->bufs[buf_idx];
			desc = buf->desc;

			if (PRESTERA_SDMA_RX_DESC_IS_RCVD(desc)) {
				rxq_done_map &= ~BIT(q);
			} else {
				rxq_done_map |= BIT(q);
				continue;
			}

			pkts_done++;

			__skb_trim(buf->skb, PRESTERA_SDMA_RX_DESC_PKT_LEN(desc));

			skb = prestera_sdma_rx_skb_get(sdma, buf);
			if (!skb)
				goto rx_next_buf;

			if (unlikely(prestera_rxtx_process_skb(sdma, skb)))
				goto rx_next_buf;

			list_add_tail(&skb->list, &rx_list);
rx_next_buf:
			ring->next_rx = prestera_sdma_next_rx_buf_idx(buf_idx);
		}
	}

	if (pkts_done < budget && napi_complete_done(napi, pkts_done))
		prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG,
			       GENMASK(9, 2));

	netif_receive_skb_list(&rx_list);

	return pkts_done;
}

static void prestera_sdma_rx_fini(struct prestera_sdma *sdma)
{
	int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
	int q, b;

	/* disable all rx queues */
	prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
		       GENMASK(15, 8));

	for (q = 0; q < qnum; q++) {
		struct prestera_rx_ring *ring = &sdma->rx_ring[q];

		if (!ring->bufs)
			break;

		for (b = 0; b < PRESTERA_SDMA_RX_DESC_PER_Q; b++) {
			struct prestera_sdma_buf *buf = &ring->bufs[b];

			if (buf->desc_dma)
				dma_pool_free(sdma->desc_pool, buf->desc,
					      buf->desc_dma);

			if (!buf->skb)
				continue;

			if (buf->buf_dma != DMA_MAPPING_ERROR)
				dma_unmap_single(sdma->sw->dev->dev,
						 buf->buf_dma, buf->skb->len,
						 DMA_FROM_DEVICE);
			kfree_skb(buf->skb);
		}
	}
}

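/* Build each Rx ring as a circular chain of descriptors with a mapped
 * skb per slot, point the queue's descriptor register at the head and
 * finally enable all queues.
 */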
static int prestera_sdma_rx_init(struct prestera_sdma *sdma)
{
	int bnum = PRESTERA_SDMA_RX_DESC_PER_Q;
	int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
	int err;
	int q;

	/* disable all rx queues */
	prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
		       GENMASK(15, 8));

	for (q = 0; q < qnum; q++) {
		struct prestera_sdma_buf *head, *tail, *next, *prev;
		struct prestera_rx_ring *ring = &sdma->rx_ring[q];

		ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL);
		if (!ring->bufs)
			return -ENOMEM;

		ring->next_rx = 0;

		tail = &ring->bufs[bnum - 1];
		head = &ring->bufs[0];
		next = head;
		prev = next;

		do {
			err = prestera_sdma_buf_init(sdma, next);
			if (err)
				return err;

			err = prestera_sdma_rx_skb_alloc(sdma, next);
			if (err)
				return err;

			prestera_sdma_rx_desc_init(sdma, next->desc,
						   next->buf_dma);

			prestera_sdma_rx_desc_set_next(sdma, prev->desc,
						       next->desc_dma);

			prev = next;
			next++;
		} while (prev != tail);

		/* join tail with head to make a circular list */
		prestera_sdma_rx_desc_set_next(sdma, tail->desc, head->desc_dma);

		prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_DESC_REG(q),
			       prestera_sdma_map(sdma, head->desc_dma));
	}

	/* make sure all rx descs are filled before enabling all rx queues */
	wmb();

	prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
		       GENMASK(7, 0));

	return 0;
}

static void prestera_sdma_tx_desc_init(struct prestera_sdma *sdma,
				       struct prestera_sdma_desc *desc)
{
	desc->word1 = cpu_to_le32(PRESTERA_SDMA_TX_DESC_INIT);
	desc->word2 = 0;
}

static void prestera_sdma_tx_desc_set_next(struct prestera_sdma *sdma,
					   struct prestera_sdma_desc *desc,
					   dma_addr_t next)
{
	desc->next = cpu_to_le32(prestera_sdma_map(sdma, next));
}

static void prestera_sdma_tx_desc_set_buf(struct prestera_sdma *sdma,
					  struct prestera_sdma_desc *desc,
					  dma_addr_t buf, size_t len)
{
	u32 word = le32_to_cpu(desc->word2);

	u32p_replace_bits(&word, len + ETH_FCS_LEN, GENMASK(30, 16));

	desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf));
	desc->word2 = cpu_to_le32(word);
}

static void prestera_sdma_tx_desc_xmit(struct prestera_sdma_desc *desc)
{
	u32 word = le32_to_cpu(desc->word1);

	word |= PRESTERA_SDMA_TX_DESC_DMA_OWN << 31;

	/* make sure everything is written before enabling xmit */
	dma_wmb();

	desc->word1 = cpu_to_le32(word);
}

static int prestera_sdma_tx_buf_map(struct prestera_sdma *sdma,
				    struct prestera_sdma_buf *buf,
				    struct sk_buff *skb)
{
	struct device *dma_dev = sdma->sw->dev->dev;
	dma_addr_t dma;

	dma = dma_map_single(dma_dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, dma))
		return -ENOMEM;

	buf->buf_dma = dma;
	buf->skb = skb;

	return 0;
}

static void prestera_sdma_tx_buf_unmap(struct prestera_sdma *sdma,
				       struct prestera_sdma_buf *buf)
{
	struct device *dma_dev = sdma->sw->dev->dev;

	dma_unmap_single(dma_dev, buf->buf_dma, buf->skb->len, DMA_TO_DEVICE);
}

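/* Deferred Tx reclaim: walk the ring and release every buffer whose
 * descriptor the hardware has handed back to the CPU (owner bit
 * cleared), so the xmit path never has to free skbs itself.
 */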
static void prestera_sdma_tx_recycle_work_fn(struct work_struct *work)
{
	int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
	struct prestera_tx_ring *tx_ring;
	struct prestera_sdma *sdma;
	int b;

	sdma = container_of(work, struct prestera_sdma, tx_work);

	tx_ring = &sdma->tx_ring;

	for (b = 0; b < bnum; b++) {
		struct prestera_sdma_buf *buf = &tx_ring->bufs[b];

		if (!buf->is_used)
			continue;

		if (!PRESTERA_SDMA_TX_DESC_IS_SENT(buf->desc))
			continue;

		prestera_sdma_tx_buf_unmap(sdma, buf);
		dev_consume_skb_any(buf->skb);
		buf->skb = NULL;

		/* make sure everything is cleaned up */
		dma_wmb();

		buf->is_used = false;
	}
}

static int prestera_sdma_tx_init(struct prestera_sdma *sdma)
{
	struct prestera_sdma_buf *head, *tail, *next, *prev;
	struct prestera_tx_ring *tx_ring = &sdma->tx_ring;
	int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
	int err;

	INIT_WORK(&sdma->tx_work, prestera_sdma_tx_recycle_work_fn);
	spin_lock_init(&sdma->tx_lock);

	tx_ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL);
	if (!tx_ring->bufs)
		return -ENOMEM;

	tail = &tx_ring->bufs[bnum - 1];
	head = &tx_ring->bufs[0];
	next = head;
	prev = next;

	tx_ring->max_burst = PRESTERA_SDMA_TX_MAX_BURST;
	tx_ring->burst = tx_ring->max_burst;
	tx_ring->next_tx = 0;

	do {
		err = prestera_sdma_buf_init(sdma, next);
		if (err)
			return err;

		next->is_used = false;

		prestera_sdma_tx_desc_init(sdma, next->desc);

		prestera_sdma_tx_desc_set_next(sdma, prev->desc,
					       next->desc_dma);

		prev = next;
		next++;
	} while (prev != tail);

	/* join tail with head to make a circular list */
	prestera_sdma_tx_desc_set_next(sdma, tail->desc, head->desc_dma);

	/* make sure descriptors are written */
	wmb();

	prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_DESC_REG,
		       prestera_sdma_map(sdma, head->desc_dma));

	return 0;
}

static void prestera_sdma_tx_fini(struct prestera_sdma *sdma)
{
	struct prestera_tx_ring *ring = &sdma->tx_ring;
	int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
	int b;

	cancel_work_sync(&sdma->tx_work);

	if (!ring->bufs)
		return;

	for (b = 0; b < bnum; b++) {
		struct prestera_sdma_buf *buf = &ring->bufs[b];

		if (buf->desc)
			dma_pool_free(sdma->desc_pool, buf->desc,
				      buf->desc_dma);

		if (!buf->skb)
			continue;

		dma_unmap_single(sdma->sw->dev->dev, buf->buf_dma,
				 buf->skb->len, DMA_TO_DEVICE);

		dev_consume_skb_any(buf->skb);
	}
}

static void prestera_rxtx_handle_event(struct prestera_switch *sw,
				       struct prestera_event *evt,
				       void *arg)
{
	struct prestera_sdma *sdma = arg;

	if (evt->id != PRESTERA_RXTX_EVENT_RCV_PKT)
		return;

	prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG, 0);
	napi_schedule(&sdma->rx_napi);
}

static int prestera_sdma_switch_init(struct prestera_switch *sw)
{
	struct prestera_sdma *sdma = &sw->rxtx->sdma;
	struct device *dev = sw->dev->dev;
	struct prestera_rxtx_params p;
	int err;

	p.use_sdma = true;

	err = prestera_hw_rxtx_init(sw, &p);
	if (err) {
		dev_err(dev, "failed to init rxtx by hw\n");
		return err;
	}

	sdma->dma_mask = dma_get_mask(dev);
	sdma->map_addr = p.map_addr;
	sdma->sw = sw;

	sdma->desc_pool = dma_pool_create("desc_pool", dev,
					  sizeof(struct prestera_sdma_desc),
					  16, 0);
	if (!sdma->desc_pool)
		return -ENOMEM;

	err = prestera_sdma_rx_init(sdma);
	if (err) {
		dev_err(dev, "failed to init rx ring\n");
		goto err_rx_init;
	}

	err = prestera_sdma_tx_init(sdma);
	if (err) {
		dev_err(dev, "failed to init tx ring\n");
		goto err_tx_init;
	}

	err = prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_RXTX,
						 prestera_rxtx_handle_event,
						 sdma);
	if (err)
		goto err_evt_register;

	init_dummy_netdev(&sdma->napi_dev);

	netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll, 64);
	napi_enable(&sdma->rx_napi);

	return 0;

err_evt_register:
err_tx_init:
	prestera_sdma_tx_fini(sdma);
err_rx_init:
	prestera_sdma_rx_fini(sdma);

	dma_pool_destroy(sdma->desc_pool);
	return err;
}

static void prestera_sdma_switch_fini(struct prestera_switch *sw)
{
	struct prestera_sdma *sdma = &sw->rxtx->sdma;

	napi_disable(&sdma->rx_napi);
	netif_napi_del(&sdma->rx_napi);
	prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_RXTX,
					     prestera_rxtx_handle_event);
	prestera_sdma_tx_fini(sdma);
	prestera_sdma_rx_fini(sdma);
	dma_pool_destroy(sdma->desc_pool);
}

static bool prestera_sdma_is_ready(struct prestera_sdma *sdma)
{
	return !(prestera_read(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG) & 1);
}

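/* Busy-wait for the Tx engine to go idle, bounded by
 * PRESTERA_SDMA_WAIT_MUL * max_burst polls.
 */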
static int prestera_sdma_tx_wait(struct prestera_sdma *sdma,
				 struct prestera_tx_ring *tx_ring)
{
	int tx_wait_num = PRESTERA_SDMA_WAIT_MUL * tx_ring->max_burst;

	do {
		if (prestera_sdma_is_ready(sdma))
			return 0;

		udelay(1);
	} while (--tx_wait_num);

	return -EBUSY;
}

static void prestera_sdma_tx_start(struct prestera_sdma *sdma)
{
	prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG, 1);
	schedule_work(&sdma->tx_work);
}

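/* Enqueue one skb. Up to max_burst frames are queued back-to-back before
 * the driver stops to wait for the engine to catch up, presumably to
 * avoid polling the status register on every single packet.
 */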
static netdev_tx_t prestera_sdma_xmit(struct prestera_sdma *sdma,
				      struct sk_buff *skb)
{
	struct device *dma_dev = sdma->sw->dev->dev;
	struct net_device *dev = skb->dev;
	struct prestera_tx_ring *tx_ring;
	struct prestera_sdma_buf *buf;
	int err;

	spin_lock(&sdma->tx_lock);

	tx_ring = &sdma->tx_ring;

	buf = &tx_ring->bufs[tx_ring->next_tx];
	if (buf->is_used) {
		schedule_work(&sdma->tx_work);
		goto drop_skb;
	}

	if (unlikely(eth_skb_pad(skb)))
		goto drop_skb_nofree;

	err = prestera_sdma_tx_buf_map(sdma, buf, skb);
	if (err)
		goto drop_skb;

	prestera_sdma_tx_desc_set_buf(sdma, buf->desc, buf->buf_dma, skb->len);

	dma_sync_single_for_device(dma_dev, buf->buf_dma, skb->len,
				   DMA_TO_DEVICE);

	if (tx_ring->burst) {
		tx_ring->burst--;
	} else {
		tx_ring->burst = tx_ring->max_burst;

		/* wait until the hardware is ready to accept new requests */
		err = prestera_sdma_tx_wait(sdma, tx_ring);
		if (err)
			goto drop_skb_unmap;
	}

	buf->is_used = true;

	tx_ring->next_tx = (tx_ring->next_tx + 1) % PRESTERA_SDMA_TX_DESC_PER_Q;
	prestera_sdma_tx_desc_xmit(buf->desc);

	prestera_sdma_tx_start(sdma);

	goto tx_done;

drop_skb_unmap:
	prestera_sdma_tx_buf_unmap(sdma, buf);
drop_skb:
	dev_consume_skb_any(skb);
drop_skb_nofree:
	dev->stats.tx_dropped++;
tx_done:
	spin_unlock(&sdma->tx_lock);

	return NETDEV_TX_OK;
}

int prestera_rxtx_switch_init(struct prestera_switch *sw)
{
	struct prestera_rxtx *rxtx;

	rxtx = kzalloc(sizeof(*rxtx), GFP_KERNEL);
	if (!rxtx)
		return -ENOMEM;

	sw->rxtx = rxtx;

	return prestera_sdma_switch_init(sw);
}

void prestera_rxtx_switch_fini(struct prestera_switch *sw)
{
	prestera_sdma_switch_fini(sw);
	kfree(sw->rxtx);
	sw->rxtx = NULL;
}

int prestera_rxtx_port_init(struct prestera_port *port)
{
	port->dev->needed_headroom = PRESTERA_DSA_HLEN;

	return 0;
}

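/* Insert the DSA tag between the source MAC and the EtherType: make
 * headroom, shift both MAC addresses forward by PRESTERA_DSA_HLEN and
 * build the tag in the resulting gap.
 */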
netdev_tx_t prestera_rxtx_xmit(struct prestera_port *port, struct sk_buff *skb)
{
	struct prestera_dsa dsa;

	dsa.hw_dev_num = port->dev_id;
	dsa.port_num = port->hw_id;

	if (skb_cow_head(skb, PRESTERA_DSA_HLEN) < 0)
		return NET_XMIT_DROP;

	skb_push(skb, PRESTERA_DSA_HLEN);
	memmove(skb->data, skb->data + PRESTERA_DSA_HLEN, 2 * ETH_ALEN);

	if (prestera_dsa_build(&dsa, skb->data + 2 * ETH_ALEN) != 0)
		return NET_XMIT_DROP;

	return prestera_sdma_xmit(&port->sw->rxtx->sdma, skb);
}