1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2014-2016 Freescale Semiconductor Inc.
3 * Copyright 2016-2017 NXP
5 #include <linux/init.h>
6 #include <linux/module.h>
7 #include <linux/platform_device.h>
8 #include <linux/etherdevice.h>
9 #include <linux/of_net.h>
10 #include <linux/interrupt.h>
11 #include <linux/msi.h>
12 #include <linux/kthread.h>
13 #include <linux/iommu.h>
14 #include <linux/net_tstamp.h>
15 #include <linux/fsl/mc.h>
19 #include "dpaa2-eth.h"
21 /* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
22 * using trace events only need to #include <trace/events/sched.h>
24 #define CREATE_TRACE_POINTS
25 #include "dpaa2-eth-trace.h"
27 MODULE_LICENSE("Dual BSD/GPL");
28 MODULE_AUTHOR("Freescale Semiconductor, Inc");
29 MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
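/* Translate an IOVA handed back by hardware into a kernel virtual address,
 * going through the IOMMU mapping if one is in use
 */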
31 static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
34 phys_addr_t phys_addr;
36 phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
38 return phys_to_virt(phys_addr);
41 static void validate_rx_csum(struct dpaa2_eth_priv *priv,
45 skb_checksum_none_assert(skb);
47 /* HW checksum validation is disabled, nothing to do here */
48 if (!(priv->net_dev->features & NETIF_F_RXCSUM))
51 /* Read checksum validation bits */
52 if (!((fd_status & DPAA2_FAS_L3CV) &&
53 (fd_status & DPAA2_FAS_L4CV)))
56 /* Inform the stack there's no need to compute L3/L4 csum anymore */
57 skb->ip_summed = CHECKSUM_UNNECESSARY;
60 /* Free a received FD.
61 * Not to be used for Tx conf FDs or on any other paths.
63 static void free_rx_fd(struct dpaa2_eth_priv *priv,
64 const struct dpaa2_fd *fd,
67 struct device *dev = priv->net_dev->dev.parent;
68 dma_addr_t addr = dpaa2_fd_get_addr(fd);
69 u8 fd_format = dpaa2_fd_get_format(fd);
70 struct dpaa2_sg_entry *sgt;
74 /* If single buffer frame, just free the data buffer */
75 if (fd_format == dpaa2_fd_single)
77 else if (fd_format != dpaa2_fd_sg)
78 /* We don't support any other format */
81 /* For S/G frames, we first need to free all SG entries
82 * except the first one, which was taken care of already
84 sgt = vaddr + dpaa2_fd_get_offset(fd);
85 for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
86 addr = dpaa2_sg_get_addr(&sgt[i]);
87 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
88 dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
91 skb_free_frag(sg_vaddr);
92 if (dpaa2_sg_is_final(&sgt[i]))
100 /* Build a linear skb based on a single-buffer frame descriptor */
101 static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
102 const struct dpaa2_fd *fd,
105 struct sk_buff *skb = NULL;
106 u16 fd_offset = dpaa2_fd_get_offset(fd);
107 u32 fd_length = dpaa2_fd_get_len(fd);
111 skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE);
115 skb_reserve(skb, fd_offset);
116 skb_put(skb, fd_length);
121 /* Build a non-linear (fragmented) skb based on an S/G table */
122 static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
123 struct dpaa2_eth_channel *ch,
124 struct dpaa2_sg_entry *sgt)
126 struct sk_buff *skb = NULL;
127 struct device *dev = priv->net_dev->dev.parent;
132 struct page *page, *head_page;
136 for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
137 struct dpaa2_sg_entry *sge = &sgt[i];
139 /* NOTE: We only support SG entries in dpaa2_sg_single format,
140 * but this is the only format we may receive from HW anyway
143 /* Get the address and length from the S/G entry */
144 sg_addr = dpaa2_sg_get_addr(sge);
145 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
146 dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
149 sg_length = dpaa2_sg_get_len(sge);
152 /* We build the skb around the first data buffer */
153 skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
154 if (unlikely(!skb)) {
155 /* Free the first SG entry now, since we already
156 * unmapped it and obtained the virtual address
158 skb_free_frag(sg_vaddr);
160 /* We still need to subtract the buffers used
161 * by this FD from our software counter
163 while (i < DPAA2_ETH_MAX_SG_ENTRIES &&
164 !dpaa2_sg_is_final(&sgt[i]))
169 sg_offset = dpaa2_sg_get_offset(sge);
170 skb_reserve(skb, sg_offset);
171 skb_put(skb, sg_length);
173 /* Rest of the data buffers are stored as skb frags */
174 page = virt_to_page(sg_vaddr);
175 head_page = virt_to_head_page(sg_vaddr);
177 /* Offset in page (which may be compound).
178 * Data in subsequent SG entries is stored from the
179 * beginning of the buffer, so we don't need to add the
182 page_offset = ((unsigned long)sg_vaddr &
184 (page_address(page) - page_address(head_page));
186 skb_add_rx_frag(skb, i - 1, head_page, page_offset,
187 sg_length, DPAA2_ETH_RX_BUF_SIZE);
190 if (dpaa2_sg_is_final(sge))
194 WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
196 /* Count all data buffers + SG table buffer */
197 ch->buf_count -= i + 2;
202 /* Main Rx frame processing routine */
203 static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
204 struct dpaa2_eth_channel *ch,
205 const struct dpaa2_fd *fd,
206 struct napi_struct *napi,
209 dma_addr_t addr = dpaa2_fd_get_addr(fd);
210 u8 fd_format = dpaa2_fd_get_format(fd);
213 struct rtnl_link_stats64 *percpu_stats;
214 struct dpaa2_eth_drv_stats *percpu_extras;
215 struct device *dev = priv->net_dev->dev.parent;
216 struct dpaa2_fas *fas;
221 trace_dpaa2_rx_fd(priv->net_dev, fd);
223 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
224 dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
226 fas = dpaa2_get_fas(vaddr, false);
228 buf_data = vaddr + dpaa2_fd_get_offset(fd);
231 percpu_stats = this_cpu_ptr(priv->percpu_stats);
232 percpu_extras = this_cpu_ptr(priv->percpu_extras);
234 if (fd_format == dpaa2_fd_single) {
235 skb = build_linear_skb(ch, fd, vaddr);
236 } else if (fd_format == dpaa2_fd_sg) {
237 skb = build_frag_skb(priv, ch, buf_data);
238 skb_free_frag(vaddr);
239 percpu_extras->rx_sg_frames++;
240 percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
242 /* We don't support any other format */
243 goto err_frame_format;
251 /* Get the timestamp value */
252 if (priv->rx_tstamp) {
253 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
254 __le64 *ts = dpaa2_get_ts(vaddr, false);
257 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
259 ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
260 shhwtstamps->hwtstamp = ns_to_ktime(ns);
263 /* Check if we need to validate the L4 csum */
264 if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
265 status = le32_to_cpu(fas->status);
266 validate_rx_csum(priv, status, skb);
269 skb->protocol = eth_type_trans(skb, priv->net_dev);
270 skb_record_rx_queue(skb, queue_id);
272 percpu_stats->rx_packets++;
273 percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
275 napi_gro_receive(napi, skb);
280 free_rx_fd(priv, fd, vaddr);
282 percpu_stats->rx_dropped++;
285 /* Consume all frames pull-dequeued into the store. This is the simplest way to
286 * make sure we don't accidentally issue another volatile dequeue which would
287 * overwrite (leak) frames already in the store.
289 * Observance of NAPI budget is not our concern, leaving that to the caller.
291 static int consume_frames(struct dpaa2_eth_channel *ch,
292 enum dpaa2_eth_fq_type *type)
294 struct dpaa2_eth_priv *priv = ch->priv;
295 struct dpaa2_eth_fq *fq = NULL;
297 const struct dpaa2_fd *fd;
302 dq = dpaa2_io_store_next(ch->store, &is_last);
304 /* If we're here, we *must* have placed a
305 * volatile dequeue command, so keep reading through
306 * the store until we get some sort of valid response
307 * token (either a valid frame or an "empty dequeue")
312 fd = dpaa2_dq_fd(dq);
313 fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
315 fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
322 fq->stats.frames += cleaned;
323 ch->stats.frames += cleaned;
325 /* A dequeue operation only pulls frames from a single queue
326 * into the store. Return the frame queue type as an out param.
334 /* Configure the egress frame annotation for timestamp update */
335 static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
337 struct dpaa2_faead *faead;
340 /* Mark the egress frame annotation area as valid */
341 frc = dpaa2_fd_get_frc(fd);
342 dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
344 /* Set hardware annotation size */
345 ctrl = dpaa2_fd_get_ctrl(fd);
346 dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
348 /* enable UPD (update prepended data) bit in FAEAD field of
349 * hardware frame annotation area
351 ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
352 faead = dpaa2_get_faead(buf_start, true);
353 faead->ctrl = cpu_to_le32(ctrl);
356 /* Create a frame descriptor based on a fragmented skb */
357 static int build_sg_fd(struct dpaa2_eth_priv *priv,
361 struct device *dev = priv->net_dev->dev.parent;
362 void *sgt_buf = NULL;
364 int nr_frags = skb_shinfo(skb)->nr_frags;
365 struct dpaa2_sg_entry *sgt;
368 struct scatterlist *scl, *crt_scl;
371 struct dpaa2_eth_swa *swa;
373 /* Create and map scatterlist.
374 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
375 * to go beyond nr_frags+1.
376 * Note: We don't support chained scatterlists
378 if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
381 scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
385 sg_init_table(scl, nr_frags + 1);
386 num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
387 num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
388 if (unlikely(!num_dma_bufs)) {
390 goto dma_map_sg_failed;
393 /* Prepare the HW SGT structure */
394 sgt_buf_size = priv->tx_data_offset +
395 sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
396 sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
397 if (unlikely(!sgt_buf)) {
399 goto sgt_buf_alloc_failed;
401 sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
402 memset(sgt_buf, 0, sgt_buf_size);
404 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
406 /* Fill in the HW SGT structure.
408 * sgt_buf is zeroed out, so the following fields are implicit
409 * in all sgt entries:
411 * - format is 'dpaa2_sg_single'
413 for_each_sg(scl, crt_scl, num_dma_bufs, i) {
414 dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
415 dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
417 dpaa2_sg_set_final(&sgt[i - 1], true);
419 /* Store the skb backpointer in the SGT buffer.
420 * Fit the scatterlist and the number of buffers alongside the
421 * skb backpointer in the software annotation area. We'll need
422 * all of them on Tx Conf.
424 swa = (struct dpaa2_eth_swa *)sgt_buf;
427 swa->num_sg = num_sg;
428 swa->sgt_size = sgt_buf_size;
430 /* Separately map the SGT buffer */
431 addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
432 if (unlikely(dma_mapping_error(dev, addr))) {
434 goto dma_map_single_failed;
436 dpaa2_fd_set_offset(fd, priv->tx_data_offset);
437 dpaa2_fd_set_format(fd, dpaa2_fd_sg);
438 dpaa2_fd_set_addr(fd, addr);
439 dpaa2_fd_set_len(fd, skb->len);
440 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
442 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
443 enable_tx_tstamp(fd, sgt_buf);
447 dma_map_single_failed:
448 skb_free_frag(sgt_buf);
449 sgt_buf_alloc_failed:
450 dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
456 /* Create a frame descriptor based on a linear skb */
457 static int build_single_fd(struct dpaa2_eth_priv *priv,
461 struct device *dev = priv->net_dev->dev.parent;
462 u8 *buffer_start, *aligned_start;
463 struct sk_buff **skbh;
466 buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
468 /* If there's enough room to align the FD address, do it.
469 * It will help hardware optimize accesses.
471 aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
472 DPAA2_ETH_TX_BUF_ALIGN);
473 if (aligned_start >= skb->head)
474 buffer_start = aligned_start;
476 /* Store a backpointer to the skb at the beginning of the buffer
477 * (in the private data area) such that we can release it
480 skbh = (struct sk_buff **)buffer_start;
483 addr = dma_map_single(dev, buffer_start,
484 skb_tail_pointer(skb) - buffer_start,
486 if (unlikely(dma_mapping_error(dev, addr)))
489 dpaa2_fd_set_addr(fd, addr);
490 dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
491 dpaa2_fd_set_len(fd, skb->len);
492 dpaa2_fd_set_format(fd, dpaa2_fd_single);
493 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
495 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
496 enable_tx_tstamp(fd, buffer_start);
501 /* FD freeing routine on the Tx path
503 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
504 * back-pointed to is also freed.
505 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
508 static void free_tx_fd(const struct dpaa2_eth_priv *priv,
509 const struct dpaa2_fd *fd)
511 struct device *dev = priv->net_dev->dev.parent;
513 struct sk_buff **skbh, *skb;
514 unsigned char *buffer_start;
515 struct dpaa2_eth_swa *swa;
516 u8 fd_format = dpaa2_fd_get_format(fd);
518 fd_addr = dpaa2_fd_get_addr(fd);
519 skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
521 if (fd_format == dpaa2_fd_single) {
523 buffer_start = (unsigned char *)skbh;
524 /* Accessing the skb buffer is safe before dma unmap, because
525 * we didn't map the actual skb shell.
527 dma_unmap_single(dev, fd_addr,
528 skb_tail_pointer(skb) - buffer_start,
530 } else if (fd_format == dpaa2_fd_sg) {
531 swa = (struct dpaa2_eth_swa *)skbh;
534 /* Unmap the scatterlist */
535 dma_unmap_sg(dev, swa->scl, swa->num_sg, DMA_BIDIRECTIONAL);
538 /* Unmap the SGT buffer */
539 dma_unmap_single(dev, fd_addr, swa->sgt_size,
542 netdev_dbg(priv->net_dev, "Invalid FD format\n");
546 /* Get the timestamp value */
547 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
548 struct skb_shared_hwtstamps shhwtstamps;
549 __le64 *ts = dpaa2_get_ts(skbh, true);
552 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
554 ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
555 shhwtstamps.hwtstamp = ns_to_ktime(ns);
556 skb_tstamp_tx(skb, &shhwtstamps);
559 /* Free SGT buffer allocated on tx */
560 if (fd_format != dpaa2_fd_single)
563 /* Move on with skb release */
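/* Main Tx routine: build a frame descriptor from the skb and enqueue it
 * on the Tx queue selected by the skb queue mapping
 */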
567 static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
569 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
571 struct rtnl_link_stats64 *percpu_stats;
572 struct dpaa2_eth_drv_stats *percpu_extras;
573 struct dpaa2_eth_fq *fq;
575 unsigned int needed_headroom;
578 percpu_stats = this_cpu_ptr(priv->percpu_stats);
579 percpu_extras = this_cpu_ptr(priv->percpu_extras);
581 needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
582 if (skb_headroom(skb) < needed_headroom) {
585 ns = skb_realloc_headroom(skb, needed_headroom);
587 percpu_stats->tx_dropped++;
588 goto err_alloc_headroom;
590 percpu_extras->tx_reallocs++;
593 skb_set_owner_w(ns, skb->sk);
599 /* We'll be holding a back-reference to the skb until Tx Confirmation;
600 * we don't want that overwritten by a concurrent Tx with a cloned skb.
602 skb = skb_unshare(skb, GFP_ATOMIC);
603 if (unlikely(!skb)) {
604 /* skb_unshare() has already freed the skb */
605 percpu_stats->tx_dropped++;
609 /* Setup the FD fields */
610 memset(&fd, 0, sizeof(fd));
612 if (skb_is_nonlinear(skb)) {
613 err = build_sg_fd(priv, skb, &fd);
614 percpu_extras->tx_sg_frames++;
615 percpu_extras->tx_sg_bytes += skb->len;
617 err = build_single_fd(priv, skb, &fd);
621 percpu_stats->tx_dropped++;
626 trace_dpaa2_tx_fd(net_dev, &fd);
628 /* TxConf FQ selection relies on queue id from the stack.
629 * In case of a forwarded frame from another DPNI interface, we choose
630 * a queue affined to the same core that processed the Rx frame
632 queue_mapping = skb_get_queue_mapping(skb);
633 fq = &priv->fq[queue_mapping];
634 for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
635 err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
641 percpu_extras->tx_portal_busy += i;
642 if (unlikely(err < 0)) {
643 percpu_stats->tx_errors++;
644 /* Clean up everything, including freeing the skb */
645 free_tx_fd(priv, &fd);
647 percpu_stats->tx_packets++;
648 percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
660 /* Tx confirmation frame processing routine */
661 static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
662 struct dpaa2_eth_channel *ch __always_unused,
663 const struct dpaa2_fd *fd,
664 struct napi_struct *napi __always_unused,
665 u16 queue_id __always_unused)
667 struct rtnl_link_stats64 *percpu_stats;
668 struct dpaa2_eth_drv_stats *percpu_extras;
672 trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
674 percpu_extras = this_cpu_ptr(priv->percpu_extras);
675 percpu_extras->tx_conf_frames++;
676 percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
678 /* Check frame errors in the FD field */
679 fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
680 free_tx_fd(priv, fd);
682 if (likely(!fd_errors))
686 netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
689 percpu_stats = this_cpu_ptr(priv->percpu_stats);
690 /* Tx-conf logically pertains to the egress path. */
691 percpu_stats->tx_errors++;
694 static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
698 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
699 DPNI_OFF_RX_L3_CSUM, enable);
701 netdev_err(priv->net_dev,
702 "dpni_set_offload(RX_L3_CSUM) failed\n");
706 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
707 DPNI_OFF_RX_L4_CSUM, enable);
709 netdev_err(priv->net_dev,
710 "dpni_set_offload(RX_L4_CSUM) failed\n");
717 static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
721 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
722 DPNI_OFF_TX_L3_CSUM, enable);
724 netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
728 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
729 DPNI_OFF_TX_L4_CSUM, enable);
731 netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
738 /* Free buffers acquired from the buffer pool or which were meant to
739 * be released in the pool
741 static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
743 struct device *dev = priv->net_dev->dev.parent;
747 for (i = 0; i < count; i++) {
748 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
749 dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
751 skb_free_frag(vaddr);
755 /* Perform a single release command to add buffers
756 * to the specified buffer pool
758 static int add_bufs(struct dpaa2_eth_priv *priv,
759 struct dpaa2_eth_channel *ch, u16 bpid)
761 struct device *dev = priv->net_dev->dev.parent;
762 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
767 for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
768 /* Allocate buffer visible to WRIOP + skb shared info +
771 buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv));
775 buf = PTR_ALIGN(buf, priv->rx_buf_align);
777 addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
779 if (unlikely(dma_mapping_error(dev, addr)))
785 trace_dpaa2_eth_buf_seed(priv->net_dev,
786 buf, dpaa2_eth_buf_raw_size(priv),
787 addr, DPAA2_ETH_RX_BUF_SIZE,
792 /* In case the portal is busy, retry until successful */
793 while ((err = dpaa2_io_service_release(ch->dpio, bpid,
794 buf_array, i)) == -EBUSY)
797 /* If release command failed, clean up and bail out;
798 * not much else we can do about it
801 free_bufs(priv, buf_array, i);
810 /* If we managed to allocate at least some buffers,
811 * release them to hardware
819 static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
824 /* This is the lazy seeding of Rx buffer pools.
825 * add_bufs() is also used on the Rx hotpath and calls
826 * napi_alloc_frag(). The trouble is that the latter ends up
827 * calling this_cpu_ptr(), which mandates execution in atomic context.
828 * Rather than splitting up the code, do a one-off preempt disable.
831 for (j = 0; j < priv->num_channels; j++) {
832 for (i = 0; i < DPAA2_ETH_NUM_BUFS;
833 i += DPAA2_ETH_BUFS_PER_CMD) {
834 new_count = add_bufs(priv, priv->channel[j], bpid);
835 priv->channel[j]->buf_count += new_count;
837 if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
849 * Drain the specified number of buffers from the DPNI's private buffer pool.
850 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
852 static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
854 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
858 ret = dpaa2_io_service_acquire(NULL, priv->bpid,
861 netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
864 free_bufs(priv, buf_array, ret);
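/* Drain the whole buffer pool and reset the per-channel buffer counters */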
868 static void drain_pool(struct dpaa2_eth_priv *priv)
872 drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
875 for (i = 0; i < priv->num_channels; i++)
876 priv->channel[i]->buf_count = 0;
879 /* Function is called from softirq context only, so we don't need to guard
880 * the access to percpu count
882 static int refill_pool(struct dpaa2_eth_priv *priv,
883 struct dpaa2_eth_channel *ch,
888 if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
892 new_count = add_bufs(priv, ch, bpid);
893 if (unlikely(!new_count)) {
894 /* Out of memory; abort for now, we'll try later on */
897 ch->buf_count += new_count;
898 } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
900 if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
906 static int pull_channel(struct dpaa2_eth_channel *ch)
911 /* Retry while portal is busy */
913 err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
917 } while (err == -EBUSY);
919 ch->stats.dequeue_portal_busy += dequeues;
921 ch->stats.pull_err++;
928 * Frames are dequeued from the QMan channel associated with this NAPI context.
929 * Rx, Tx confirmation and (if configured) Rx error frames all count
930 * towards the NAPI budget.
932 static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
934 struct dpaa2_eth_channel *ch;
935 struct dpaa2_eth_priv *priv;
936 int rx_cleaned = 0, txconf_cleaned = 0;
937 enum dpaa2_eth_fq_type type = 0;
941 ch = container_of(napi, struct dpaa2_eth_channel, napi);
945 err = pull_channel(ch);
949 /* Refill pool if appropriate */
950 refill_pool(priv, ch, priv->bpid);
952 store_cleaned = consume_frames(ch, &type);
953 if (type == DPAA2_RX_FQ)
954 rx_cleaned += store_cleaned;
956 txconf_cleaned += store_cleaned;
958 /* If we either consumed the whole NAPI budget with Rx frames
959 * or we reached the Tx confirmations threshold, we're done.
961 if (rx_cleaned >= budget ||
962 txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI)
964 } while (store_cleaned);
966 /* We didn't consume the entire budget, so finish napi and
967 * re-enable data availability notifications
969 napi_complete_done(napi, rx_cleaned);
971 err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
973 } while (err == -EBUSY);
974 WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
975 ch->nctx.desired_cpu);
977 return max(rx_cleaned, 1);
980 static void enable_ch_napi(struct dpaa2_eth_priv *priv)
982 struct dpaa2_eth_channel *ch;
985 for (i = 0; i < priv->num_channels; i++) {
986 ch = priv->channel[i];
987 napi_enable(&ch->napi);
991 static void disable_ch_napi(struct dpaa2_eth_priv *priv)
993 struct dpaa2_eth_channel *ch;
996 for (i = 0; i < priv->num_channels; i++) {
997 ch = priv->channel[i];
998 napi_disable(&ch->napi);
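/* Read the current link state from firmware and update the netdev
 * carrier and Tx queue state accordingly
 */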
1002 static int link_state_update(struct dpaa2_eth_priv *priv)
1004 struct dpni_link_state state = {0};
1007 err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
1008 if (unlikely(err)) {
1009 netdev_err(priv->net_dev,
1010 "dpni_get_link_state() failed\n");
1014 /* Check link state; speed/duplex changes are not treated yet */
1015 if (priv->link_state.up == state.up)
1018 priv->link_state = state;
1020 netif_carrier_on(priv->net_dev);
1021 netif_tx_start_all_queues(priv->net_dev);
1023 netif_tx_stop_all_queues(priv->net_dev);
1024 netif_carrier_off(priv->net_dev);
1027 netdev_info(priv->net_dev, "Link Event: state %s\n",
1028 state.up ? "up" : "down");
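/* ndo_open callback: seed the buffer pool, enable NAPI and the DPNI,
 * then sync up with the current link state
 */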
1033 static int dpaa2_eth_open(struct net_device *net_dev)
1035 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1038 err = seed_pool(priv, priv->bpid);
1040 /* Not much to do; the buffer pool, though not filled up,
1041 * may still contain some buffers which would enable us
1044 netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
1045 priv->dpbp_dev->obj_desc.id, priv->bpid);
1048 /* We'll only start the txqs when the link is actually ready; make sure
1049 * we don't race against the link up notification, which may come
1050 * immediately after dpni_enable();
1052 netif_tx_stop_all_queues(net_dev);
1053 enable_ch_napi(priv);
1054 /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
1055 * return true and cause 'ip link show' to report the LOWER_UP flag,
1056 * even though the link notification wasn't even received.
1058 netif_carrier_off(net_dev);
1060 err = dpni_enable(priv->mc_io, 0, priv->mc_token);
1062 netdev_err(net_dev, "dpni_enable() failed\n");
1066 /* If the DPMAC object has already processed the link up interrupt,
1067 * we have to learn the link state ourselves.
1069 err = link_state_update(priv);
1071 netdev_err(net_dev, "Can't update link state\n");
1072 goto link_state_err;
1079 disable_ch_napi(priv);
1084 /* The DPIO store must be empty when we call this,
1085 * at the end of every NAPI cycle.
1087 static u32 drain_channel(struct dpaa2_eth_channel *ch)
1089 u32 drained = 0, total = 0;
1093 drained = consume_frames(ch, NULL);
1100 static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
1102 struct dpaa2_eth_channel *ch;
1106 for (i = 0; i < priv->num_channels; i++) {
1107 ch = priv->channel[i];
1108 drained += drain_channel(ch);
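/* ndo_stop callback: disable the DPNI, quiesce NAPI, then drain the
 * ingress frame queues and the buffer pool
 */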
1114 static int dpaa2_eth_stop(struct net_device *net_dev)
1116 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1117 int dpni_enabled = 0;
1121 netif_tx_stop_all_queues(net_dev);
1122 netif_carrier_off(net_dev);
1124 /* Loop while dpni_disable() attempts to drain the egress FQs
1125 * and confirm them back to us.
1128 dpni_disable(priv->mc_io, 0, priv->mc_token);
1129 dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
1131 /* Allow the hardware some slack */
1133 } while (dpni_enabled && --retries);
1135 netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
1136 /* Must go on and disable NAPI nonetheless, so we don't crash at
1137 * the next "ifconfig up"
1141 /* Wait for NAPI to complete on every core and disable it.
1142 * In particular, this will also prevent NAPI from being rescheduled if
1143 * a new CDAN is serviced, effectively discarding the CDAN. We therefore
1144 * don't even need to disarm the channels, except perhaps for the case
1145 * of a huge coalescing value.
1147 disable_ch_napi(priv);
1149 /* Manually drain the Rx and TxConf queues */
1150 drained = drain_ingress_frames(priv);
1152 netdev_dbg(net_dev, "Drained %d frames.\n", drained);
1154 /* Empty the buffer pool */
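/* ndo_set_mac_address callback: update the netdev address and program it
 * as the DPNI primary MAC address
 */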
1160 static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
1162 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1163 struct device *dev = net_dev->dev.parent;
1166 err = eth_mac_addr(net_dev, addr);
1168 dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
1172 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
1175 dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
1182 /* Fill in counters maintained by the GPP driver. These may be different from
1183 * the hardware counters obtained by ethtool.
1185 static void dpaa2_eth_get_stats(struct net_device *net_dev,
1186 struct rtnl_link_stats64 *stats)
1188 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1189 struct rtnl_link_stats64 *percpu_stats;
1191 u64 *netstats = (u64 *)stats;
1193 int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
1195 for_each_possible_cpu(i) {
1196 percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
1197 cpustats = (u64 *)percpu_stats;
1198 for (j = 0; j < num; j++)
1199 netstats[j] += cpustats[j];
1203 /* Copy mac unicast addresses from @net_dev to @priv.
1204 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1206 static void add_uc_hw_addr(const struct net_device *net_dev,
1207 struct dpaa2_eth_priv *priv)
1209 struct netdev_hw_addr *ha;
1212 netdev_for_each_uc_addr(ha, net_dev) {
1213 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1216 netdev_warn(priv->net_dev,
1217 "Could not add ucast MAC %pM to the filtering table (err %d)\n",
1222 /* Copy mac multicast addresses from @net_dev to @priv
1223 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1225 static void add_mc_hw_addr(const struct net_device *net_dev,
1226 struct dpaa2_eth_priv *priv)
1228 struct netdev_hw_addr *ha;
1231 netdev_for_each_mc_addr(ha, net_dev) {
1232 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1235 netdev_warn(priv->net_dev,
1236 "Could not add mcast MAC %pM to the filtering table (err %d)\n",
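/* ndo_set_rx_mode callback: rebuild the MAC filtering tables and adjust
 * unicast/multicast promiscuous mode to match the netdev flags
 */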
1241 static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
1243 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1244 int uc_count = netdev_uc_count(net_dev);
1245 int mc_count = netdev_mc_count(net_dev);
1246 u8 max_mac = priv->dpni_attrs.mac_filter_entries;
1247 u32 options = priv->dpni_attrs.options;
1248 u16 mc_token = priv->mc_token;
1249 struct fsl_mc_io *mc_io = priv->mc_io;
1252 /* Basic sanity checks; these probably indicate a misconfiguration */
1253 if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
1254 netdev_info(net_dev,
1255 "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
1258 /* Force promiscuous if the uc or mc counts exceed our capabilities. */
1259 if (uc_count > max_mac) {
1260 netdev_info(net_dev,
1261 "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
1265 if (mc_count + uc_count > max_mac) {
1266 netdev_info(net_dev,
1267 "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
1268 uc_count + mc_count, max_mac);
1269 goto force_mc_promisc;
1272 /* Adjust promisc settings due to flag combinations */
1273 if (net_dev->flags & IFF_PROMISC)
1275 if (net_dev->flags & IFF_ALLMULTI) {
1276 /* First, rebuild unicast filtering table. This should be done
1277 * in promisc mode, in order to avoid frame loss while we
1278 * progressively add entries to the table.
1279 * We don't know whether we had been in promisc already, and
1280 * making an MC call to find out is expensive; so set uc promisc
1283 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1285 netdev_warn(net_dev, "Can't set uc promisc\n");
1287 /* Actual uc table reconstruction. */
1288 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
1290 netdev_warn(net_dev, "Can't clear uc filters\n");
1291 add_uc_hw_addr(net_dev, priv);
1293 /* Finally, clear uc promisc and set mc promisc as requested. */
1294 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1296 netdev_warn(net_dev, "Can't clear uc promisc\n");
1297 goto force_mc_promisc;
1300 /* Neither unicast nor multicast promisc will be on... eventually.
1301 * For now, rebuild mac filtering tables while forcing both of them on.
1303 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1305 netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
1306 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1308 netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
1310 /* Actual mac filtering tables reconstruction */
1311 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
1313 netdev_warn(net_dev, "Can't clear mac filters\n");
1314 add_mc_hw_addr(net_dev, priv);
1315 add_uc_hw_addr(net_dev, priv);
1317 /* Now we can clear both ucast and mcast promisc, without risking
1318 * to drop legitimate frames anymore.
1320 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1322 netdev_warn(net_dev, "Can't clear ucast promisc\n");
1323 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
1325 netdev_warn(net_dev, "Can't clear mcast promisc\n");
1330 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1332 netdev_warn(net_dev, "Can't set ucast promisc\n");
1334 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1336 netdev_warn(net_dev, "Can't set mcast promisc\n");
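/* ndo_set_features callback: propagate Rx/Tx checksum offload changes
 * to hardware
 */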
1339 static int dpaa2_eth_set_features(struct net_device *net_dev,
1340 netdev_features_t features)
1342 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1343 netdev_features_t changed = features ^ net_dev->features;
1347 if (changed & NETIF_F_RXCSUM) {
1348 enable = !!(features & NETIF_F_RXCSUM);
1349 err = set_rx_csum(priv, enable);
1354 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
1355 enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
1356 err = set_tx_csum(priv, enable);
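/* Configure hardware timestamping in response to a SIOCSHWTSTAMP request */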
1364 static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1366 struct dpaa2_eth_priv *priv = netdev_priv(dev);
1367 struct hwtstamp_config config;
1369 if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
1372 switch (config.tx_type) {
1373 case HWTSTAMP_TX_OFF:
1374 priv->tx_tstamp = false;
1376 case HWTSTAMP_TX_ON:
1377 priv->tx_tstamp = true;
1383 if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
1384 priv->rx_tstamp = false;
1386 priv->rx_tstamp = true;
1387 /* TS is set for all frame types, not only those requested */
1388 config.rx_filter = HWTSTAMP_FILTER_ALL;
1391 return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
1395 static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1397 if (cmd == SIOCSHWTSTAMP)
1398 return dpaa2_eth_ts_ioctl(dev, rq, cmd);
1403 static const struct net_device_ops dpaa2_eth_ops = {
1404 .ndo_open = dpaa2_eth_open,
1405 .ndo_start_xmit = dpaa2_eth_tx,
1406 .ndo_stop = dpaa2_eth_stop,
1407 .ndo_set_mac_address = dpaa2_eth_set_addr,
1408 .ndo_get_stats64 = dpaa2_eth_get_stats,
1409 .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
1410 .ndo_set_features = dpaa2_eth_set_features,
1411 .ndo_do_ioctl = dpaa2_eth_ioctl,
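/* Channel Data Availability Notification (CDAN) callback: account the
 * event and schedule NAPI on the affected channel
 */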
1414 static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
1416 struct dpaa2_eth_channel *ch;
1418 ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
1420 /* Update NAPI statistics */
1423 napi_schedule_irqoff(&ch->napi);
1426 /* Allocate and configure a DPCON object */
1427 static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
1429 struct fsl_mc_device *dpcon;
1430 struct device *dev = priv->net_dev->dev.parent;
1431 struct dpcon_attr attrs;
1434 err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
1435 FSL_MC_POOL_DPCON, &dpcon);
1437 dev_info(dev, "Not enough DPCONs, will go on as-is\n");
1441 err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
1443 dev_err(dev, "dpcon_open() failed\n");
1447 err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
1449 dev_err(dev, "dpcon_reset() failed\n");
1453 err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
1455 dev_err(dev, "dpcon_get_attributes() failed\n");
1459 err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
1461 dev_err(dev, "dpcon_enable() failed\n");
1468 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
1470 fsl_mc_object_free(dpcon);
1475 static void free_dpcon(struct dpaa2_eth_priv *priv,
1476 struct fsl_mc_device *dpcon)
1478 dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
1479 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
1480 fsl_mc_object_free(dpcon);
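/* Allocate a software channel structure and its backing DPCON object */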
1483 static struct dpaa2_eth_channel *
1484 alloc_channel(struct dpaa2_eth_priv *priv)
1486 struct dpaa2_eth_channel *channel;
1487 struct dpcon_attr attr;
1488 struct device *dev = priv->net_dev->dev.parent;
1491 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
1495 channel->dpcon = setup_dpcon(priv);
1496 if (!channel->dpcon)
1499 err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
1502 dev_err(dev, "dpcon_get_attributes() failed\n");
1506 channel->dpcon_id = attr.id;
1507 channel->ch_id = attr.qbman_ch_id;
1508 channel->priv = priv;
1513 free_dpcon(priv, channel->dpcon);
1519 static void free_channel(struct dpaa2_eth_priv *priv,
1520 struct dpaa2_eth_channel *channel)
1522 free_dpcon(priv, channel->dpcon);
1526 /* DPIO setup: allocate and configure QBMan channels, setup core affinity
1527 * and register data availability notifications
1529 static int setup_dpio(struct dpaa2_eth_priv *priv)
1531 struct dpaa2_io_notification_ctx *nctx;
1532 struct dpaa2_eth_channel *channel;
1533 struct dpcon_notification_cfg dpcon_notif_cfg;
1534 struct device *dev = priv->net_dev->dev.parent;
1537 /* We want the ability to spread ingress traffic (RX, TX conf) to as
1538 * many cores as possible, so we need one channel for each core
1539 * (unless there are fewer queues than cores, in which case the extra
1540 * channels would be wasted).
1541 * Allocate one channel per core and register it to the core's
1542 * affine DPIO. If not enough channels are available for all cores
1543 * or if some cores don't have an affine DPIO, there will be no
1544 * ingress frame processing on those cores.
1546 cpumask_clear(&priv->dpio_cpumask);
1547 for_each_online_cpu(i) {
1548 /* Try to allocate a channel */
1549 channel = alloc_channel(priv);
1552 "No affine channel for cpu %d and above\n", i);
1557 priv->channel[priv->num_channels] = channel;
1559 nctx = &channel->nctx;
1562 nctx->id = channel->ch_id;
1563 nctx->desired_cpu = i;
1565 /* Register the new context */
1566 channel->dpio = dpaa2_io_service_select(i);
1567 err = dpaa2_io_service_register(channel->dpio, nctx);
1569 dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
1570 /* If no affine DPIO for this core, there's probably
1571 * none available for next cores either. Signal we want
1572 * to retry later, in case the DPIO devices weren't
1575 err = -EPROBE_DEFER;
1576 goto err_service_reg;
1579 /* Register DPCON notification with MC */
1580 dpcon_notif_cfg.dpio_id = nctx->dpio_id;
1581 dpcon_notif_cfg.priority = 0;
1582 dpcon_notif_cfg.user_ctx = nctx->qman64;
1583 err = dpcon_set_notification(priv->mc_io, 0,
1584 channel->dpcon->mc_handle,
1587 dev_err(dev, "dpcon_set_notification() failed\n");
1591 /* If we managed to allocate a channel and also found an affine
1592 * DPIO for this core, add it to the final mask
1594 cpumask_set_cpu(i, &priv->dpio_cpumask);
1595 priv->num_channels++;
1597 /* Stop if we already have enough channels to accommodate all
1598 * RX and TX conf queues
1600 if (priv->num_channels == dpaa2_eth_queue_count(priv))
1607 dpaa2_io_service_deregister(channel->dpio, nctx);
1609 free_channel(priv, channel);
1611 if (cpumask_empty(&priv->dpio_cpumask)) {
1612 dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
1616 dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
1617 cpumask_pr_args(&priv->dpio_cpumask));
1622 static void free_dpio(struct dpaa2_eth_priv *priv)
1625 struct dpaa2_eth_channel *ch;
1627 /* deregister CDAN notifications and free channels */
1628 for (i = 0; i < priv->num_channels; i++) {
1629 ch = priv->channel[i];
1630 dpaa2_io_service_deregister(ch->dpio, &ch->nctx);
1631 free_channel(priv, ch);
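/* Find the channel whose registered DPIO is affine to the given cpu */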
1635 static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
1638 struct device *dev = priv->net_dev->dev.parent;
1641 for (i = 0; i < priv->num_channels; i++)
1642 if (priv->channel[i]->nctx.desired_cpu == cpu)
1643 return priv->channel[i];
1645 /* We should never get here. Issue a warning and return
1646 * the first channel, because it's still better than nothing
1648 dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
1650 return priv->channel[0];
1653 static void set_fq_affinity(struct dpaa2_eth_priv *priv)
1655 struct device *dev = priv->net_dev->dev.parent;
1656 struct cpumask xps_mask;
1657 struct dpaa2_eth_fq *fq;
1658 int rx_cpu, txc_cpu;
1661 /* For each FQ, pick one channel/CPU to deliver frames to.
1662 * This may well change at runtime, either through irqbalance or
1663 * through direct user intervention.
1665 rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
1667 for (i = 0; i < priv->num_fqs; i++) {
1671 fq->target_cpu = rx_cpu;
1672 rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
1673 if (rx_cpu >= nr_cpu_ids)
1674 rx_cpu = cpumask_first(&priv->dpio_cpumask);
1676 case DPAA2_TX_CONF_FQ:
1677 fq->target_cpu = txc_cpu;
1679 /* Tell the stack to affine to txc_cpu the Tx queue
1680 * associated with the confirmation one
1682 cpumask_clear(&xps_mask);
1683 cpumask_set_cpu(txc_cpu, &xps_mask);
1684 err = netif_set_xps_queue(priv->net_dev, &xps_mask,
1687 dev_err(dev, "Error setting XPS queue\n");
1689 txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
1690 if (txc_cpu >= nr_cpu_ids)
1691 txc_cpu = cpumask_first(&priv->dpio_cpumask);
1694 dev_err(dev, "Unknown FQ type: %d\n", fq->type);
1696 fq->channel = get_affine_channel(priv, fq->target_cpu);
1700 static void setup_fqs(struct dpaa2_eth_priv *priv)
1704 /* We have one TxConf FQ per Tx flow.
1705 * The number of Tx and Rx queues is the same.
1706 * Tx queues come first in the fq array.
1708 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
1709 priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
1710 priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
1711 priv->fq[priv->num_fqs++].flowid = (u16)i;
1714 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
1715 priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
1716 priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
1717 priv->fq[priv->num_fqs++].flowid = (u16)i;
1720 /* For each FQ, decide on which core to process incoming frames */
1721 set_fq_affinity(priv);
1724 /* Allocate and configure one buffer pool for each interface */
1725 static int setup_dpbp(struct dpaa2_eth_priv *priv)
1728 struct fsl_mc_device *dpbp_dev;
1729 struct device *dev = priv->net_dev->dev.parent;
1730 struct dpbp_attr dpbp_attrs;
1732 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
1735 dev_err(dev, "DPBP device allocation failed\n");
1739 priv->dpbp_dev = dpbp_dev;
1741 err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
1742 &dpbp_dev->mc_handle);
1744 dev_err(dev, "dpbp_open() failed\n");
1748 err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
1750 dev_err(dev, "dpbp_reset() failed\n");
1754 err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
1756 dev_err(dev, "dpbp_enable() failed\n");
1760 err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
1763 dev_err(dev, "dpbp_get_attributes() failed\n");
1766 priv->bpid = dpbp_attrs.bpid;
1771 dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
1774 dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
1776 fsl_mc_object_free(dpbp_dev);
1781 static void free_dpbp(struct dpaa2_eth_priv *priv)
1784 dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
1785 dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
1786 fsl_mc_object_free(priv->dpbp_dev);
1789 static int set_buffer_layout(struct dpaa2_eth_priv *priv)
1791 struct device *dev = priv->net_dev->dev.parent;
1792 struct dpni_buffer_layout buf_layout = {0};
1795 /* We need to check for WRIOP version 1.0.0, but depending on the MC
1796 * version, this number is not always provided correctly on rev1.
1797 * Check for both alternatives in this situation.
1799 if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
1800 priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
1801 priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
1803 priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
1806 buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
1807 buf_layout.pass_timestamp = true;
1808 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
1809 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
1810 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1811 DPNI_QUEUE_TX, &buf_layout);
1813 dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
1817 /* tx-confirm buffer */
1818 buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
1819 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1820 DPNI_QUEUE_TX_CONFIRM, &buf_layout);
1822 dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
1826 /* Now that we've set our tx buffer layout, retrieve the minimum
1827 * required tx data offset.
1829 err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
1830 &priv->tx_data_offset);
1832 dev_err(dev, "dpni_get_tx_data_offset() failed\n");
1836 if ((priv->tx_data_offset % 64) != 0)
1837 dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
1838 priv->tx_data_offset);
1841 buf_layout.pass_frame_status = true;
1842 buf_layout.pass_parser_result = true;
1843 buf_layout.data_align = priv->rx_buf_align;
1844 buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
1845 buf_layout.private_data_size = 0;
1846 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
1847 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
1848 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
1849 DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
1850 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
1851 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1852 DPNI_QUEUE_RX, &buf_layout);
1854 dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
1861 /* Configure the DPNI object this interface is associated with */
1862 static int setup_dpni(struct fsl_mc_device *ls_dev)
1864 struct device *dev = &ls_dev->dev;
1865 struct dpaa2_eth_priv *priv;
1866 struct net_device *net_dev;
1869 net_dev = dev_get_drvdata(dev);
1870 priv = netdev_priv(net_dev);
1872 /* get a handle for the DPNI object */
1873 err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
1875 dev_err(dev, "dpni_open() failed\n");
1879 /* Check if we can work with this DPNI object */
1880 err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
1881 &priv->dpni_ver_minor);
1883 dev_err(dev, "dpni_get_api_version() failed\n");
1886 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
1887 dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
1888 priv->dpni_ver_major, priv->dpni_ver_minor,
1889 DPNI_VER_MAJOR, DPNI_VER_MINOR);
1894 ls_dev->mc_io = priv->mc_io;
1895 ls_dev->mc_handle = priv->mc_token;
1897 err = dpni_reset(priv->mc_io, 0, priv->mc_token);
1899 dev_err(dev, "dpni_reset() failed\n");
1903 err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
1906 dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
1910 err = set_buffer_layout(priv);
1914 priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
1915 dpaa2_eth_fs_count(priv), GFP_KERNEL);
1916 if (!priv->cls_rules)
1922 dpni_close(priv->mc_io, 0, priv->mc_token);
1927 static void free_dpni(struct dpaa2_eth_priv *priv)
1931 err = dpni_reset(priv->mc_io, 0, priv->mc_token);
1933 netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
1936 dpni_close(priv->mc_io, 0, priv->mc_token);
1939 static int setup_rx_flow(struct dpaa2_eth_priv *priv,
1940 struct dpaa2_eth_fq *fq)
1942 struct device *dev = priv->net_dev->dev.parent;
1943 struct dpni_queue queue;
1944 struct dpni_queue_id qid;
1945 struct dpni_taildrop td;
1948 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
1949 DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
1951 dev_err(dev, "dpni_get_queue(RX) failed\n");
1955 fq->fqid = qid.fqid;
1957 queue.destination.id = fq->channel->dpcon_id;
1958 queue.destination.type = DPNI_DEST_DPCON;
1959 queue.destination.priority = 1;
1960 queue.user_context = (u64)(uintptr_t)fq;
1961 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
1962 DPNI_QUEUE_RX, 0, fq->flowid,
1963 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
1966 dev_err(dev, "dpni_set_queue(RX) failed\n");
1971 td.threshold = DPAA2_ETH_TAILDROP_THRESH;
1972 err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE,
1973 DPNI_QUEUE_RX, 0, fq->flowid, &td);
1975 dev_err(dev, "dpni_set_taildrop() failed\n");
1982 static int setup_tx_flow(struct dpaa2_eth_priv *priv,
1983 struct dpaa2_eth_fq *fq)
1985 struct device *dev = priv->net_dev->dev.parent;
1986 struct dpni_queue queue;
1987 struct dpni_queue_id qid;
1990 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
1991 DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid);
1993 dev_err(dev, "dpni_get_queue(TX) failed\n");
1997 fq->tx_qdbin = qid.qdbin;
1999 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
2000 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
2003 dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
2007 fq->fqid = qid.fqid;
2009 queue.destination.id = fq->channel->dpcon_id;
2010 queue.destination.type = DPNI_DEST_DPCON;
2011 queue.destination.priority = 0;
2012 queue.user_context = (u64)(uintptr_t)fq;
2013 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
2014 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
2015 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
2018 dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
2025 /* Supported header fields for Rx hash distribution key */
2026 static const struct dpaa2_eth_dist_fields dist_fields[] = {
2029 .rxnfc_field = RXH_L2DA,
2030 .cls_prot = NET_PROT_ETH,
2031 .cls_field = NH_FLD_ETH_DA,
2034 .cls_prot = NET_PROT_ETH,
2035 .cls_field = NH_FLD_ETH_SA,
2038 /* This is the last ethertype field parsed:
2039 * depending on frame format, it can be the MAC ethertype
2040 * or the VLAN etype.
2042 .cls_prot = NET_PROT_ETH,
2043 .cls_field = NH_FLD_ETH_TYPE,
2047 .rxnfc_field = RXH_VLAN,
2048 .cls_prot = NET_PROT_VLAN,
2049 .cls_field = NH_FLD_VLAN_TCI,
2053 .rxnfc_field = RXH_IP_SRC,
2054 .cls_prot = NET_PROT_IP,
2055 .cls_field = NH_FLD_IP_SRC,
2058 .rxnfc_field = RXH_IP_DST,
2059 .cls_prot = NET_PROT_IP,
2060 .cls_field = NH_FLD_IP_DST,
2063 .rxnfc_field = RXH_L3_PROTO,
2064 .cls_prot = NET_PROT_IP,
2065 .cls_field = NH_FLD_IP_PROTO,
2068 /* Using UDP ports, this is functionally equivalent to raw
2069 * byte pairs from L4 header.
2071 .rxnfc_field = RXH_L4_B_0_1,
2072 .cls_prot = NET_PROT_UDP,
2073 .cls_field = NH_FLD_UDP_PORT_SRC,
2076 .rxnfc_field = RXH_L4_B_2_3,
2077 .cls_prot = NET_PROT_UDP,
2078 .cls_field = NH_FLD_UDP_PORT_DST,
2083 /* Configure the Rx hash key using the legacy API */
2084 static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
2086 struct device *dev = priv->net_dev->dev.parent;
2087 struct dpni_rx_tc_dist_cfg dist_cfg;
2090 memset(&dist_cfg, 0, sizeof(dist_cfg));
2092 dist_cfg.key_cfg_iova = key;
2093 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2094 dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
2096 err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
2098 dev_err(dev, "dpni_set_rx_tc_dist failed\n");
2103 /* Configure the Rx hash key using the new API */
2104 static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
2106 struct device *dev = priv->net_dev->dev.parent;
2107 struct dpni_rx_dist_cfg dist_cfg;
2110 memset(&dist_cfg, 0, sizeof(dist_cfg));
2112 dist_cfg.key_cfg_iova = key;
2113 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2114 dist_cfg.enable = 1;
2116 err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
2118 dev_err(dev, "dpni_set_rx_hash_dist failed\n");
2123 /* Configure the Rx flow classification key */
2124 static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
2126 struct device *dev = priv->net_dev->dev.parent;
2127 struct dpni_rx_dist_cfg dist_cfg;
2130 memset(&dist_cfg, 0, sizeof(dist_cfg));
2132 dist_cfg.key_cfg_iova = key;
2133 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2134 dist_cfg.enable = 1;
2136 err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
2138 dev_err(dev, "dpni_set_rx_fs_dist failed\n");
2143 /* Size of the Rx flow classification key */
2144 int dpaa2_eth_cls_key_size(void)
2148 for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
2149 size += dist_fields[i].size;
2154 /* Offset of header field in Rx classification key */
2155 int dpaa2_eth_cls_fld_off(int prot, int field)
2159 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
2160 if (dist_fields[i].cls_prot == prot &&
2161 dist_fields[i].cls_field == field)
2163 off += dist_fields[i].size;
2166 WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
2170 /* Set Rx distribution (hash or flow classification) key
2171 * flags is a combination of RXH_ bits
2173 static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
2174 enum dpaa2_eth_rx_dist type, u64 flags)
2176 struct device *dev = net_dev->dev.parent;
2177 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2178 struct dpkg_profile_cfg cls_cfg;
2179 u32 rx_hash_fields = 0;
2180 dma_addr_t key_iova;
2185 memset(&cls_cfg, 0, sizeof(cls_cfg));
2187 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
2188 struct dpkg_extract *key =
2189 &cls_cfg.extracts[cls_cfg.num_extracts];
2191 /* For Rx hashing key we set only the selected fields.
2192 * For Rx flow classification key we set all supported fields
2194 if (type == DPAA2_ETH_RX_DIST_HASH) {
2195 if (!(flags & dist_fields[i].rxnfc_field))
2197 rx_hash_fields |= dist_fields[i].rxnfc_field;
2200 if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
2201 dev_err(dev, "error adding key extraction rule, too many rules?\n");
2205 key->type = DPKG_EXTRACT_FROM_HDR;
2206 key->extract.from_hdr.prot = dist_fields[i].cls_prot;
2207 key->extract.from_hdr.type = DPKG_FULL_FIELD;
2208 key->extract.from_hdr.field = dist_fields[i].cls_field;
2209 cls_cfg.num_extracts++;
2212 dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
2216 err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
2218 dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
2222 /* Prepare for setting the rx dist */
2223 key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
2225 if (dma_mapping_error(dev, key_iova)) {
2226 dev_err(dev, "DMA mapping failed\n");
2231 if (type == DPAA2_ETH_RX_DIST_HASH) {
2232 if (dpaa2_eth_has_legacy_dist(priv))
2233 err = config_legacy_hash_key(priv, key_iova);
2235 err = config_hash_key(priv, key_iova);
2237 err = config_cls_key(priv, key_iova);
2240 dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
2242 if (!err && type == DPAA2_ETH_RX_DIST_HASH)
2243 priv->rx_hash_fields = rx_hash_fields;
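/* Set the Rx hash distribution key from the requested RXH_ flag combination */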
2250 int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
2252 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2254 if (!dpaa2_eth_hash_enabled(priv))
2257 return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, flags);
2260 static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
2262 struct device *dev = priv->net_dev->dev.parent;
2264 /* Check if we actually support Rx flow classification */
2265 if (dpaa2_eth_has_legacy_dist(priv)) {
2266 dev_dbg(dev, "Rx cls not supported by current MC version\n");
2270 if (priv->dpni_attrs.options & DPNI_OPT_NO_FS ||
2271 !(priv->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)) {
2272 dev_dbg(dev, "Rx cls disabled in DPNI options\n");
2276 if (!dpaa2_eth_hash_enabled(priv)) {
2277 dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
2281 priv->rx_cls_enabled = 1;
2283 return dpaa2_eth_set_dist_key(priv->net_dev, DPAA2_ETH_RX_DIST_CLS, 0);
2286 /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
2287 * frame queues and channels
2289 static int bind_dpni(struct dpaa2_eth_priv *priv)
2291 struct net_device *net_dev = priv->net_dev;
2292 struct device *dev = net_dev->dev.parent;
2293 struct dpni_pools_cfg pools_params;
2294 struct dpni_error_cfg err_cfg;
2298 pools_params.num_dpbp = 1;
2299 pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
2300 pools_params.pools[0].backup_pool = 0;
2301 pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
2302 err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
2304 dev_err(dev, "dpni_set_pools() failed\n");
2308 /* have the interface implicitly distribute traffic based on
2309 * the default hash key
2311 err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
2312 if (err && err != -EOPNOTSUPP)
2313 dev_err(dev, "Failed to configure hashing\n");
2315 /* Configure the flow classification key; it includes all
2316 * supported header fields and cannot be modified at runtime
2318 err = dpaa2_eth_set_cls(priv);
2319 if (err && err != -EOPNOTSUPP)
2320 dev_err(dev, "Failed to configure Rx classification key\n");
2322 /* Configure handling of error frames */
2323 err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
2324 err_cfg.set_frame_annotation = 1;
2325 err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
2326 err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
2329 dev_err(dev, "dpni_set_errors_behavior failed\n");
2333 /* Configure Rx and Tx conf queues to generate CDANs */
2334 for (i = 0; i < priv->num_fqs; i++) {
2335 switch (priv->fq[i].type) {
2337 err = setup_rx_flow(priv, &priv->fq[i]);
2339 case DPAA2_TX_CONF_FQ:
2340 err = setup_tx_flow(priv, &priv->fq[i]);
2343 dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
2350 err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
2351 DPNI_QUEUE_TX, &priv->tx_qdid);
2353 dev_err(dev, "dpni_get_qdid() failed\n");
/* Allocate rings for storing incoming frame descriptors */
static int alloc_rings(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		priv->channel[i]->store =
			dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
		if (!priv->channel[i]->store) {
			netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
			goto err_ring;
		}
	}
	return 0;

err_ring:
	for (i = 0; i < priv->num_channels; i++) {
		if (!priv->channel[i]->store)
			break;
		dpaa2_io_store_destroy(priv->channel[i]->store);
	}
	return -ENOMEM;
}
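
/* Free the frame descriptor stores created in alloc_rings() */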
static void free_rings(struct dpaa2_eth_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_channels; i++)
		dpaa2_io_store_destroy(priv->channel[i]->store);
}
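
/* Choose the interface MAC address. Preference order, as implemented below:
 * the address already programmed into firmware for the physical port (if
 * any), then the address stored in the DPNI attributes, and finally a
 * randomly generated one, which is also committed to the DPNI as the
 * primary address.
 */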
static int set_mac_addr(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
	int err;

	/* Get firmware address, if any */
	err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
	if (err) {
		dev_err(dev, "dpni_get_port_mac_addr() failed\n");
		return err;
	}

	/* Get DPNI attributes address, if any */
	err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
					dpni_mac_addr);
	if (err) {
		dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
		return err;
	}

	/* First check if firmware has any address configured by bootloader */
	if (!is_zero_ether_addr(mac_addr)) {
		/* If the DPMAC addr != DPNI addr, update it */
		if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
			err = dpni_set_primary_mac_addr(priv->mc_io, 0,
							priv->mc_token,
							mac_addr);
			if (err) {
				dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
				return err;
			}
		}
		memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
	} else if (is_zero_ether_addr(dpni_mac_addr)) {
		/* No MAC address configured, fill in net_dev->dev_addr
		 * with a random one
		 */
		eth_hw_addr_random(net_dev);
		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
		err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
						net_dev->dev_addr);
		if (err) {
			dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
			return err;
		}
		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
		 * practical purposes, this will be our "permanent" mac address,
		 * at least until the next reboot. This move will also permit
		 * register_netdevice() to properly fill up net_dev->perm_addr.
		 */
		net_dev->addr_assign_type = NET_ADDR_PERM;
	} else {
		/* NET_ADDR_PERM is default, all we have to do is
		 * fill in the device addr.
		 */
		memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
	}

	return 0;
}
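
/* One-time netdevice setup: install the driver callbacks, pick a MAC
 * address, add the broadcast address to the MAC filter, cap the MTU,
 * size the Rx/Tx queues and advertise the supported features.
 */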
static int netdev_init(struct net_device *net_dev)
{
	struct device *dev = net_dev->dev.parent;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u32 options = priv->dpni_attrs.options;
	u64 supported = 0, not_supported = 0;
	u8 bcast_addr[ETH_ALEN];
	u8 num_queues;
	int err;

	net_dev->netdev_ops = &dpaa2_eth_ops;
	net_dev->ethtool_ops = &dpaa2_ethtool_ops;

	err = set_mac_addr(priv);
	if (err)
		return err;

	/* Explicitly add the broadcast address to the MAC filtering table */
	eth_broadcast_addr(bcast_addr);
	err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
	if (err) {
		dev_err(dev, "dpni_add_mac_addr() failed\n");
		return err;
	}

	/* Set MTU upper limit; lower limit is 68B (default value) */
	net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
					DPAA2_ETH_MFL);
	if (err) {
		dev_err(dev, "dpni_set_max_frame_length() failed\n");
		return err;
	}

	/* Set actual number of queues in the net device */
	num_queues = dpaa2_eth_queue_count(priv);
	err = netif_set_real_num_tx_queues(net_dev, num_queues);
	if (err) {
		dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
		return err;
	}
	err = netif_set_real_num_rx_queues(net_dev, num_queues);
	if (err) {
		dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
		return err;
	}

	/* Capabilities listing */
	supported |= IFF_LIVE_ADDR_CHANGE;

	if (options & DPNI_OPT_NO_MAC_FILTER)
		not_supported |= IFF_UNICAST_FLT;
	else
		supported |= IFF_UNICAST_FLT;

	net_dev->priv_flags |= supported;
	net_dev->priv_flags &= ~not_supported;

	/* Features */
	net_dev->features = NETIF_F_RXCSUM |
			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_SG | NETIF_F_HIGHDMA |
			    NETIF_F_LLTX;
	net_dev->hw_features = net_dev->features;

	return 0;
}
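
/* Fallback used when link state interrupts are unavailable (see
 * dpaa2_eth_probe()): a kthread periodically refreshes the link state,
 * sleeping DPAA2_ETH_LINK_STATE_REFRESH milliseconds between polls
 * (the unit is assumed from the msleep() call below).
 */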
static int poll_link_state(void *arg)
{
	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
	int err;

	while (!kthread_should_stop()) {
		err = link_state_update(priv);
		if (unlikely(err))
			return err;
		msleep(DPAA2_ETH_LINK_STATE_REFRESH);
	}
	return 0;
}
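
/* Threaded handler for the DPNI IRQ; only link state change events are
 * acted upon here.
 */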
static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
{
	u32 status = ~0;
	struct device *dev = (struct device *)arg;
	struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
	struct net_device *net_dev = dev_get_drvdata(dev);
	int err;

	err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
				  DPNI_IRQ_INDEX, &status);
	if (unlikely(err)) {
		netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
		return IRQ_HANDLED;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
		link_state_update(netdev_priv(net_dev));

	return IRQ_HANDLED;
}

static int setup_irqs(struct fsl_mc_device *ls_dev)
{
	int err = 0;
	struct fsl_mc_device_irq *irq;

	err = fsl_mc_allocate_irqs(ls_dev);
	if (err) {
		dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
		return err;
	}

	irq = ls_dev->irqs[0];
	err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
					NULL, dpni_irq0_handler_thread,
					IRQF_NO_SUSPEND | IRQF_ONESHOT,
					dev_name(&ls_dev->dev), &ls_dev->dev);
	if (err < 0) {
		dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
		goto free_mc_irq;
	}

	err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
				DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
	if (err < 0) {
		dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
		goto free_irq;
	}

	err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
				  DPNI_IRQ_INDEX, 1);
	if (err < 0) {
		dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
		goto free_irq;
	}
	return 0;

free_irq:
	devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
free_mc_irq:
	fsl_mc_free_irqs(ls_dev);
	return err;
}
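
/* The NAPI weight must be a multiple of DPAA2_ETH_STORE_SIZE (see the
 * comment inside add_ch_napi() below); NAPI_POLL_WEIGHT, i.e. 64, is
 * assumed to satisfy that constraint for the store size used here.
 */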
static void add_ch_napi(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		/* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
		netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
			       NAPI_POLL_WEIGHT);
	}
}

static void del_ch_napi(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		netif_napi_del(&ch->napi);
	}
}
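
/* Probe sequence, in setup order: allocate the netdevice, obtain an MC
 * portal, configure the DPNI/DPIO/DPBP objects, bind the DPNI to its queues
 * and buffer pool, allocate per-cpu statistics, initialize the netdevice,
 * enable checksum offloads, allocate the frame descriptor stores and
 * finally request the link state IRQ (falling back to a polling kthread)
 * before registering the netdevice. The error labels unwind these steps in
 * reverse order.
 */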
static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
{
	struct device *dev;
	struct net_device *net_dev = NULL;
	struct dpaa2_eth_priv *priv = NULL;
	int err = 0;

	dev = &dpni_dev->dev;

	/* Net device */
	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
	if (!net_dev) {
		dev_err(dev, "alloc_etherdev_mq() failed\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(net_dev, dev);
	dev_set_drvdata(dev, net_dev);

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;

	priv->iommu_domain = iommu_get_domain_for_dev(dev);

	/* Obtain a MC portal */
	err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
				     &priv->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");
		goto err_portal_alloc;
	}

	/* MC objects initialization and configuration */
	err = setup_dpni(dpni_dev);
	if (err)
		goto err_dpni_setup;

	err = setup_dpio(priv);
	if (err)
		goto err_dpio_setup;

	setup_fqs(priv);

	err = setup_dpbp(priv);
	if (err)
		goto err_dpbp_setup;

	err = bind_dpni(priv);
	if (err)
		goto err_bind;

	/* Add a NAPI context for each channel */
	add_ch_napi(priv);

	/* Percpu statistics */
	priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
	if (!priv->percpu_stats) {
		dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_stats;
	}
	priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
	if (!priv->percpu_extras) {
		dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_extras;
	}

	err = netdev_init(net_dev);
	if (err)
		goto err_netdev_init;

	/* Configure checksum offload based on current interface flags */
	err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
	if (err)
		goto err_csum;

	err = set_tx_csum(priv, !!(net_dev->features &
				   (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
	if (err)
		goto err_csum;

	err = alloc_rings(priv);
	if (err)
		goto err_alloc_rings;

	err = setup_irqs(dpni_dev);
	if (err) {
		netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
		priv->poll_thread = kthread_run(poll_link_state, priv,
						"%s_poll_link", net_dev->name);
		if (IS_ERR(priv->poll_thread)) {
			dev_err(dev, "Error starting polling thread\n");
			goto err_poll_thread;
		}
		priv->do_link_poll = true;
	}

	err = register_netdev(net_dev);
	if (err < 0) {
		dev_err(dev, "register_netdev() failed\n");
		goto err_netdev_reg;
	}

	dev_info(dev, "Probed interface %s\n", net_dev->name);
	return 0;

err_netdev_reg:
	if (priv->do_link_poll)
		kthread_stop(priv->poll_thread);
	else
		fsl_mc_free_irqs(dpni_dev);
err_poll_thread:
	free_rings(priv);
err_alloc_rings:
err_csum:
err_netdev_init:
	free_percpu(priv->percpu_extras);
err_alloc_percpu_extras:
	free_percpu(priv->percpu_stats);
err_alloc_percpu_stats:
	del_ch_napi(priv);
err_bind:
	free_dpbp(priv);
err_dpbp_setup:
	free_dpio(priv);
err_dpio_setup:
	free_dpni(priv);
err_dpni_setup:
	fsl_mc_portal_free(priv->mc_io);
err_portal_alloc:
	dev_set_drvdata(dev, NULL);
	free_netdev(net_dev);

	return err;
}

static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
{
	struct device *dev;
	struct net_device *net_dev;
	struct dpaa2_eth_priv *priv;

	dev = &ls_dev->dev;
	net_dev = dev_get_drvdata(dev);
	priv = netdev_priv(net_dev);

	unregister_netdev(net_dev);

	if (priv->do_link_poll)
		kthread_stop(priv->poll_thread);
	else
		fsl_mc_free_irqs(ls_dev);

	free_rings(priv);
	free_percpu(priv->percpu_stats);
	free_percpu(priv->percpu_extras);

	del_ch_napi(priv);
	free_dpbp(priv);
	free_dpio(priv);
	free_dpni(priv);

	fsl_mc_portal_free(priv->mc_io);

	/* Log before releasing the netdev to avoid a use-after-free */
	dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
	free_netdev(net_dev);

	return 0;
}

static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpni",
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);

static struct fsl_mc_driver dpaa2_eth_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_eth_probe,
	.remove = dpaa2_eth_remove,
	.match_id_table = dpaa2_eth_match_id_table
};

module_fsl_mc_driver(dpaa2_eth_driver);