2 * Copyright (C) 2015 Netronome Systems, Inc.
4 * This software is dual licensed under the GNU General Public License Version 2,
5 * June 1991 as shown in the file COPYING in the top-level directory of this
6 * source tree or the BSD 2-Clause License provided below. You have the
7 * option to license this software under the complete terms of either license.
9 * The BSD 2-Clause License:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * 1. Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * 2. Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 * Netronome network device driver: Common functions between PF and VF
37 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
38 * Jason McMullan <jason.mcmullan@netronome.com>
39 * Rolf Neugebauer <rolf.neugebauer@netronome.com>
40 * Brad Petrus <brad.petrus@netronome.com>
41 * Chris Telfer <chris.telfer@netronome.com>
44 #include <linux/module.h>
45 #include <linux/kernel.h>
46 #include <linux/init.h>
48 #include <linux/netdevice.h>
49 #include <linux/etherdevice.h>
50 #include <linux/interrupt.h>
52 #include <linux/ipv6.h>
53 #include <linux/page_ref.h>
54 #include <linux/pci.h>
55 #include <linux/pci_regs.h>
56 #include <linux/msi.h>
57 #include <linux/ethtool.h>
58 #include <linux/log2.h>
59 #include <linux/if_vlan.h>
60 #include <linux/random.h>
62 #include <linux/ktime.h>
64 #include <net/pkt_cls.h>
65 #include <net/vxlan.h>
67 #include "nfp_net_ctrl.h"
71 * nfp_net_get_fw_version() - Read and parse the FW version
72 * @fw_ver: Output fw_version structure to fill in
73 * @ctrl_bar: Mapped address of the control BAR
75 void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
76 void __iomem *ctrl_bar)
80 reg = readl(ctrl_bar + NFP_NET_CFG_VERSION);
81 put_unaligned_le32(reg, fw_ver);
85 nfp_net_dma_map_rx(struct nfp_net *nn, void *frag, unsigned int bufsz,
88 return dma_map_single(&nn->pdev->dev, frag + NFP_NET_RX_BUF_HEADROOM,
89 bufsz - NFP_NET_RX_BUF_NON_DATA, direction);
93 nfp_net_dma_unmap_rx(struct nfp_net *nn, dma_addr_t dma_addr,
94 unsigned int bufsz, int direction)
96 dma_unmap_single(&nn->pdev->dev, dma_addr,
97 bufsz - NFP_NET_RX_BUF_NON_DATA, direction);
102 * Firmware reconfig may take a while so we have two versions of it -
103 * synchronous and asynchronous (posted). All synchronous callers are holding
104 * RTNL so we don't have to worry about serializing them.
106 static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
108 nn_writel(nn, NFP_NET_CFG_UPDATE, update);
109 /* ensure update is written before pinging HW */
111 nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
114 /* Pass 0 as update to run posted reconfigs. */
115 static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update)
117 update |= nn->reconfig_posted;
118 nn->reconfig_posted = 0;
120 nfp_net_reconfig_start(nn, update);
122 nn->reconfig_timer_active = true;
123 mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ);
126 static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
130 reg = nn_readl(nn, NFP_NET_CFG_UPDATE);
133 if (reg & NFP_NET_CFG_UPDATE_ERR) {
134 nn_err(nn, "Reconfig error: 0x%08x\n", reg);
136 } else if (last_check) {
137 nn_err(nn, "Reconfig timeout: 0x%08x\n", reg);
144 static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
146 bool timed_out = false;
148 /* Poll update field, waiting for NFP to ack the config */
149 while (!nfp_net_reconfig_check_done(nn, timed_out)) {
151 timed_out = time_is_before_eq_jiffies(deadline);
154 if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
157 return timed_out ? -EIO : 0;
160 static void nfp_net_reconfig_timer(unsigned long data)
162 struct nfp_net *nn = (void *)data;
164 spin_lock_bh(&nn->reconfig_lock);
166 nn->reconfig_timer_active = false;
168 /* If sync caller is present it will take over from us */
169 if (nn->reconfig_sync_present)
172 /* Read reconfig status and report errors */
173 nfp_net_reconfig_check_done(nn, true);
175 if (nn->reconfig_posted)
176 nfp_net_reconfig_start_async(nn, 0);
178 spin_unlock_bh(&nn->reconfig_lock);
182 * nfp_net_reconfig_post() - Post async reconfig request
183 * @nn: NFP Net device to reconfigure
184 * @update: The value for the update field in the BAR config
186 * Record FW reconfiguration request. Reconfiguration will be kicked off
187 * whenever reconfiguration machinery is idle. Multiple requests can be merged together.
190 static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
192 spin_lock_bh(&nn->reconfig_lock);
194 /* Sync caller will kick off async reconf when it's done, just post */
195 if (nn->reconfig_sync_present) {
196 nn->reconfig_posted |= update;
200 /* Opportunistically check if the previous command is done */
201 if (!nn->reconfig_timer_active ||
202 nfp_net_reconfig_check_done(nn, false))
203 nfp_net_reconfig_start_async(nn, update);
205 nn->reconfig_posted |= update;
207 spin_unlock_bh(&nn->reconfig_lock);
211 * nfp_net_reconfig() - Reconfigure the firmware
212 * @nn: NFP Net device to reconfigure
213 * @update: The value for the update field in the BAR config
215 * Write the update word to the BAR and ping the reconfig queue. Then
216 * poll until the firmware has acknowledged the update by zeroing the
219 * Return: Negative errno on error, 0 on success
221 int nfp_net_reconfig(struct nfp_net *nn, u32 update)
223 bool cancelled_timer = false;
224 u32 pre_posted_requests;
227 spin_lock_bh(&nn->reconfig_lock);
229 nn->reconfig_sync_present = true;
231 if (nn->reconfig_timer_active) {
232 del_timer(&nn->reconfig_timer);
233 nn->reconfig_timer_active = false;
234 cancelled_timer = true;
236 pre_posted_requests = nn->reconfig_posted;
237 nn->reconfig_posted = 0;
239 spin_unlock_bh(&nn->reconfig_lock);
242 nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
244 /* Run the posted reconfigs which were issued before we started */
245 if (pre_posted_requests) {
246 nfp_net_reconfig_start(nn, pre_posted_requests);
247 nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
250 nfp_net_reconfig_start(nn, update);
251 ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
253 spin_lock_bh(&nn->reconfig_lock);
255 if (nn->reconfig_posted)
256 nfp_net_reconfig_start_async(nn, 0);
258 nn->reconfig_sync_present = false;
260 spin_unlock_bh(&nn->reconfig_lock);
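/* Editorial sketch (not part of the original driver): how the two reconfig
 * paths above are intended to be used. A sleepable caller holding RTNL uses
 * nfp_net_reconfig() and blocks until the firmware acks; atomic contexts
 * post the request with nfp_net_reconfig_post() and let the timer or the
 * next synchronous caller kick it off.
 */
static void __maybe_unused nfp_net_reconfig_usage_sketch(struct nfp_net *nn)
{
	int err;

	/* synchronous: write UPDATE, ping the queue, poll for the ack */
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		nn_err(nn, "sync reconfig failed: %d\n", err);

	/* asynchronous (posted): just record the request and return */
	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);
}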
265 /* Interrupt configuration and handling
269 * nfp_net_irq_unmask() - Unmask automasked interrupt
270 * @nn: NFP Network structure
271 * @entry_nr: MSI-X table entry
273 * Clear the ICR for the IRQ entry.
275 static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
277 nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
282 * nfp_net_msix_alloc() - Try to allocate MSI-X irqs
283 * @nn: NFP Network structure
284 * @nr_vecs: Number of MSI-X vectors to allocate
286 * For MSI-X we want at least NFP_NET_NON_Q_VECTORS + 1 vectors.
288 * Return: Number of MSI-X vectors obtained or 0 on error.
290 static int nfp_net_msix_alloc(struct nfp_net *nn, int nr_vecs)
292 struct pci_dev *pdev = nn->pdev;
296 for (i = 0; i < nr_vecs; i++)
297 nn->irq_entries[i].entry = i;
299 nvecs = pci_enable_msix_range(pdev, nn->irq_entries,
300 NFP_NET_NON_Q_VECTORS + 1, nr_vecs);
302 nn_warn(nn, "Failed to enable MSI-X. Wanted %d-%d (err=%d)\n",
303 NFP_NET_NON_Q_VECTORS + 1, nr_vecs, nvecs);
311 * nfp_net_irqs_alloc() - allocates MSI-X irqs
312 * @nn: NFP Network structure
314 * Return: Number of irqs obtained or 0 on error.
316 int nfp_net_irqs_alloc(struct nfp_net *nn)
321 wanted_irqs = nn->num_r_vecs + NFP_NET_NON_Q_VECTORS;
323 n = nfp_net_msix_alloc(nn, wanted_irqs);
325 nn_err(nn, "Failed to allocate MSI-X IRQs\n");
329 nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS;
330 nn->num_r_vecs = nn->max_r_vecs;
333 nn_warn(nn, "Unable to allocate %d vectors. Got %d instead\n",
340 * nfp_net_irqs_disable() - Disable interrupts
341 * @nn: NFP Network structure
343 * Undoes what @nfp_net_irqs_alloc() does.
345 void nfp_net_irqs_disable(struct nfp_net *nn)
347 pci_disable_msix(nn->pdev);
351 * nfp_net_irq_rxtx() - Interrupt service routine for RX/TX rings.
353 * @data: Opaque data structure
355 * Return: Indicate if the interrupt has been handled.
357 static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
359 struct nfp_net_r_vector *r_vec = data;
361 napi_schedule_irqoff(&r_vec->napi);
363 /* The FW auto-masks any interrupt, either via the MASK bit in
364 * the MSI-X table or via the per entry ICR field. So there
365 * is no need to disable interrupts here.
371 * nfp_net_read_link_status() - Reread link status from control BAR
372 * @nn: NFP Network structure
374 static void nfp_net_read_link_status(struct nfp_net *nn)
380 spin_lock_irqsave(&nn->link_status_lock, flags);
382 sts = nn_readl(nn, NFP_NET_CFG_STS);
383 link_up = !!(sts & NFP_NET_CFG_STS_LINK);
385 if (nn->link_up == link_up)
388 nn->link_up = link_up;
391 netif_carrier_on(nn->netdev);
392 netdev_info(nn->netdev, "NIC Link is Up\n");
394 netif_carrier_off(nn->netdev);
395 netdev_info(nn->netdev, "NIC Link is Down\n");
398 spin_unlock_irqrestore(&nn->link_status_lock, flags);
402 * nfp_net_irq_lsc() - Interrupt service routine for link state changes
404 * @data: Opaque data structure
406 * Return: Indicate if the interrupt has been handled.
408 static irqreturn_t nfp_net_irq_lsc(int irq, void *data)
410 struct nfp_net *nn = data;
412 nfp_net_read_link_status(nn);
414 nfp_net_irq_unmask(nn, NFP_NET_IRQ_LSC_IDX);
420 * nfp_net_irq_exn() - Interrupt service routine for exceptions
422 * @data: Opaque data structure
424 * Return: Indicate if the interrupt has been handled.
426 static irqreturn_t nfp_net_irq_exn(int irq, void *data)
428 struct nfp_net *nn = data;
430 nn_err(nn, "%s: UNIMPLEMENTED.\n", __func__);
431 /* XXX TO BE IMPLEMENTED */
436 * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
437 * @tx_ring: TX ring structure
438 * @r_vec: IRQ vector servicing this ring
442 nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
443 struct nfp_net_r_vector *r_vec, unsigned int idx)
445 struct nfp_net *nn = r_vec->nfp_net;
448 tx_ring->r_vec = r_vec;
450 tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
451 tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
455 * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
456 * @rx_ring: RX ring structure
457 * @r_vec: IRQ vector servicing this ring
461 nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
462 struct nfp_net_r_vector *r_vec, unsigned int idx)
464 struct nfp_net *nn = r_vec->nfp_net;
467 rx_ring->r_vec = r_vec;
469 rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
470 rx_ring->rx_qcidx = rx_ring->fl_qcidx + (nn->stride_rx - 1);
472 rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
473 rx_ring->qcp_rx = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->rx_qcidx);
477 * nfp_net_irqs_assign() - Assign IRQs and setup rvecs.
478 * @netdev: netdev structure
480 static void nfp_net_irqs_assign(struct net_device *netdev)
482 struct nfp_net *nn = netdev_priv(netdev);
483 struct nfp_net_r_vector *r_vec;
486 if (nn->num_rx_rings > nn->num_r_vecs ||
487 nn->num_tx_rings > nn->num_r_vecs)
488 nn_warn(nn, "More rings (%d,%d) than vectors (%d).\n",
489 nn->num_rx_rings, nn->num_tx_rings, nn->num_r_vecs);
491 nn->num_rx_rings = min(nn->num_r_vecs, nn->num_rx_rings);
492 nn->num_tx_rings = min(nn->num_r_vecs, nn->num_tx_rings);
494 nn->lsc_handler = nfp_net_irq_lsc;
495 nn->exn_handler = nfp_net_irq_exn;
497 for (r = 0; r < nn->num_r_vecs; r++) {
498 r_vec = &nn->r_vecs[r];
500 r_vec->handler = nfp_net_irq_rxtx;
501 r_vec->irq_idx = NFP_NET_NON_Q_VECTORS + r;
503 cpumask_set_cpu(r, &r_vec->affinity_mask);
508 * nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
509 * @nn: NFP Network structure
510 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
511 * @format: printf-style format to construct the interrupt name
512 * @name: Pointer to allocated space for interrupt name
513 * @name_sz: Size of space for interrupt name
514 * @vector_idx: Index of MSI-X vector used for this interrupt
515 * @handler: IRQ handler to register for this interrupt
518 nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
519 const char *format, char *name, size_t name_sz,
520 unsigned int vector_idx, irq_handler_t handler)
522 struct msix_entry *entry;
525 entry = &nn->irq_entries[vector_idx];
527 snprintf(name, name_sz, format, netdev_name(nn->netdev));
528 err = request_irq(entry->vector, handler, 0, name, nn);
530 nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
534 nn_writeb(nn, ctrl_offset, vector_idx);
540 * nfp_net_aux_irq_free() - Free an auxiliary interrupt (LSC or EXN)
541 * @nn: NFP Network structure
542 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
543 * @vector_idx: Index of MSI-X vector used for this interrupt
545 static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
546 unsigned int vector_idx)
548 nn_writeb(nn, ctrl_offset, 0xff);
549 free_irq(nn->irq_entries[vector_idx].vector, nn);
554 * One queue controller peripheral queue is used for transmit. The
555 * driver enqueues packets for transmit by advancing the write
556 * pointer. The device indicates that packets have been transmitted by
557 * advancing the read pointer. The driver maintains a local copy of
558 * the read and write pointer in @struct nfp_net_tx_ring. The driver
559 * keeps @wr_p in sync with the queue controller write pointer and can
560 * determine how many packets have been transmitted by comparing its
561 * copy of the read pointer @rd_p with the read pointer maintained by
562 * the queue controller peripheral.
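/* Editorial note (illustrative numbers, not from the original source):
 * since @wr_p and @rd_p are free-running unsigned counters, occupancy is
 * simply wr_p - rd_p even across wrap-around. E.g. with cnt = 4096,
 * wr_p = 70000 and rd_p = 66100 the ring holds 3900 descriptors and has
 * 196 free, so nfp_net_tx_full(tx_ring, 200) below reports it as full
 * (3900 >= 4096 - 200).
 */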
566 * nfp_net_tx_full() - Check if the TX ring is full
567 * @tx_ring: TX ring to check
568 * @dcnt: Number of descriptors that need to be enqueued (must be >= 1)
570 * This function checks, based on the *host copy* of the read/write
571 * pointers, whether a given TX ring is full. The real TX queue may
572 * have some newly freed slots which the host copy has not yet observed.
574 * Return: True if the ring is full.
576 static int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
578 return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
581 /* Wrappers for deciding when to stop and restart TX queues */
582 static int nfp_net_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
584 return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
587 static int nfp_net_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
589 return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
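/* Editorial note (inferred from the two thresholds above): the queue is
 * stopped as soon as one maximally fragmented skb might no longer fit,
 * but is only woken once roughly four such skbs would fit again. The gap
 * provides hysteresis so the queue does not flap between stopped and
 * running under sustained load.
 */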
593 * nfp_net_tx_ring_stop() - stop tx ring
594 * @nd_q: netdev queue
595 * @tx_ring: driver tx queue structure
597 * Safely stop TX ring. Remember that while we are running .start_xmit()
598 * someone else may be cleaning the TX ring completions so we need to be
599 * extra careful here.
601 static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
602 struct nfp_net_tx_ring *tx_ring)
604 netif_tx_stop_queue(nd_q);
606 /* We can race with the TX completion out of NAPI so recheck */
608 if (unlikely(nfp_net_tx_ring_should_wake(tx_ring)))
609 netif_tx_start_queue(nd_q);
613 * nfp_net_tx_tso() - Set up Tx descriptor for LSO
614 * @nn: NFP Net device
615 * @r_vec: per-ring structure
616 * @txbuf: Pointer to driver soft TX descriptor
617 * @txd: Pointer to HW TX descriptor
618 * @skb: Pointer to SKB
620 * Set up the Tx descriptor for LSO; do nothing for non-LSO skbs.
623 static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
624 struct nfp_net_tx_buf *txbuf,
625 struct nfp_net_tx_desc *txd, struct sk_buff *skb)
630 if (!skb_is_gso(skb))
633 if (!skb->encapsulation)
634 hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
636 hdrlen = skb_inner_transport_header(skb) - skb->data +
637 inner_tcp_hdrlen(skb);
639 txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
640 txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
642 mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
643 txd->l4_offset = hdrlen;
644 txd->mss = cpu_to_le16(mss);
645 txd->flags |= PCIE_DESC_TX_LSO;
647 u64_stats_update_begin(&r_vec->tx_sync);
649 u64_stats_update_end(&r_vec->tx_sync);
653 * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor
654 * @nn: NFP Net device
655 * @r_vec: per-ring structure
656 * @txbuf: Pointer to driver soft TX descriptor
657 * @txd: Pointer to TX descriptor
658 * @skb: Pointer to SKB
660 * This function sets the TX checksum flags in the TX descriptor based
661 * on the configuration and the protocol of the packet to be transmitted.
663 static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
664 struct nfp_net_tx_buf *txbuf,
665 struct nfp_net_tx_desc *txd, struct sk_buff *skb)
667 struct ipv6hdr *ipv6h;
671 if (!(nn->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
674 if (skb->ip_summed != CHECKSUM_PARTIAL)
677 txd->flags |= PCIE_DESC_TX_CSUM;
678 if (skb->encapsulation)
679 txd->flags |= PCIE_DESC_TX_ENCAP;
681 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
682 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
684 if (iph->version == 4) {
685 txd->flags |= PCIE_DESC_TX_IP4_CSUM;
686 l4_hdr = iph->protocol;
687 } else if (ipv6h->version == 6) {
688 l4_hdr = ipv6h->nexthdr;
690 nn_warn_ratelimit(nn, "partial checksum but ipv=%x!\n",
697 txd->flags |= PCIE_DESC_TX_TCP_CSUM;
700 txd->flags |= PCIE_DESC_TX_UDP_CSUM;
703 nn_warn_ratelimit(nn, "partial checksum but l4 proto=%x!\n",
708 u64_stats_update_begin(&r_vec->tx_sync);
709 if (skb->encapsulation)
710 r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
712 r_vec->hw_csum_tx += txbuf->pkt_cnt;
713 u64_stats_update_end(&r_vec->tx_sync);
717 * nfp_net_tx() - Main transmit entry point
718 * @skb: SKB to transmit
719 * @netdev: netdev structure
721 * Return: NETDEV_TX_OK on success.
723 static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
725 struct nfp_net *nn = netdev_priv(netdev);
726 const struct skb_frag_struct *frag;
727 struct nfp_net_r_vector *r_vec;
728 struct nfp_net_tx_desc *txd, txdg;
729 struct nfp_net_tx_buf *txbuf;
730 struct nfp_net_tx_ring *tx_ring;
731 struct netdev_queue *nd_q;
738 qidx = skb_get_queue_mapping(skb);
739 tx_ring = &nn->tx_rings[qidx];
740 r_vec = tx_ring->r_vec;
741 nd_q = netdev_get_tx_queue(nn->netdev, qidx);
743 nr_frags = skb_shinfo(skb)->nr_frags;
745 if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
746 nn_warn_ratelimit(nn, "TX ring %d busy. wrp=%u rdp=%u\n",
747 qidx, tx_ring->wr_p, tx_ring->rd_p);
748 netif_tx_stop_queue(nd_q);
749 u64_stats_update_begin(&r_vec->tx_sync);
751 u64_stats_update_end(&r_vec->tx_sync);
752 return NETDEV_TX_BUSY;
755 /* Start with the head skbuf */
756 dma_addr = dma_map_single(&nn->pdev->dev, skb->data, skb_headlen(skb),
758 if (dma_mapping_error(&nn->pdev->dev, dma_addr))
761 wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1);
763 /* Stash the soft descriptor of the head then initialize it */
764 txbuf = &tx_ring->txbufs[wr_idx];
766 txbuf->dma_addr = dma_addr;
769 txbuf->real_len = skb->len;
771 /* Build TX descriptor */
772 txd = &tx_ring->txds[wr_idx];
773 txd->offset_eop = (nr_frags == 0) ? PCIE_DESC_TX_EOP : 0;
774 txd->dma_len = cpu_to_le16(skb_headlen(skb));
775 nfp_desc_set_dma_addr(txd, dma_addr);
776 txd->data_len = cpu_to_le16(skb->len);
782 nfp_net_tx_tso(nn, r_vec, txbuf, txd, skb);
784 nfp_net_tx_csum(nn, r_vec, txbuf, txd, skb);
786 if (skb_vlan_tag_present(skb) && nn->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
787 txd->flags |= PCIE_DESC_TX_VLAN;
788 txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
793 /* all descs must match except for addr, length and eop */
796 for (f = 0; f < nr_frags; f++) {
797 frag = &skb_shinfo(skb)->frags[f];
798 fsize = skb_frag_size(frag);
800 dma_addr = skb_frag_dma_map(&nn->pdev->dev, frag, 0,
801 fsize, DMA_TO_DEVICE);
802 if (dma_mapping_error(&nn->pdev->dev, dma_addr))
805 wr_idx = (wr_idx + 1) & (tx_ring->cnt - 1);
806 tx_ring->txbufs[wr_idx].skb = skb;
807 tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
808 tx_ring->txbufs[wr_idx].fidx = f;
810 txd = &tx_ring->txds[wr_idx];
812 txd->dma_len = cpu_to_le16(fsize);
813 nfp_desc_set_dma_addr(txd, dma_addr);
815 (f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0;
818 u64_stats_update_begin(&r_vec->tx_sync);
820 u64_stats_update_end(&r_vec->tx_sync);
823 netdev_tx_sent_queue(nd_q, txbuf->real_len);
825 tx_ring->wr_p += nr_frags + 1;
826 if (nfp_net_tx_ring_should_stop(tx_ring))
827 nfp_net_tx_ring_stop(nd_q, tx_ring);
829 tx_ring->wr_ptr_add += nr_frags + 1;
830 if (!skb->xmit_more || netif_xmit_stopped(nd_q)) {
831 /* force memory write before we let HW know */
833 nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
834 tx_ring->wr_ptr_add = 0;
837 skb_tx_timestamp(skb);
844 frag = &skb_shinfo(skb)->frags[f];
845 dma_unmap_page(&nn->pdev->dev,
846 tx_ring->txbufs[wr_idx].dma_addr,
847 skb_frag_size(frag), DMA_TO_DEVICE);
848 tx_ring->txbufs[wr_idx].skb = NULL;
849 tx_ring->txbufs[wr_idx].dma_addr = 0;
850 tx_ring->txbufs[wr_idx].fidx = -2;
853 wr_idx += tx_ring->cnt;
855 dma_unmap_single(&nn->pdev->dev, tx_ring->txbufs[wr_idx].dma_addr,
856 skb_headlen(skb), DMA_TO_DEVICE);
857 tx_ring->txbufs[wr_idx].skb = NULL;
858 tx_ring->txbufs[wr_idx].dma_addr = 0;
859 tx_ring->txbufs[wr_idx].fidx = -2;
861 nn_warn_ratelimit(nn, "Failed to map DMA TX buffer\n");
862 u64_stats_update_begin(&r_vec->tx_sync);
864 u64_stats_update_end(&r_vec->tx_sync);
865 dev_kfree_skb_any(skb);
870 * nfp_net_tx_complete() - Handle completed TX packets
871 * @tx_ring: TX ring structure
875 static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
877 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
878 struct nfp_net *nn = r_vec->nfp_net;
879 const struct skb_frag_struct *frag;
880 struct netdev_queue *nd_q;
881 u32 done_pkts = 0, done_bytes = 0;
888 /* Work out how many descriptors have been transmitted */
889 qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
891 if (qcp_rd_p == tx_ring->qcp_rd_p)
894 if (qcp_rd_p > tx_ring->qcp_rd_p)
895 todo = qcp_rd_p - tx_ring->qcp_rd_p;
897 todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p;
900 idx = tx_ring->rd_p & (tx_ring->cnt - 1);
903 skb = tx_ring->txbufs[idx].skb;
907 nr_frags = skb_shinfo(skb)->nr_frags;
908 fidx = tx_ring->txbufs[idx].fidx;
912 dma_unmap_single(&nn->pdev->dev,
913 tx_ring->txbufs[idx].dma_addr,
914 skb_headlen(skb), DMA_TO_DEVICE);
916 done_pkts += tx_ring->txbufs[idx].pkt_cnt;
917 done_bytes += tx_ring->txbufs[idx].real_len;
920 frag = &skb_shinfo(skb)->frags[fidx];
921 dma_unmap_page(&nn->pdev->dev,
922 tx_ring->txbufs[idx].dma_addr,
923 skb_frag_size(frag), DMA_TO_DEVICE);
926 /* check for last gather fragment */
927 if (fidx == nr_frags - 1)
928 dev_kfree_skb_any(skb);
930 tx_ring->txbufs[idx].dma_addr = 0;
931 tx_ring->txbufs[idx].skb = NULL;
932 tx_ring->txbufs[idx].fidx = -2;
935 tx_ring->qcp_rd_p = qcp_rd_p;
937 u64_stats_update_begin(&r_vec->tx_sync);
938 r_vec->tx_bytes += done_bytes;
939 r_vec->tx_pkts += done_pkts;
940 u64_stats_update_end(&r_vec->tx_sync);
942 nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
943 netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
944 if (nfp_net_tx_ring_should_wake(tx_ring)) {
945 /* Make sure TX thread will see updated tx_ring->rd_p */
948 if (unlikely(netif_tx_queue_stopped(nd_q)))
949 netif_tx_wake_queue(nd_q);
952 WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
953 "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
954 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
958 * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
959 * @nn: NFP Net device
960 * @tx_ring: TX ring structure
962 * Assumes that the device is stopped
965 nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
967 const struct skb_frag_struct *frag;
968 struct netdev_queue *nd_q;
969 struct pci_dev *pdev = nn->pdev;
971 while (tx_ring->rd_p != tx_ring->wr_p) {
972 int nr_frags, fidx, idx;
975 idx = tx_ring->rd_p & (tx_ring->cnt - 1);
976 skb = tx_ring->txbufs[idx].skb;
977 nr_frags = skb_shinfo(skb)->nr_frags;
978 fidx = tx_ring->txbufs[idx].fidx;
982 dma_unmap_single(&pdev->dev,
983 tx_ring->txbufs[idx].dma_addr,
984 skb_headlen(skb), DMA_TO_DEVICE);
987 frag = &skb_shinfo(skb)->frags[fidx];
988 dma_unmap_page(&pdev->dev,
989 tx_ring->txbufs[idx].dma_addr,
990 skb_frag_size(frag), DMA_TO_DEVICE);
993 /* check for last gather fragment */
994 if (fidx == nr_frags - 1)
995 dev_kfree_skb_any(skb);
997 tx_ring->txbufs[idx].dma_addr = 0;
998 tx_ring->txbufs[idx].skb = NULL;
999 tx_ring->txbufs[idx].fidx = -2;
1001 tx_ring->qcp_rd_p++;
1005 memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt);
1008 tx_ring->qcp_rd_p = 0;
1009 tx_ring->wr_ptr_add = 0;
1011 nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
1012 netdev_tx_reset_queue(nd_q);
1015 static void nfp_net_tx_timeout(struct net_device *netdev)
1017 struct nfp_net *nn = netdev_priv(netdev);
1020 for (i = 0; i < nn->num_tx_rings; i++) {
1021 if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
1023 nn_warn(nn, "TX timeout on ring: %d\n", i);
1025 nn_warn(nn, "TX watchdog timeout\n");
1028 /* Receive processing
1031 nfp_net_calc_fl_bufsz(struct nfp_net *nn, unsigned int mtu)
1033 unsigned int fl_bufsz;
1035 fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
1036 if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
1037 fl_bufsz += NFP_NET_MAX_PREPEND;
1039 fl_bufsz += nn->rx_offset;
1040 fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + mtu;
1042 fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
1043 fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
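/* Editorial note: the skb_shared_info term above is needed because the
 * frag is later handed to build_skb(), which places the shared info at
 * the end of the buffer; SKB_DATA_ALIGN() rounds both the data area and
 * the shared info up to the platform alignment boundary.
 */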
1049 * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
1050 * @rx_ring: RX ring to allocate the buffer for
1051 * @dma_addr: Pointer to storage for DMA address (output param)
1052 * @fl_bufsz: size of freelist buffers
1054 * This function will allocate a new page frag and map it for DMA.
1056 * Return: allocated page frag or NULL on failure.
1059 nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr,
1060 unsigned int fl_bufsz)
1062 struct nfp_net *nn = rx_ring->r_vec->nfp_net;
1065 frag = netdev_alloc_frag(fl_bufsz);
1067 nn_warn_ratelimit(nn, "Failed to alloc receive page frag\n");
1071 *dma_addr = nfp_net_dma_map_rx(nn, frag, fl_bufsz, DMA_FROM_DEVICE);
1072 if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
1073 skb_free_frag(frag);
1074 nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
1081 static void *nfp_net_napi_alloc_one(struct nfp_net *nn, dma_addr_t *dma_addr)
1085 frag = napi_alloc_frag(nn->fl_bufsz);
1087 nn_warn_ratelimit(nn, "Failed to alloc receive page frag\n");
1091 *dma_addr = nfp_net_dma_map_rx(nn, frag, nn->fl_bufsz, DMA_FROM_DEVICE);
1092 if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
1093 skb_free_frag(frag);
1094 nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
1102 * nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings
1103 * @rx_ring: RX ring structure
1104 * @frag: page fragment buffer
1105 * @dma_addr: DMA address of skb mapping
1107 static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring,
1108 void *frag, dma_addr_t dma_addr)
1110 unsigned int wr_idx;
1112 wr_idx = rx_ring->wr_p & (rx_ring->cnt - 1);
1114 /* Stash SKB and DMA address away */
1115 rx_ring->rxbufs[wr_idx].frag = frag;
1116 rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
1118 /* Fill freelist descriptor */
1119 rx_ring->rxds[wr_idx].fld.reserved = 0;
1120 rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
1121 nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld, dma_addr);
1124 rx_ring->wr_ptr_add++;
1125 if (rx_ring->wr_ptr_add >= NFP_NET_FL_BATCH) {
1126 /* Update write pointer of the freelist queue. Make
1127 * sure all writes are flushed before telling the hardware.
1130 nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, rx_ring->wr_ptr_add);
1131 rx_ring->wr_ptr_add = 0;
1136 * nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable
1137 * @rx_ring: RX ring structure
1139 * Warning: Do *not* call if ring buffers were never put on the FW freelist
1140 * (i.e. device was not enabled)!
1142 static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
1144 unsigned int wr_idx, last_idx;
1146 /* Move the empty entry to the end of the list */
1147 wr_idx = rx_ring->wr_p & (rx_ring->cnt - 1);
1148 last_idx = rx_ring->cnt - 1;
1149 rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
1150 rx_ring->rxbufs[wr_idx].frag = rx_ring->rxbufs[last_idx].frag;
1151 rx_ring->rxbufs[last_idx].dma_addr = 0;
1152 rx_ring->rxbufs[last_idx].frag = NULL;
1154 memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt);
1157 rx_ring->wr_ptr_add = 0;
1161 * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
1162 * @nn: NFP Net device
1163 * @rx_ring: RX ring to remove buffers from
1165 * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
1166 * entries. After device is disabled nfp_net_rx_ring_reset() must be called
1167 * to restore required ring geometry.
1170 nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
1174 for (i = 0; i < rx_ring->cnt - 1; i++) {
1175 /* NULL frag can only happen when the initial filling of the ring
1176 * fails to allocate enough buffers and this path is called to free
1177 * the ones already allocated.
1179 if (!rx_ring->rxbufs[i].frag)
1182 nfp_net_dma_unmap_rx(nn, rx_ring->rxbufs[i].dma_addr,
1183 rx_ring->bufsz, DMA_FROM_DEVICE);
1184 skb_free_frag(rx_ring->rxbufs[i].frag);
1185 rx_ring->rxbufs[i].dma_addr = 0;
1186 rx_ring->rxbufs[i].frag = NULL;
1191 * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
1192 * @nn: NFP Net device
1193 * @rx_ring: RX ring to fill with buffers
1196 nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
1198 struct nfp_net_rx_buf *rxbufs;
1201 rxbufs = rx_ring->rxbufs;
1203 for (i = 0; i < rx_ring->cnt - 1; i++) {
1205 nfp_net_rx_alloc_one(rx_ring, &rxbufs[i].dma_addr,
1207 if (!rxbufs[i].frag) {
1208 nfp_net_rx_ring_bufs_free(nn, rx_ring);
1217 * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW
1218 * @rx_ring: RX ring to fill
1220 static void nfp_net_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring)
1224 for (i = 0; i < rx_ring->cnt - 1; i++)
1225 nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[i].frag,
1226 rx_ring->rxbufs[i].dma_addr);
1230 * nfp_net_rx_csum_has_errors() - group check if rxd has any csum errors
1231 * @flags: RX descriptor flags field in CPU byte order
1233 static int nfp_net_rx_csum_has_errors(u16 flags)
1235 u16 csum_all_checked, csum_all_ok;
1237 csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
1238 csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;
1240 return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
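/* Editorial note (inferred from the expression above): each protocol has
 * a "checksum checked" flag and a matching "checksum OK" flag that sits
 * PCIE_DESC_RX_CSUM_OK_SHIFT bits lower; shifting the OK bits up aligns
 * them with the checked bits, so the two values differ exactly when some
 * checksum was checked but not reported OK.
 */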
1244 * nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags
1245 * @nn: NFP Net device
1246 * @r_vec: per-ring structure
1247 * @rxd: Pointer to RX descriptor
1248 * @skb: Pointer to SKB
1250 static void nfp_net_rx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
1251 struct nfp_net_rx_desc *rxd, struct sk_buff *skb)
1253 skb_checksum_none_assert(skb);
1255 if (!(nn->netdev->features & NETIF_F_RXCSUM))
1258 if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
1259 u64_stats_update_begin(&r_vec->rx_sync);
1260 r_vec->hw_csum_rx_error++;
1261 u64_stats_update_end(&r_vec->rx_sync);
1265 /* Assume that the firmware will never report inner CSUM_OK unless outer
1266 * L4 headers were successfully parsed. FW will always report zero UDP
1267 * checksum as CSUM_OK.
1269 if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
1270 rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
1271 __skb_incr_checksum_unnecessary(skb);
1272 u64_stats_update_begin(&r_vec->rx_sync);
1273 r_vec->hw_csum_rx_ok++;
1274 u64_stats_update_end(&r_vec->rx_sync);
1277 if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
1278 rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
1279 __skb_incr_checksum_unnecessary(skb);
1280 u64_stats_update_begin(&r_vec->rx_sync);
1281 r_vec->hw_csum_rx_inner_ok++;
1282 u64_stats_update_end(&r_vec->rx_sync);
1286 static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb,
1287 unsigned int type, __be32 *hash)
1289 if (!(netdev->features & NETIF_F_RXHASH))
1293 case NFP_NET_RSS_IPV4:
1294 case NFP_NET_RSS_IPV6:
1295 case NFP_NET_RSS_IPV6_EX:
1296 skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L3);
1299 skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L4);
1305 nfp_net_set_hash_desc(struct net_device *netdev, struct sk_buff *skb,
1306 struct nfp_net_rx_desc *rxd)
1308 struct nfp_net_rx_hash *rx_hash;
1310 if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
1313 rx_hash = (struct nfp_net_rx_hash *)(skb->data - sizeof(*rx_hash));
1315 nfp_net_set_hash(netdev, skb, get_unaligned_be32(&rx_hash->hash_type),
1320 nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb,
1323 u8 *data = skb->data - meta_len;
1326 meta_info = get_unaligned_be32(data);
1330 switch (meta_info & NFP_NET_META_FIELD_MASK) {
1331 case NFP_NET_META_HASH:
1332 meta_info >>= NFP_NET_META_FIELD_SIZE;
1333 nfp_net_set_hash(netdev, skb,
1334 meta_info & NFP_NET_META_FIELD_MASK,
1338 case NFP_NET_META_MARK:
1339 skb->mark = get_unaligned_be32(data);
1346 meta_info >>= NFP_NET_META_FIELD_SIZE;
1353 nfp_net_rx_drop(struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring,
1354 struct nfp_net_rx_buf *rxbuf, struct sk_buff *skb)
1356 u64_stats_update_begin(&r_vec->rx_sync);
1358 u64_stats_update_end(&r_vec->rx_sync);
1360 /* The skb is built around the frag, so freeing the skb would free the
1361 * frag too; to be able to reuse the frag we need an extra ref.
1363 if (skb && rxbuf && skb->head == rxbuf->frag)
1364 page_ref_inc(virt_to_head_page(rxbuf->frag));
1366 nfp_net_rx_give_one(rx_ring, rxbuf->frag, rxbuf->dma_addr);
1368 dev_kfree_skb_any(skb);
1372 * nfp_net_rx() - receive up to @budget packets on @rx_ring
1373 * @rx_ring: RX ring to receive from
1374 * @budget: NAPI budget
1376 * Note, this function is separated out from the napi poll function to
1377 * more cleanly separate packet receive code from other bookkeeping
1378 * functions performed in the napi poll function.
1380 * Return: Number of packets received.
1382 static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
1384 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
1385 struct nfp_net *nn = r_vec->nfp_net;
1386 unsigned int data_len, meta_len;
1387 struct nfp_net_rx_buf *rxbuf;
1388 struct nfp_net_rx_desc *rxd;
1389 dma_addr_t new_dma_addr;
1390 struct sk_buff *skb;
1391 int pkts_polled = 0;
1395 while (pkts_polled < budget) {
1396 idx = rx_ring->rd_p & (rx_ring->cnt - 1);
1398 rxd = &rx_ring->rxds[idx];
1399 if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
1402 /* Memory barrier to ensure that we won't do other reads
1403 * before the DD bit.
1410 rxbuf = &rx_ring->rxbufs[idx];
1411 skb = build_skb(rxbuf->frag, nn->fl_bufsz);
1412 if (unlikely(!skb)) {
1413 nfp_net_rx_drop(r_vec, rx_ring, rxbuf, NULL);
1416 new_frag = nfp_net_napi_alloc_one(nn, &new_dma_addr);
1417 if (unlikely(!new_frag)) {
1418 nfp_net_rx_drop(r_vec, rx_ring, rxbuf, skb);
1422 nfp_net_dma_unmap_rx(nn, rx_ring->rxbufs[idx].dma_addr,
1423 nn->fl_bufsz, DMA_FROM_DEVICE);
1425 nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr);
1428 * <-- [rx_offset] -->
1429 * ---------------------------------------------------------
1430 * | [XX] | metadata | packet | XXXX |
1431 * ---------------------------------------------------------
1432 * <---------------- data_len --------------->
1434 * The rx_offset is fixed for all packets, the meta_len can vary
1435 * on a packet by packet basis. If rx_offset is set to zero
1436 * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
1437 * buffer and is immediately followed by the packet (no [XX]).
1439 meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
1440 data_len = le16_to_cpu(rxd->rxd.data_len);
1442 if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
1443 skb_reserve(skb, NFP_NET_RX_BUF_HEADROOM + meta_len);
1446 NFP_NET_RX_BUF_HEADROOM + nn->rx_offset);
1447 skb_put(skb, data_len - meta_len);
1450 u64_stats_update_begin(&r_vec->rx_sync);
1452 r_vec->rx_bytes += skb->len;
1453 u64_stats_update_end(&r_vec->rx_sync);
1455 if (nn->fw_ver.major <= 3) {
1456 nfp_net_set_hash_desc(nn->netdev, skb, rxd);
1457 } else if (meta_len) {
1460 end = nfp_net_parse_meta(nn->netdev, skb, meta_len);
1461 if (unlikely(end != skb->data)) {
1462 nn_warn_ratelimit(nn, "invalid RX packet metadata\n");
1463 nfp_net_rx_drop(r_vec, rx_ring, NULL, skb);
1468 skb_record_rx_queue(skb, rx_ring->idx);
1469 skb->protocol = eth_type_trans(skb, nn->netdev);
1471 nfp_net_rx_csum(nn, r_vec, rxd, skb);
1473 if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
1474 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1475 le16_to_cpu(rxd->rxd.vlan));
1477 napi_gro_receive(&rx_ring->r_vec->napi, skb);
1484 * nfp_net_poll() - napi poll function
1485 * @napi: NAPI structure
1486 * @budget: NAPI budget
1488 * Return: number of packets polled.
1490 static int nfp_net_poll(struct napi_struct *napi, int budget)
1492 struct nfp_net_r_vector *r_vec =
1493 container_of(napi, struct nfp_net_r_vector, napi);
1494 unsigned int pkts_polled = 0;
1497 nfp_net_tx_complete(r_vec->tx_ring);
1499 pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
1501 if (pkts_polled < budget) {
1502 napi_complete_done(napi, pkts_polled);
1503 nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_idx);
1509 /* Setup and Configuration
1513 * nfp_net_tx_ring_free() - Free resources allocated to a TX ring
1514 * @tx_ring: TX ring to free
1516 static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
1518 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
1519 struct nfp_net *nn = r_vec->nfp_net;
1520 struct pci_dev *pdev = nn->pdev;
1522 kfree(tx_ring->txbufs);
1525 dma_free_coherent(&pdev->dev, tx_ring->size,
1526 tx_ring->txds, tx_ring->dma);
1529 tx_ring->txbufs = NULL;
1530 tx_ring->txds = NULL;
1536 * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
1537 * @tx_ring: TX Ring structure to allocate
1538 * @cnt: Ring buffer count
1540 * Return: 0 on success, negative errno otherwise.
1542 static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt)
1544 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
1545 struct nfp_net *nn = r_vec->nfp_net;
1546 struct pci_dev *pdev = nn->pdev;
1551 tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
1552 tx_ring->txds = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
1553 &tx_ring->dma, GFP_KERNEL);
1557 sz = sizeof(*tx_ring->txbufs) * tx_ring->cnt;
1558 tx_ring->txbufs = kzalloc(sz, GFP_KERNEL);
1559 if (!tx_ring->txbufs)
1562 netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask, tx_ring->idx);
1564 nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p\n",
1565 tx_ring->idx, tx_ring->qcidx,
1566 tx_ring->cnt, (unsigned long long)tx_ring->dma, tx_ring->txds);
1571 nfp_net_tx_ring_free(tx_ring);
1575 static struct nfp_net_tx_ring *
1576 nfp_net_tx_ring_set_prepare(struct nfp_net *nn, struct nfp_net_ring_set *s)
1578 struct nfp_net_tx_ring *rings;
1581 rings = kcalloc(nn->num_tx_rings, sizeof(*rings), GFP_KERNEL);
1585 for (r = 0; r < nn->num_tx_rings; r++) {
1586 nfp_net_tx_ring_init(&rings[r], nn->tx_rings[r].r_vec, r);
1588 if (nfp_net_tx_ring_alloc(&rings[r], s->dcnt))
1592 return s->rings = rings;
1596 nfp_net_tx_ring_free(&rings[r]);
1602 nfp_net_tx_ring_set_swap(struct nfp_net *nn, struct nfp_net_ring_set *s)
1604 struct nfp_net_tx_ring *rings = s->rings;
1605 struct nfp_net_ring_set new = *s;
1608 s->dcnt = nn->txd_cnt;
1609 s->rings = nn->tx_rings;
1611 for (r = 0; r < nn->num_tx_rings; r++)
1612 nn->tx_rings[r].r_vec->tx_ring = &rings[r];
1614 nn->txd_cnt = new.dcnt;
1615 nn->tx_rings = new.rings;
1619 nfp_net_tx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s)
1621 struct nfp_net_tx_ring *rings = s->rings;
1624 for (r = 0; r < nn->num_tx_rings; r++)
1625 nfp_net_tx_ring_free(&rings[r]);
1631 * nfp_net_rx_ring_free() - Free resources allocated to a RX ring
1632 * @rx_ring: RX ring to free
1634 static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
1636 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
1637 struct nfp_net *nn = r_vec->nfp_net;
1638 struct pci_dev *pdev = nn->pdev;
1640 kfree(rx_ring->rxbufs);
1643 dma_free_coherent(&pdev->dev, rx_ring->size,
1644 rx_ring->rxds, rx_ring->dma);
1647 rx_ring->rxbufs = NULL;
1648 rx_ring->rxds = NULL;
1654 * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
1655 * @rx_ring: RX ring to allocate
1656 * @fl_bufsz: Size of buffers to allocate
1657 * @cnt: Ring buffer count
1659 * Return: 0 on success, negative errno otherwise.
1662 nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz,
1665 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
1666 struct nfp_net *nn = r_vec->nfp_net;
1667 struct pci_dev *pdev = nn->pdev;
1671 rx_ring->bufsz = fl_bufsz;
1673 rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
1674 rx_ring->rxds = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
1675 &rx_ring->dma, GFP_KERNEL);
1679 sz = sizeof(*rx_ring->rxbufs) * rx_ring->cnt;
1680 rx_ring->rxbufs = kzalloc(sz, GFP_KERNEL);
1681 if (!rx_ring->rxbufs)
1684 nn_dbg(nn, "RxQ%02d: FlQCidx=%02d RxQCidx=%02d cnt=%d dma=%#llx host=%p\n",
1685 rx_ring->idx, rx_ring->fl_qcidx, rx_ring->rx_qcidx,
1686 rx_ring->cnt, (unsigned long long)rx_ring->dma, rx_ring->rxds);
1691 nfp_net_rx_ring_free(rx_ring);
1695 static struct nfp_net_rx_ring *
1696 nfp_net_rx_ring_set_prepare(struct nfp_net *nn, struct nfp_net_ring_set *s)
1698 unsigned int fl_bufsz = nfp_net_calc_fl_bufsz(nn, s->mtu);
1699 struct nfp_net_rx_ring *rings;
1702 rings = kcalloc(nn->num_rx_rings, sizeof(*rings), GFP_KERNEL);
1706 for (r = 0; r < nn->num_rx_rings; r++) {
1707 nfp_net_rx_ring_init(&rings[r], nn->rx_rings[r].r_vec, r);
1709 if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz, s->dcnt))
1712 if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r]))
1716 return s->rings = rings;
1720 nfp_net_rx_ring_bufs_free(nn, &rings[r]);
1722 nfp_net_rx_ring_free(&rings[r]);
1729 nfp_net_rx_ring_set_swap(struct nfp_net *nn, struct nfp_net_ring_set *s)
1731 struct nfp_net_rx_ring *rings = s->rings;
1732 struct nfp_net_ring_set new = *s;
1735 s->mtu = nn->netdev->mtu;
1736 s->dcnt = nn->rxd_cnt;
1737 s->rings = nn->rx_rings;
1739 for (r = 0; r < nn->num_rx_rings; r++)
1740 nn->rx_rings[r].r_vec->rx_ring = &rings[r];
1742 nn->netdev->mtu = new.mtu;
1743 nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, new.mtu);
1744 nn->rxd_cnt = new.dcnt;
1745 nn->rx_rings = new.rings;
1749 nfp_net_rx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s)
1751 struct nfp_net_rx_ring *rings = s->rings;
1754 for (r = 0; r < nn->num_rx_rings; r++) {
1755 nfp_net_rx_ring_bufs_free(nn, &rings[r]);
1756 nfp_net_rx_ring_free(&rings[r]);
1763 nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
1766 struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
1769 if (idx < nn->num_tx_rings) {
1770 r_vec->tx_ring = &nn->tx_rings[idx];
1771 nfp_net_tx_ring_init(r_vec->tx_ring, r_vec, idx);
1773 r_vec->tx_ring = NULL;
1776 if (idx < nn->num_rx_rings) {
1777 r_vec->rx_ring = &nn->rx_rings[idx];
1778 nfp_net_rx_ring_init(r_vec->rx_ring, r_vec, idx);
1780 r_vec->rx_ring = NULL;
1783 snprintf(r_vec->name, sizeof(r_vec->name),
1784 "%s-rxtx-%d", nn->netdev->name, idx);
1785 err = request_irq(entry->vector, r_vec->handler, 0, r_vec->name, r_vec);
1787 nn_err(nn, "Error requesting IRQ %d\n", entry->vector);
1790 disable_irq(entry->vector);
1793 netif_napi_add(nn->netdev, &r_vec->napi,
1794 nfp_net_poll, NAPI_POLL_WEIGHT);
1796 irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask);
1798 nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, entry->vector, entry->entry);
1804 nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
1806 struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
1808 irq_set_affinity_hint(entry->vector, NULL);
1809 netif_napi_del(&r_vec->napi);
1810 free_irq(entry->vector, r_vec);
1814 * nfp_net_rss_write_itbl() - Write RSS indirection table to device
1815 * @nn: NFP Net device to reconfigure
1817 void nfp_net_rss_write_itbl(struct nfp_net *nn)
1821 for (i = 0; i < NFP_NET_CFG_RSS_ITBL_SZ; i += 4)
1822 nn_writel(nn, NFP_NET_CFG_RSS_ITBL + i,
1823 get_unaligned_le32(nn->rss_itbl + i));
1827 * nfp_net_rss_write_key() - Write RSS hash key to device
1828 * @nn: NFP Net device to reconfigure
1830 void nfp_net_rss_write_key(struct nfp_net *nn)
1834 for (i = 0; i < NFP_NET_CFG_RSS_KEY_SZ; i += 4)
1835 nn_writel(nn, NFP_NET_CFG_RSS_KEY + i,
1836 get_unaligned_le32(nn->rss_key + i));
1840 * nfp_net_coalesce_write_cfg() - Write irq coalescence configuration to HW
1841 * @nn: NFP Net device to reconfigure
1843 void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
1849 /* Compute factor used to convert coalesce '_usecs' parameters to
1850 * ME timestamp ticks. There are 16 ME clock cycles for each timestamp
1853 factor = nn->me_freq_mhz / 16;
1855 /* copy RX interrupt coalesce parameters */
1856 value = (nn->rx_coalesce_max_frames << 16) |
1857 (factor * nn->rx_coalesce_usecs);
1858 for (i = 0; i < nn->num_rx_rings; i++)
1859 nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);
1861 /* copy TX interrupt coalesce parameters */
1862 value = (nn->tx_coalesce_max_frames << 16) |
1863 (factor * nn->tx_coalesce_usecs);
1864 for (i = 0; i < nn->num_tx_rings; i++)
1865 nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
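/* Editorial example (illustrative numbers): with me_freq_mhz = 1200 the
 * factor is 1200 / 16 = 75 ticks per microsecond, so rx_coalesce_usecs = 50
 * and rx_coalesce_max_frames = 64 would program
 * (64 << 16) | (75 * 50) = 0x00400ea6 into each RXR_IRQ_MOD register.
 */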
1869 * nfp_net_write_mac_addr() - Write mac address to the device control BAR
1870 * @nn: NFP Net device to reconfigure
1872 * Writes the MAC address from the netdev to the device control BAR. Does not
1873 * perform the required reconfig. We do a bit of byte swapping dance because firmware is LE.
1876 static void nfp_net_write_mac_addr(struct nfp_net *nn)
1878 nn_writel(nn, NFP_NET_CFG_MACADDR + 0,
1879 get_unaligned_be32(nn->netdev->dev_addr));
1880 nn_writew(nn, NFP_NET_CFG_MACADDR + 6,
1881 get_unaligned_be16(nn->netdev->dev_addr + 4));
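/* Editorial example (illustrative MAC address): for 00:15:4d:12:34:56 the
 * two writes above store 0x00154d12 at NFP_NET_CFG_MACADDR and 0x3456 at
 * NFP_NET_CFG_MACADDR + 6, i.e. the address is kept as two big-endian
 * packed fields rather than as a plain byte array.
 */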
1884 static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
1886 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
1887 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
1888 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);
1890 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
1891 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
1892 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
1896 * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
1897 * @nn: NFP Net device to reconfigure
1899 static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
1901 u32 new_ctrl, update;
1905 new_ctrl = nn->ctrl;
1906 new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE;
1907 update = NFP_NET_CFG_UPDATE_GEN;
1908 update |= NFP_NET_CFG_UPDATE_MSIX;
1909 update |= NFP_NET_CFG_UPDATE_RING;
1911 if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
1912 new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
1914 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
1915 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
1917 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
1918 err = nfp_net_reconfig(nn, update);
1920 nn_err(nn, "Could not disable device: %d\n", err);
1922 for (r = 0; r < nn->num_rx_rings; r++)
1923 nfp_net_rx_ring_reset(nn->r_vecs[r].rx_ring);
1924 for (r = 0; r < nn->num_tx_rings; r++)
1925 nfp_net_tx_ring_reset(nn, nn->r_vecs[r].tx_ring);
1926 for (r = 0; r < nn->num_r_vecs; r++)
1927 nfp_net_vec_clear_ring_data(nn, r);
1929 nn->ctrl = new_ctrl;
1933 nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
1934 struct nfp_net_rx_ring *rx_ring, unsigned int idx)
1936 /* Write the DMA address, size and MSI-X info to the device */
1937 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
1938 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
1939 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_idx);
1943 nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
1944 struct nfp_net_tx_ring *tx_ring, unsigned int idx)
1946 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
1947 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
1948 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_idx);
1951 static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
1953 u32 new_ctrl, update = 0;
1957 new_ctrl = nn->ctrl;
1959 if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
1960 nfp_net_rss_write_key(nn);
1961 nfp_net_rss_write_itbl(nn);
1962 nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg);
1963 update |= NFP_NET_CFG_UPDATE_RSS;
1966 if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
1967 nfp_net_coalesce_write_cfg(nn);
1969 new_ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
1970 update |= NFP_NET_CFG_UPDATE_IRQMOD;
1973 for (r = 0; r < nn->num_tx_rings; r++)
1974 nfp_net_tx_ring_hw_cfg_write(nn, &nn->tx_rings[r], r);
1975 for (r = 0; r < nn->num_rx_rings; r++)
1976 nfp_net_rx_ring_hw_cfg_write(nn, &nn->rx_rings[r], r);
1978 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ?
1979 0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1);
1981 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ?
1982 0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1);
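/* Editorial note: the 64-ring case is written out explicitly above because
 * shifting a 64-bit value by 64 is undefined behaviour in C, so
 * ((u64)1 << 64) - 1 cannot be used to build the all-ones mask.
 */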
1984 nfp_net_write_mac_addr(nn);
1986 nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu);
1987 nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz);
1990 new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
1991 update |= NFP_NET_CFG_UPDATE_GEN;
1992 update |= NFP_NET_CFG_UPDATE_MSIX;
1993 update |= NFP_NET_CFG_UPDATE_RING;
1994 if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
1995 new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
1997 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
1998 err = nfp_net_reconfig(nn, update);
2000 nn->ctrl = new_ctrl;
2002 for (r = 0; r < nn->num_rx_rings; r++)
2003 nfp_net_rx_ring_fill_freelist(nn->r_vecs[r].rx_ring);
2005 /* Since reconfiguration requests while NFP is down are ignored we
2006 * have to wipe the entire VXLAN configuration and reinitialize it.
2008 if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) {
2009 memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
2010 memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
2011 udp_tunnel_get_rx_info(nn->netdev);
2018 * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
2019 * @nn: NFP Net device to reconfigure
2021 static int nfp_net_set_config_and_enable(struct nfp_net *nn)
2025 err = __nfp_net_set_config_and_enable(nn);
2027 nfp_net_clear_config_and_disable(nn);
2033 * nfp_net_open_stack() - Start the device from stack's perspective
2034 * @nn: NFP Net device to reconfigure
2036 static void nfp_net_open_stack(struct nfp_net *nn)
2040 for (r = 0; r < nn->num_r_vecs; r++) {
2041 napi_enable(&nn->r_vecs[r].napi);
2042 enable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
2045 netif_tx_wake_all_queues(nn->netdev);
2047 enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2048 nfp_net_read_link_status(nn);
2051 static int nfp_net_netdev_open(struct net_device *netdev)
2053 struct nfp_net *nn = netdev_priv(netdev);
2056 if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) {
2057 nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl);
2061 /* Step 1: Allocate resources for rings and the like
2062 * - Request interrupts
2063 * - Allocate RX and TX ring resources
2064 * - Setup initial RSS table
2066 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
2067 nn->exn_name, sizeof(nn->exn_name),
2068 NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
2071 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc",
2072 nn->lsc_name, sizeof(nn->lsc_name),
2073 NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
2076 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2078 nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
2080 if (!nn->rx_rings) {
2084 nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings),
2086 if (!nn->tx_rings) {
2088 goto err_free_rx_rings;
2091 for (r = 0; r < nn->num_r_vecs; r++) {
2092 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
2094 goto err_cleanup_vec_p;
2096 for (r = 0; r < nn->num_tx_rings; r++) {
2097 err = nfp_net_tx_ring_alloc(nn->r_vecs[r].tx_ring, nn->txd_cnt);
2099 goto err_free_tx_ring_p;
2101 for (r = 0; r < nn->num_rx_rings; r++) {
2102 err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring,
2103 nn->fl_bufsz, nn->rxd_cnt);
2105 goto err_flush_free_rx_ring_p;
2107 err = nfp_net_rx_ring_bufs_alloc(nn, nn->r_vecs[r].rx_ring);
2109 goto err_free_rx_ring_p;
2112 err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings);
2114 goto err_free_rings;
2116 err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings);
2118 goto err_free_rings;
2120 /* Step 2: Configure the NFP
2121 * - Enable rings from 0 to tx_rings/rx_rings - 1.
2122 * - Write MAC address (in case it changed)
2124 * - Set the Freelist buffer size
2127 err = nfp_net_set_config_and_enable(nn);
2129 goto err_free_rings;
2131 /* Step 3: Enable for kernel
2132 * - put some freelist descriptors on each RX ring
2133 * - enable NAPI on each ring
2134 * - enable all TX queues
2137 nfp_net_open_stack(nn);
2142 r = nn->num_rx_rings;
2143 err_flush_free_rx_ring_p:
2145 nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
2147 nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
2149 r = nn->num_tx_rings;
2152 nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
2156 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
2157 kfree(nn->tx_rings);
2159 kfree(nn->rx_rings);
2161 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
2163 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
2168 * nfp_net_close_stack() - Quiesce the stack (part of close)
2169 * @nn: NFP Net device to reconfigure
2171 static void nfp_net_close_stack(struct nfp_net *nn)
2175 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2176 netif_carrier_off(nn->netdev);
2177 nn->link_up = false;
2179 for (r = 0; r < nn->num_r_vecs; r++) {
2180 disable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
2181 napi_disable(&nn->r_vecs[r].napi);
2184 netif_tx_disable(nn->netdev);
2188 * nfp_net_close_free_all() - Free all runtime resources
2189 * @nn: NFP Net device to reconfigure
2191 static void nfp_net_close_free_all(struct nfp_net *nn)
2195 for (r = 0; r < nn->num_rx_rings; r++) {
2196 nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
2197 nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
2199 for (r = 0; r < nn->num_tx_rings; r++)
2200 nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
2201 for (r = 0; r < nn->num_r_vecs; r++)
2202 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
2204 kfree(nn->rx_rings);
2205 kfree(nn->tx_rings);
2207 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
2208 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
2212 * nfp_net_netdev_close() - Called when the device is downed
2213 * @netdev: netdev structure
2215 static int nfp_net_netdev_close(struct net_device *netdev)
2217 struct nfp_net *nn = netdev_priv(netdev);
2219 if (!(nn->ctrl & NFP_NET_CFG_CTRL_ENABLE)) {
2220 nn_err(nn, "Dev is not up: 0x%08x\n", nn->ctrl);
2224 /* Step 1: Disable RX and TX rings from the Linux kernel perspective
2226 nfp_net_close_stack(nn);
2230 nfp_net_clear_config_and_disable(nn);
2232 /* Step 3: Free resources
2234 nfp_net_close_free_all(nn);
2236 nn_dbg(nn, "%s down\n", netdev->name);
2240 static void nfp_net_set_rx_mode(struct net_device *netdev)
2242 struct nfp_net *nn = netdev_priv(netdev);
2245 new_ctrl = nn->ctrl;
2247 if (netdev->flags & IFF_PROMISC) {
2248 if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
2249 new_ctrl |= NFP_NET_CFG_CTRL_PROMISC;
2251 nn_warn(nn, "FW does not support promiscuous mode\n");
2253 new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC;
2256 if (new_ctrl == nn->ctrl)
2259 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2260 nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);
2262 nn->ctrl = new_ctrl;
2266 nfp_net_ring_swap_enable(struct nfp_net *nn,
2267 struct nfp_net_ring_set *rx,
2268 struct nfp_net_ring_set *tx)
2271 nfp_net_rx_ring_set_swap(nn, rx);
2273 nfp_net_tx_ring_set_swap(nn, tx);
2275 return __nfp_net_set_config_and_enable(nn);
static void
nfp_net_ring_reconfig_down(struct nfp_net *nn,
			   struct nfp_net_ring_set *rx,
			   struct nfp_net_ring_set *tx)
{
	nn->netdev->mtu = rx ? rx->mtu : nn->netdev->mtu;
	nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, nn->netdev->mtu);
	nn->rxd_cnt = rx ? rx->dcnt : nn->rxd_cnt;
	nn->txd_cnt = tx ? tx->dcnt : nn->txd_cnt;
}
static int
nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_ring_set *rx,
		      struct nfp_net_ring_set *tx)
{
	int err, err2;

	if (!netif_running(nn->netdev)) {
		nfp_net_ring_reconfig_down(nn, rx, tx);
		return 0;
	}

	/* Prepare new rings */
	if (rx && !nfp_net_rx_ring_set_prepare(nn, rx))
		return -ENOMEM;
	if (tx && !nfp_net_tx_ring_set_prepare(nn, tx)) {
		err = -ENOMEM;
		goto err_free_rx;
	}

	/* Stop device, swap in new rings, try to start the firmware */
	nfp_net_close_stack(nn);
	nfp_net_clear_config_and_disable(nn);

	err = nfp_net_ring_swap_enable(nn, rx, tx);
	if (err) {
		nfp_net_clear_config_and_disable(nn);

		/* Try with old configuration and old rings */
		err2 = nfp_net_ring_swap_enable(nn, rx, tx);
		if (err2)
			nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
			       err, err2);
	}

	if (rx)
		nfp_net_rx_ring_set_free(nn, rx);
	if (tx)
		nfp_net_tx_ring_set_free(nn, tx);

	nfp_net_open_stack(nn);
	return err;

err_free_rx:
	if (rx)
		nfp_net_rx_ring_set_free(nn, rx);
	return err;
}
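/* MTU changes go through the same reconfiguration path: only an RX ring set
 * (current descriptor count, new MTU) is passed in and the TX rings are left
 * untouched (tx == NULL).
 */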
static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_net_ring_set rx = {
		.mtu = new_mtu,
		.dcnt = nn->rxd_cnt,
	};

	return nfp_net_ring_reconfig(nn, &rx, NULL);
}
static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int r;

	for (r = 0; r < nn->num_r_vecs; r++) {
		struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
		u64 data[3];
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&r_vec->rx_sync);
			data[0] = r_vec->rx_pkts;
			data[1] = r_vec->rx_bytes;
			data[2] = r_vec->rx_drops;
		} while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
		stats->rx_packets += data[0];
		stats->rx_bytes += data[1];
		stats->rx_dropped += data[2];

		do {
			start = u64_stats_fetch_begin(&r_vec->tx_sync);
			data[0] = r_vec->tx_pkts;
			data[1] = r_vec->tx_bytes;
			data[2] = r_vec->tx_errors;
		} while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
		stats->tx_packets += data[0];
		stats->tx_bytes += data[1];
		stats->tx_errors += data[2];
	}

	return stats;
}
static bool nfp_net_ebpf_capable(struct nfp_net *nn)
{
	if (nn->cap & NFP_NET_CFG_CTRL_BPF &&
	    nn_readb(nn, NFP_NET_CFG_BPF_ABI) == NFP_NET_BPF_ABI)
		return true;
	return false;
}
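/* TC offload below is only attempted when the firmware sets the BPF
 * capability bit and reports the ABI version this driver was built against
 * (checked via nfp_net_ebpf_capable()).
 */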
static int
nfp_net_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
		 struct tc_to_netdev *tc)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
		return -EOPNOTSUPP;
	if (proto != htons(ETH_P_ALL))
		return -EOPNOTSUPP;

	if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn))
		return nfp_net_bpf_offload(nn, handle, proto, tc->cls_bpf);

	return -EINVAL;
}
static int nfp_net_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct nfp_net *nn = netdev_priv(netdev);
	u32 new_ctrl;
	int err;

	/* Assume this is not called with features we have not advertised */

	new_ctrl = nn->ctrl;

	if (changed & NETIF_F_RXCSUM) {
		if (features & NETIF_F_RXCSUM)
			new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM;
	}

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
		if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
			new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_TXCSUM;
	}

	if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
			new_ctrl |= NFP_NET_CFG_CTRL_LSO;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_LSO;
	}

	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN;
	}

	if (changed & NETIF_F_HW_VLAN_CTAG_TX) {
		if (features & NETIF_F_HW_VLAN_CTAG_TX)
			new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN;
	}

	if (changed & NETIF_F_SG) {
		if (features & NETIF_F_SG)
			new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
	}

	if (changed & NETIF_F_HW_TC && nn->ctrl & NFP_NET_CFG_CTRL_BPF) {
		nn_err(nn, "Cannot disable HW TC offload while in use\n");
		return -EBUSY;
	}

	nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
	       netdev->features, features, changed);

	if (new_ctrl == nn->ctrl)
		return 0;

	nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->ctrl, new_ctrl);
	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		return err;

	nn->ctrl = new_ctrl;

	return 0;
}
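/* nfp_net_features_check() trims offload flags on a per-skb basis: TSO is
 * dropped when the inner headers would not fit into the TX descriptor's
 * header-length field, and checksum/GSO offloads are only kept for
 * encapsulations the firmware can parse (VXLAN-style UDP tunnels and GRE
 * carrying Ethernet frames).
 */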
static netdev_features_t
nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
		       netdev_features_t features)
{
	u8 l4_hdr;

	/* We can't do TSO over double tagged packets (802.1AD) */
	features &= vlan_features_check(skb, features);

	if (!skb->encapsulation)
		return features;

	/* Ensure that inner L4 header offset fits into TX descriptor field */
	if (skb_is_gso(skb)) {
		u32 hdrlen;

		hdrlen = skb_inner_transport_header(skb) - skb->data +
			inner_tcp_hdrlen(skb);

		if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ))
			features &= ~NETIF_F_GSO_MASK;
	}

	/* VXLAN/GRE check */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	}

	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    (l4_hdr != IPPROTO_UDP && l4_hdr != IPPROTO_GRE) ||
	    (l4_hdr == IPPROTO_UDP &&
	     (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	      sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}
/**
 * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW
 * @nn:   NFP Net device to reconfigure
 * @idx:  Index into the port table where new port should be written
 * @port: UDP port to configure (pass zero to remove VXLAN port)
 */
static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port)
{
	int i;

	nn->vxlan_ports[idx] = port;

	if (!(nn->ctrl & NFP_NET_CFG_CTRL_VXLAN))
		return;

	BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2)
		nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port),
			  be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 |
			  be16_to_cpu(nn->vxlan_ports[i]));

	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_VXLAN);
}
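/* The firmware's VXLAN port table is written as pairs of ports packed into
 * 32-bit words, hence the stride-2 loop above and the BUILD_BUG_ON that
 * NFP_NET_N_VXLAN_PORTS is even.  Look-ups below operate on the unpacked
 * per-port copy kept in nn->vxlan_ports.
 */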
/**
 * nfp_net_find_vxlan_idx() - find table entry of the port or a free one
 * @nn:   NFP Network structure
 * @port: UDP port to look for
 *
 * Return: if the port is already in the table -- its position;
 *	   if the port is not in the table -- free position to use;
 *	   if the table is full -- -ENOSPC.
 */
static int nfp_net_find_vxlan_idx(struct nfp_net *nn, __be16 port)
{
	int i, free_idx = -ENOSPC;

	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
		if (nn->vxlan_ports[i] == port)
			return i;
		if (!nn->vxlan_usecnt[i])
			free_idx = i;
	}

	return free_idx;
}
static void nfp_net_add_vxlan_port(struct net_device *netdev,
				   struct udp_tunnel_info *ti)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int idx;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	idx = nfp_net_find_vxlan_idx(nn, ti->port);
	if (idx == -ENOSPC)
		return;

	if (!nn->vxlan_usecnt[idx]++)
		nfp_net_set_vxlan_port(nn, idx, ti->port);
}
static void nfp_net_del_vxlan_port(struct net_device *netdev,
				   struct udp_tunnel_info *ti)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int idx;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	idx = nfp_net_find_vxlan_idx(nn, ti->port);
	if (idx == -ENOSPC || !nn->vxlan_usecnt[idx])
		return;

	if (!--nn->vxlan_usecnt[idx])
		nfp_net_set_vxlan_port(nn, idx, 0);
}
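/* The two UDP tunnel callbacks above reference count each offloaded port via
 * nn->vxlan_usecnt[]: the firmware table is only rewritten when the first
 * user of a port appears or when the last user goes away (the entry is then
 * cleared by writing port 0).
 */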
static const struct net_device_ops nfp_net_netdev_ops = {
	.ndo_open		= nfp_net_netdev_open,
	.ndo_stop		= nfp_net_netdev_close,
	.ndo_start_xmit		= nfp_net_tx,
	.ndo_get_stats64	= nfp_net_stat64,
	.ndo_setup_tc		= nfp_net_setup_tc,
	.ndo_tx_timeout		= nfp_net_tx_timeout,
	.ndo_set_rx_mode	= nfp_net_set_rx_mode,
	.ndo_change_mtu		= nfp_net_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_set_features	= nfp_net_set_features,
	.ndo_features_check	= nfp_net_features_check,
	.ndo_udp_tunnel_add	= nfp_net_add_vxlan_port,
	.ndo_udp_tunnel_del	= nfp_net_del_vxlan_port,
};
/**
 * nfp_net_info() - Print general info about the NIC
 * @nn:      NFP Net device to reconfigure
 */
void nfp_net_info(struct nfp_net *nn)
{
	nn_info(nn, "Netronome NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
		nn->is_vf ? "VF " : "",
		nn->num_tx_rings, nn->max_tx_rings,
		nn->num_rx_rings, nn->max_rx_rings);
	nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
		nn->fw_ver.resv, nn->fw_ver.class,
		nn->fw_ver.major, nn->fw_ver.minor,
		nn->max_mtu);
	nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
		nn->cap,
		nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
		nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
		nn->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
		nn->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
		nn->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
		nn->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
		nn->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
		nn->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
		nn->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
		nn->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
		nn->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "",
		nn->cap & NFP_NET_CFG_CTRL_L2SWITCH ? "L2SWITCH " : "",
		nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
		nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
		nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
		nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "",
		nfp_net_ebpf_capable(nn) ? "BPF " : "");
}
/**
 * nfp_net_netdev_alloc() - Allocate netdev and related structure
 * @pdev:         PCI device
 * @max_tx_rings: Maximum number of TX rings supported by device
 * @max_rx_rings: Maximum number of RX rings supported by device
 *
 * This function allocates a netdev device and fills in the initial
 * part of the @struct nfp_net structure.
 *
 * Return: NFP Net device structure, or ERR_PTR on error.
 */
struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
				     unsigned int max_tx_rings,
				     unsigned int max_rx_rings)
{
	struct net_device *netdev;
	struct nfp_net *nn;

	netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
				    max_tx_rings, max_rx_rings);
	if (!netdev)
		return ERR_PTR(-ENOMEM);

	SET_NETDEV_DEV(netdev, &pdev->dev);
	nn = netdev_priv(netdev);

	nn->netdev = netdev;
	nn->pdev = pdev;

	nn->max_tx_rings = max_tx_rings;
	nn->max_rx_rings = max_rx_rings;

	nn->num_tx_rings = min_t(unsigned int, max_tx_rings, num_online_cpus());
	nn->num_rx_rings = min_t(unsigned int, max_rx_rings,
				 netif_get_num_default_rss_queues());

	nn->num_r_vecs = max(nn->num_tx_rings, nn->num_rx_rings);
	nn->num_r_vecs = min_t(unsigned int, nn->num_r_vecs, num_online_cpus());

	nn->txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
	nn->rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;

	spin_lock_init(&nn->reconfig_lock);
	spin_lock_init(&nn->rx_filter_lock);
	spin_lock_init(&nn->link_status_lock);

	setup_timer(&nn->reconfig_timer,
		    nfp_net_reconfig_timer, (unsigned long)nn);
	setup_timer(&nn->rx_filter_stats_timer,
		    nfp_net_filter_stats_timer, (unsigned long)nn);

	return nn;
}
/**
 * nfp_net_netdev_free() - Undo what @nfp_net_netdev_alloc() did
 * @nn:      NFP Net device to reconfigure
 */
void nfp_net_netdev_free(struct nfp_net *nn)
{
	free_netdev(nn->netdev);
}
/**
 * nfp_net_rss_init() - Set the initial RSS parameters
 * @nn:      NFP Net device to reconfigure
 */
static void nfp_net_rss_init(struct nfp_net *nn)
{
	int i;

	netdev_rss_key_fill(nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ);

	for (i = 0; i < sizeof(nn->rss_itbl); i++)
		nn->rss_itbl[i] =
			ethtool_rxfh_indir_default(i, nn->num_rx_rings);

	/* Enable IPv4/IPv6 TCP by default */
	nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP |
		      NFP_NET_CFG_RSS_IPV6_TCP |
		      NFP_NET_CFG_RSS_TOEPLITZ |
		      NFP_NET_CFG_RSS_MASK;
}
/**
 * nfp_net_irqmod_init() - Set the initial IRQ moderation parameters
 * @nn:      NFP Net device to reconfigure
 */
static void nfp_net_irqmod_init(struct nfp_net *nn)
{
	nn->rx_coalesce_usecs = 50;
	nn->rx_coalesce_max_frames = 64;
	nn->tx_coalesce_usecs = 50;
	nn->tx_coalesce_max_frames = 64;
}
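/* These moderation defaults are only used when the firmware advertises the
 * IRQMOD capability; nfp_net_netdev_init() below calls this helper and sets
 * NFP_NET_CFG_CTRL_IRQMOD only in that case.
 */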
/**
 * nfp_net_netdev_init() - Initialise/finalise the netdev structure
 * @netdev:      netdev structure
 *
 * Return: 0 on success or negative errno on error.
 */
int nfp_net_netdev_init(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int err;

	/* Get some of the read-only fields from the BAR */
	nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
	nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);

	nfp_net_write_mac_addr(nn);

	/* Determine RX packet/metadata boundary offset */
	if (nn->fw_ver.major >= 2)
		nn->rx_offset = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
	else
		nn->rx_offset = NFP_NET_RX_OFFSET;

	/* Set default MTU and Freelist buffer size */
	if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
		netdev->mtu = nn->max_mtu;
	else
		netdev->mtu = NFP_NET_DEFAULT_MTU;
	nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, netdev->mtu);

	/* Advertise/enable offloads based on capabilities
	 *
	 * Note: netdev->features show the currently enabled features
	 * and netdev->hw_features advertises which features are
	 * supported. By default we enable most features.
	 */
	netdev->hw_features = NETIF_F_HIGHDMA;
	if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM) {
		netdev->hw_features |= NETIF_F_RXCSUM;
		nn->ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
		netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		nn->ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_GATHER) {
		netdev->hw_features |= NETIF_F_SG;
		nn->ctrl |= NFP_NET_CFG_CTRL_GATHER;
	}
	if ((nn->cap & NFP_NET_CFG_CTRL_LSO) && nn->fw_ver.major > 2) {
		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		nn->ctrl |= NFP_NET_CFG_CTRL_LSO;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
		netdev->hw_features |= NETIF_F_RXHASH;
		nfp_net_rss_init(nn);
		nn->ctrl |= NFP_NET_CFG_CTRL_RSS;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_VXLAN &&
	    nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
		if (nn->cap & NFP_NET_CFG_CTRL_LSO)
			netdev->hw_features |= NETIF_F_GSO_GRE |
					       NETIF_F_GSO_UDP_TUNNEL;
		nn->ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE;

		netdev->hw_enc_features = netdev->hw_features;
	}

	netdev->vlan_features = netdev->hw_features;

	if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) {
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
		nn->ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) {
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
		nn->ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
	}

	netdev->features = netdev->hw_features;

	if (nfp_net_ebpf_capable(nn))
		netdev->hw_features |= NETIF_F_HW_TC;

	/* Advertise but disable TSO by default. */
	netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);

	/* Allow L2 Broadcast and Multicast through by default, if supported */
	if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
		nn->ctrl |= NFP_NET_CFG_CTRL_L2BC;
	if (nn->cap & NFP_NET_CFG_CTRL_L2MC)
		nn->ctrl |= NFP_NET_CFG_CTRL_L2MC;

	/* Allow IRQ moderation, if supported */
	if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
		nfp_net_irqmod_init(nn);
		nn->ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
	}

	/* Stash the re-configuration queue away. First odd queue in TX Bar */
	nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;

	/* Make sure the FW knows the netdev is supposed to be disabled here */
	nn_writel(nn, NFP_NET_CFG_CTRL, 0);
	nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
	nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RING |
				   NFP_NET_CFG_UPDATE_GEN);
	if (err)
		return err;

	/* Finalise the netdev setup */
	netdev->netdev_ops = &nfp_net_netdev_ops;
	netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);

	/* MTU range: 68 - hw-specific max */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = nn->max_mtu;

	netif_carrier_off(netdev);

	nfp_net_set_ethtool_ops(netdev);
	nfp_net_irqs_assign(netdev);

	return register_netdev(netdev);
}
/**
 * nfp_net_netdev_clean() - Undo what nfp_net_netdev_init() did.
 * @netdev:      netdev structure
 */
void nfp_net_netdev_clean(struct net_device *netdev)
{
	unregister_netdev(netdev);
}