2 * Copyright (C) 2015-2017 Netronome Systems, Inc.
4 * This software is dual licensed under the GNU General Public License Version 2,
5 * June 1991 as shown in the file COPYING in the top-level directory of this
6 * source tree or the BSD 2-Clause License provided below. You have the
7 * option to license this software under the complete terms of either license.
9 * The BSD 2-Clause License:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following conditions are met:
15 * 1. Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
36 * Netronome network device driver: Common functions between PF and VF
37 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
38 * Jason McMullan <jason.mcmullan@netronome.com>
39 * Rolf Neugebauer <rolf.neugebauer@netronome.com>
40 * Brad Petrus <brad.petrus@netronome.com>
41 * Chris Telfer <chris.telfer@netronome.com>
44 #include <linux/bitfield.h>
45 #include <linux/bpf.h>
46 #include <linux/bpf_trace.h>
47 #include <linux/module.h>
48 #include <linux/kernel.h>
49 #include <linux/init.h>
51 #include <linux/netdevice.h>
52 #include <linux/etherdevice.h>
53 #include <linux/interrupt.h>
55 #include <linux/ipv6.h>
57 #include <linux/overflow.h>
58 #include <linux/page_ref.h>
59 #include <linux/pci.h>
60 #include <linux/pci_regs.h>
61 #include <linux/msi.h>
62 #include <linux/ethtool.h>
63 #include <linux/log2.h>
64 #include <linux/if_vlan.h>
65 #include <linux/random.h>
66 #include <linux/vmalloc.h>
67 #include <linux/ktime.h>
69 #include <net/switchdev.h>
70 #include <net/vxlan.h>
72 #include "nfpcore/nfp_nsp.h"
74 #include "nfp_net_ctrl.h"
76 #include "nfp_net_sriov.h"
80 * nfp_net_get_fw_version() - Read and parse the FW version
81 * @fw_ver: Output fw_version structure to read the version into
82 * @ctrl_bar: Mapped address of the control BAR
84 void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
85 void __iomem *ctrl_bar)
89 reg = readl(ctrl_bar + NFP_NET_CFG_VERSION);
90 put_unaligned_le32(reg, fw_ver);
93 static dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
95 return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
96 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
97 dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
101 nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
103 dma_sync_single_for_device(dp->dev, dma_addr,
104 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
108 static void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr)
110 dma_unmap_single_attrs(dp->dev, dma_addr,
111 dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
112 dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
115 static void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr,
118 dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM,
119 len, dp->rx_dma_dir);
124 * Firmware reconfig may take a while so we have two versions of it -
125 * synchronous and asynchronous (posted). All synchronous callers are holding
126 * RTNL so we don't have to worry about serializing them.
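/* For illustration (using the helpers defined below): a caller which may
 * sleep simply invokes nfp_net_reconfig(nn, update) and waits for the
 * result, while contexts which cannot sleep post a request with
 * nfp_net_reconfig_post(nn, update) and let the reconfig timer (or the
 * next synchronous caller) run it to completion.
 */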
128 static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
130 nn_writel(nn, NFP_NET_CFG_UPDATE, update);
131 /* ensure update is written before pinging HW */
133 nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
136 /* Pass 0 as update to run posted reconfigs. */
137 static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update)
139 update |= nn->reconfig_posted;
140 nn->reconfig_posted = 0;
142 nfp_net_reconfig_start(nn, update);
144 nn->reconfig_timer_active = true;
145 mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ);
148 static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
152 reg = nn_readl(nn, NFP_NET_CFG_UPDATE);
155 if (reg & NFP_NET_CFG_UPDATE_ERR) {
156 nn_err(nn, "Reconfig error: 0x%08x\n", reg);
158 } else if (last_check) {
159 nn_err(nn, "Reconfig timeout: 0x%08x\n", reg);
166 static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
168 bool timed_out = false;
170 /* Poll update field, waiting for NFP to ack the config */
171 while (!nfp_net_reconfig_check_done(nn, timed_out)) {
173 timed_out = time_is_before_eq_jiffies(deadline);
176 if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
179 return timed_out ? -EIO : 0;
182 static void nfp_net_reconfig_timer(struct timer_list *t)
184 struct nfp_net *nn = from_timer(nn, t, reconfig_timer);
186 spin_lock_bh(&nn->reconfig_lock);
188 nn->reconfig_timer_active = false;
190 /* If sync caller is present it will take over from us */
191 if (nn->reconfig_sync_present)
194 /* Read reconfig status and report errors */
195 nfp_net_reconfig_check_done(nn, true);
197 if (nn->reconfig_posted)
198 nfp_net_reconfig_start_async(nn, 0);
200 spin_unlock_bh(&nn->reconfig_lock);
204 * nfp_net_reconfig_post() - Post async reconfig request
205 * @nn: NFP Net device to reconfigure
206 * @update: The value for the update field in the BAR config
208 * Record FW reconfiguration request. Reconfiguration will be kicked off
209 * whenever reconfiguration machinery is idle. Multiple requests can be merged together.
212 static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
214 spin_lock_bh(&nn->reconfig_lock);
216 /* Sync caller will kick off async reconf when it's done, just post */
217 if (nn->reconfig_sync_present) {
218 nn->reconfig_posted |= update;
222 /* Opportunistically check if the previous command is done */
223 if (!nn->reconfig_timer_active ||
224 nfp_net_reconfig_check_done(nn, false))
225 nfp_net_reconfig_start_async(nn, update);
227 nn->reconfig_posted |= update;
229 spin_unlock_bh(&nn->reconfig_lock);
232 static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
234 bool cancelled_timer = false;
235 u32 pre_posted_requests;
237 spin_lock_bh(&nn->reconfig_lock);
239 nn->reconfig_sync_present = true;
241 if (nn->reconfig_timer_active) {
242 nn->reconfig_timer_active = false;
243 cancelled_timer = true;
245 pre_posted_requests = nn->reconfig_posted;
246 nn->reconfig_posted = 0;
248 spin_unlock_bh(&nn->reconfig_lock);
250 if (cancelled_timer) {
251 del_timer_sync(&nn->reconfig_timer);
252 nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
255 /* Run the posted reconfigs which were issued before we started */
256 if (pre_posted_requests) {
257 nfp_net_reconfig_start(nn, pre_posted_requests);
258 nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
262 static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
264 nfp_net_reconfig_sync_enter(nn);
266 spin_lock_bh(&nn->reconfig_lock);
267 nn->reconfig_sync_present = false;
268 spin_unlock_bh(&nn->reconfig_lock);
272 * nfp_net_reconfig() - Reconfigure the firmware
273 * @nn: NFP Net device to reconfigure
274 * @update: The value for the update field in the BAR config
276 * Write the update word to the BAR and ping the reconfig queue. Then poll
277 * until the firmware has acknowledged the update by zeroing the update word.
280 * Return: Negative errno on error, 0 on success
282 int nfp_net_reconfig(struct nfp_net *nn, u32 update)
286 nfp_net_reconfig_sync_enter(nn);
288 nfp_net_reconfig_start(nn, update);
289 ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
291 spin_lock_bh(&nn->reconfig_lock);
293 if (nn->reconfig_posted)
294 nfp_net_reconfig_start_async(nn, 0);
296 nn->reconfig_sync_present = false;
298 spin_unlock_bh(&nn->reconfig_lock);
304 * nfp_net_reconfig_mbox() - Reconfigure the firmware via the mailbox
305 * @nn: NFP Net device to reconfigure
306 * @mbox_cmd: The value for the mailbox command
308 * Helper function for mailbox updates
310 * Return: Negative errno on error, 0 on success
312 static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
314 u32 mbox = nn->tlv_caps.mbox_off;
317 if (!nfp_net_has_mbox(&nn->tlv_caps)) {
318 nn_err(nn, "no mailbox present, command: %u\n", mbox_cmd);
322 nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
324 ret = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
326 nn_err(nn, "Mailbox update error\n");
330 return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
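/* For reference, other paths in this driver use the helper above roughly as
 * follows (the VLAN filter code, for example, first writes its arguments to
 * the mailbox value area):
 *
 *	err = nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD);
 */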
333 /* Interrupt configuration and handling
337 * nfp_net_irq_unmask() - Unmask automasked interrupt
338 * @nn: NFP Network structure
339 * @entry_nr: MSI-X table entry
341 * Clear the ICR for the IRQ entry.
343 static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
345 nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
350 * nfp_net_irqs_alloc() - allocates MSI-X irqs
351 * @pdev: PCI device structure
352 * @irq_entries: Array to be initialized and used to hold the irq entries
353 * @min_irqs: Minimal acceptable number of interrupts
354 * @wanted_irqs: Target number of interrupts to allocate
356 * Return: Number of irqs obtained or 0 on error.
359 nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
360 unsigned int min_irqs, unsigned int wanted_irqs)
365 for (i = 0; i < wanted_irqs; i++)
366 irq_entries[i].entry = i;
368 got_irqs = pci_enable_msix_range(pdev, irq_entries,
369 min_irqs, wanted_irqs);
371 dev_err(&pdev->dev, "Failed to enable %d-%d MSI-X (err=%d)\n",
372 min_irqs, wanted_irqs, got_irqs);
376 if (got_irqs < wanted_irqs)
377 dev_warn(&pdev->dev, "Unable to allocate %d IRQs got only %d\n",
378 wanted_irqs, got_irqs);
384 * nfp_net_irqs_assign() - Assign interrupts allocated externally to netdev
385 * @nn: NFP Network structure
386 * @irq_entries: Table of allocated interrupts
387 * @n: Size of @irq_entries (number of entries to grab)
389 * After interrupts are allocated with nfp_net_irqs_alloc() this function
390 * should be called to assign them to a specific netdev (port).
393 nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
396 struct nfp_net_dp *dp = &nn->dp;
398 nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS;
399 dp->num_r_vecs = nn->max_r_vecs;
401 memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n);
403 if (dp->num_rx_rings > dp->num_r_vecs ||
404 dp->num_tx_rings > dp->num_r_vecs)
405 dev_warn(nn->dp.dev, "More rings (%d,%d) than vectors (%d).\n",
406 dp->num_rx_rings, dp->num_tx_rings,
409 dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings);
410 dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings);
411 dp->num_stack_tx_rings = dp->num_tx_rings;
415 * nfp_net_irqs_disable() - Disable interrupts
416 * @pdev: PCI device structure
418 * Undoes what @nfp_net_irqs_alloc() does.
420 void nfp_net_irqs_disable(struct pci_dev *pdev)
422 pci_disable_msix(pdev);
426 * nfp_net_irq_rxtx() - Interrupt service routine for RX/TX rings.
428 * @data: Opaque data structure
430 * Return: Indicate if the interrupt has been handled.
432 static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
434 struct nfp_net_r_vector *r_vec = data;
436 napi_schedule_irqoff(&r_vec->napi);
438 /* The FW auto-masks any interrupt, either via the MASK bit in
439 * the MSI-X table or via the per entry ICR field. So there
440 * is no need to disable interrupts here.
445 static irqreturn_t nfp_ctrl_irq_rxtx(int irq, void *data)
447 struct nfp_net_r_vector *r_vec = data;
449 tasklet_schedule(&r_vec->tasklet);
455 * nfp_net_read_link_status() - Reread link status from control BAR
456 * @nn: NFP Network structure
458 static void nfp_net_read_link_status(struct nfp_net *nn)
464 spin_lock_irqsave(&nn->link_status_lock, flags);
466 sts = nn_readl(nn, NFP_NET_CFG_STS);
467 link_up = !!(sts & NFP_NET_CFG_STS_LINK);
469 if (nn->link_up == link_up)
472 nn->link_up = link_up;
474 set_bit(NFP_PORT_CHANGED, &nn->port->flags);
477 netif_carrier_on(nn->dp.netdev);
478 netdev_info(nn->dp.netdev, "NIC Link is Up\n");
480 netif_carrier_off(nn->dp.netdev);
481 netdev_info(nn->dp.netdev, "NIC Link is Down\n");
484 spin_unlock_irqrestore(&nn->link_status_lock, flags);
488 * nfp_net_irq_lsc() - Interrupt service routine for link state changes
490 * @data: Opaque data structure
492 * Return: Indicate if the interrupt has been handled.
494 static irqreturn_t nfp_net_irq_lsc(int irq, void *data)
496 struct nfp_net *nn = data;
497 struct msix_entry *entry;
499 entry = &nn->irq_entries[NFP_NET_IRQ_LSC_IDX];
501 nfp_net_read_link_status(nn);
503 nfp_net_irq_unmask(nn, entry->entry);
509 * nfp_net_irq_exn() - Interrupt service routine for exceptions
511 * @data: Opaque data structure
513 * Return: Indicate if the interrupt has been handled.
515 static irqreturn_t nfp_net_irq_exn(int irq, void *data)
517 struct nfp_net *nn = data;
519 nn_err(nn, "%s: UNIMPLEMENTED.\n", __func__);
520 /* XXX TO BE IMPLEMENTED */
525 * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
526 * @tx_ring: TX ring structure
527 * @r_vec: IRQ vector servicing this ring
529 * @is_xdp: Is this an XDP TX ring?
532 nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
533 struct nfp_net_r_vector *r_vec, unsigned int idx,
536 struct nfp_net *nn = r_vec->nfp_net;
539 tx_ring->r_vec = r_vec;
540 tx_ring->is_xdp = is_xdp;
541 u64_stats_init(&tx_ring->r_vec->tx_sync);
543 tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
544 tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
548 * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
549 * @rx_ring: RX ring structure
550 * @r_vec: IRQ vector servicing this ring
554 nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
555 struct nfp_net_r_vector *r_vec, unsigned int idx)
557 struct nfp_net *nn = r_vec->nfp_net;
560 rx_ring->r_vec = r_vec;
561 u64_stats_init(&rx_ring->r_vec->rx_sync);
563 rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
564 rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
568 * nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
569 * @nn: NFP Network structure
570 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
571 * @format: printf-style format to construct the interrupt name
572 * @name: Pointer to allocated space for interrupt name
573 * @name_sz: Size of space for interrupt name
574 * @vector_idx: Index of MSI-X vector used for this interrupt
575 * @handler: IRQ handler to register for this interrupt
578 nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
579 const char *format, char *name, size_t name_sz,
580 unsigned int vector_idx, irq_handler_t handler)
582 struct msix_entry *entry;
585 entry = &nn->irq_entries[vector_idx];
587 snprintf(name, name_sz, format, nfp_net_name(nn));
588 err = request_irq(entry->vector, handler, 0, name, nn);
590 nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
594 nn_writeb(nn, ctrl_offset, entry->entry);
595 nfp_net_irq_unmask(nn, entry->entry);
601 * nfp_net_aux_irq_free() - Free an auxiliary interrupt (LSC or EXN)
602 * @nn: NFP Network structure
603 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
604 * @vector_idx: Index of MSI-X vector used for this interrupt
606 static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
607 unsigned int vector_idx)
609 nn_writeb(nn, ctrl_offset, 0xff);
611 free_irq(nn->irq_entries[vector_idx].vector, nn);
616 * One queue controller peripheral queue is used for transmit. The
617 * driver enqueues packets for transmit by advancing the write
618 * pointer. The device indicates that packets have been transmitted by
619 * advancing the read pointer. The driver maintains a local copy of
620 * the read and write pointer in @struct nfp_net_tx_ring. The driver
621 * keeps @wr_p in sync with the queue controller write pointer and can
622 * determine how many packets have been transmitted by comparing its
623 * copy of the read pointer @rd_p with the read pointer maintained by
624 * the queue controller peripheral.
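/* For example: @wr_p and @rd_p are free running u32 counters, so the number
 * of descriptors currently in flight is simply wr_p - rd_p, and the
 * unsigned subtraction stays correct across a wrap (wr_p = 5 with
 * rd_p = 0xfffffffe still yields 7).
 */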
628 * nfp_net_tx_full() - Check if the TX ring is full
629 * @tx_ring: TX ring to check
630 * @dcnt: Number of descriptors that need to be enqueued (must be >= 1)
632 * This function checks, based on the *host copy* of the read/write
633 * pointers, whether a given TX ring is full. The real TX queue may have
634 * some newly made available slots.
636 * Return: True if the ring is full.
638 static int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
640 return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
643 /* Wrappers for deciding when to stop and restart TX queues */
644 static int nfp_net_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
646 return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
649 static int nfp_net_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
651 return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
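/* Note the asymmetry above: the queue is stopped once fewer than
 * MAX_SKB_FRAGS + 1 descriptors are free, but is only woken again when at
 * least MAX_SKB_FRAGS * 4 are free. This hysteresis keeps the queue from
 * bouncing between stopped and running on every completion.
 */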
655 * nfp_net_tx_ring_stop() - stop tx ring
656 * @nd_q: netdev queue
657 * @tx_ring: driver tx queue structure
659 * Safely stop TX ring. Remember that while we are running .start_xmit()
660 * someone else may be cleaning the TX ring completions so we need to be
661 * extra careful here.
663 static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
664 struct nfp_net_tx_ring *tx_ring)
666 netif_tx_stop_queue(nd_q);
668 /* We can race with the TX completion out of NAPI so recheck */
670 if (unlikely(nfp_net_tx_ring_should_wake(tx_ring)))
671 netif_tx_start_queue(nd_q);
675 * nfp_net_tx_tso() - Set up Tx descriptor for LSO
676 * @r_vec: per-ring structure
677 * @txbuf: Pointer to driver soft TX descriptor
678 * @txd: Pointer to HW TX descriptor
679 * @skb: Pointer to SKB
681 * Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
682 * Return error on packet header greater than maximum supported LSO header size.
684 static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
685 struct nfp_net_tx_buf *txbuf,
686 struct nfp_net_tx_desc *txd, struct sk_buff *skb)
691 if (!skb_is_gso(skb))
694 if (!skb->encapsulation) {
695 txd->l3_offset = skb_network_offset(skb);
696 txd->l4_offset = skb_transport_offset(skb);
697 hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
699 txd->l3_offset = skb_inner_network_offset(skb);
700 txd->l4_offset = skb_inner_transport_offset(skb);
701 hdrlen = skb_inner_transport_header(skb) - skb->data +
702 inner_tcp_hdrlen(skb);
705 txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
706 txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
708 mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
709 txd->lso_hdrlen = hdrlen;
710 txd->mss = cpu_to_le16(mss);
711 txd->flags |= PCIE_DESC_TX_LSO;
713 u64_stats_update_begin(&r_vec->tx_sync);
715 u64_stats_update_end(&r_vec->tx_sync);
719 * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor
720 * @dp: NFP Net data path struct
721 * @r_vec: per-ring structure
722 * @txbuf: Pointer to driver soft TX descriptor
723 * @txd: Pointer to TX descriptor
724 * @skb: Pointer to SKB
726 * This function sets the TX checksum flags in the TX descriptor based
727 * on the configuration and the protocol of the packet to be transmitted.
729 static void nfp_net_tx_csum(struct nfp_net_dp *dp,
730 struct nfp_net_r_vector *r_vec,
731 struct nfp_net_tx_buf *txbuf,
732 struct nfp_net_tx_desc *txd, struct sk_buff *skb)
734 struct ipv6hdr *ipv6h;
738 if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
741 if (skb->ip_summed != CHECKSUM_PARTIAL)
744 txd->flags |= PCIE_DESC_TX_CSUM;
745 if (skb->encapsulation)
746 txd->flags |= PCIE_DESC_TX_ENCAP;
748 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
749 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
751 if (iph->version == 4) {
752 txd->flags |= PCIE_DESC_TX_IP4_CSUM;
753 l4_hdr = iph->protocol;
754 } else if (ipv6h->version == 6) {
755 l4_hdr = ipv6h->nexthdr;
757 nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
763 txd->flags |= PCIE_DESC_TX_TCP_CSUM;
766 txd->flags |= PCIE_DESC_TX_UDP_CSUM;
769 nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr);
773 u64_stats_update_begin(&r_vec->tx_sync);
774 if (skb->encapsulation)
775 r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
777 r_vec->hw_csum_tx += txbuf->pkt_cnt;
778 u64_stats_update_end(&r_vec->tx_sync);
781 static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
784 nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
785 tx_ring->wr_ptr_add = 0;
788 static int nfp_net_prep_port_id(struct sk_buff *skb)
790 struct metadata_dst *md_dst = skb_metadata_dst(skb);
795 if (unlikely(md_dst->type != METADATA_HW_PORT_MUX))
798 if (unlikely(skb_cow_head(skb, 8)))
801 data = skb_push(skb, 8);
802 put_unaligned_be32(NFP_NET_META_PORTID, data);
803 put_unaligned_be32(md_dst->u.port_info.port_id, data + 4);
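/* The 8 bytes pushed above form the TX metadata prepend: a 32 bit
 * NFP_NET_META_PORTID type word followed by the 32 bit mux port id, both
 * big endian. The byte count is reported back to the caller so the TX
 * descriptor's offset field can account for the prepend.
 */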
809 * nfp_net_tx() - Main transmit entry point
810 * @skb: SKB to transmit
811 * @netdev: netdev structure
813 * Return: NETDEV_TX_OK on success.
815 static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
817 struct nfp_net *nn = netdev_priv(netdev);
818 const struct skb_frag_struct *frag;
819 struct nfp_net_tx_desc *txd, txdg;
820 int f, nr_frags, wr_idx, md_bytes;
821 struct nfp_net_tx_ring *tx_ring;
822 struct nfp_net_r_vector *r_vec;
823 struct nfp_net_tx_buf *txbuf;
824 struct netdev_queue *nd_q;
825 struct nfp_net_dp *dp;
831 qidx = skb_get_queue_mapping(skb);
832 tx_ring = &dp->tx_rings[qidx];
833 r_vec = tx_ring->r_vec;
834 nd_q = netdev_get_tx_queue(dp->netdev, qidx);
836 nr_frags = skb_shinfo(skb)->nr_frags;
838 if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
839 nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
840 qidx, tx_ring->wr_p, tx_ring->rd_p);
841 netif_tx_stop_queue(nd_q);
842 nfp_net_tx_xmit_more_flush(tx_ring);
843 u64_stats_update_begin(&r_vec->tx_sync);
845 u64_stats_update_end(&r_vec->tx_sync);
846 return NETDEV_TX_BUSY;
849 md_bytes = nfp_net_prep_port_id(skb);
850 if (unlikely(md_bytes < 0)) {
851 nfp_net_tx_xmit_more_flush(tx_ring);
852 dev_kfree_skb_any(skb);
856 /* Start with the head skbuf */
857 dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
859 if (dma_mapping_error(dp->dev, dma_addr))
862 wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
864 /* Stash the soft descriptor of the head then initialize it */
865 txbuf = &tx_ring->txbufs[wr_idx];
867 txbuf->dma_addr = dma_addr;
870 txbuf->real_len = skb->len;
872 /* Build TX descriptor */
873 txd = &tx_ring->txds[wr_idx];
874 txd->offset_eop = (nr_frags ? 0 : PCIE_DESC_TX_EOP) | md_bytes;
875 txd->dma_len = cpu_to_le16(skb_headlen(skb));
876 nfp_desc_set_dma_addr(txd, dma_addr);
877 txd->data_len = cpu_to_le16(skb->len);
883 /* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
884 nfp_net_tx_tso(r_vec, txbuf, txd, skb);
885 nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
886 if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
887 txd->flags |= PCIE_DESC_TX_VLAN;
888 txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
893 /* all descs must match except for addr, length and eop */
896 for (f = 0; f < nr_frags; f++) {
897 frag = &skb_shinfo(skb)->frags[f];
898 fsize = skb_frag_size(frag);
900 dma_addr = skb_frag_dma_map(dp->dev, frag, 0,
901 fsize, DMA_TO_DEVICE);
902 if (dma_mapping_error(dp->dev, dma_addr))
905 wr_idx = D_IDX(tx_ring, wr_idx + 1);
906 tx_ring->txbufs[wr_idx].skb = skb;
907 tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
908 tx_ring->txbufs[wr_idx].fidx = f;
910 txd = &tx_ring->txds[wr_idx];
912 txd->dma_len = cpu_to_le16(fsize);
913 nfp_desc_set_dma_addr(txd, dma_addr);
915 (f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0;
918 u64_stats_update_begin(&r_vec->tx_sync);
920 u64_stats_update_end(&r_vec->tx_sync);
923 netdev_tx_sent_queue(nd_q, txbuf->real_len);
925 skb_tx_timestamp(skb);
927 tx_ring->wr_p += nr_frags + 1;
928 if (nfp_net_tx_ring_should_stop(tx_ring))
929 nfp_net_tx_ring_stop(nd_q, tx_ring);
931 tx_ring->wr_ptr_add += nr_frags + 1;
932 if (!skb->xmit_more || netif_xmit_stopped(nd_q))
933 nfp_net_tx_xmit_more_flush(tx_ring);
939 frag = &skb_shinfo(skb)->frags[f];
940 dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
941 skb_frag_size(frag), DMA_TO_DEVICE);
942 tx_ring->txbufs[wr_idx].skb = NULL;
943 tx_ring->txbufs[wr_idx].dma_addr = 0;
944 tx_ring->txbufs[wr_idx].fidx = -2;
947 wr_idx += tx_ring->cnt;
949 dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
950 skb_headlen(skb), DMA_TO_DEVICE);
951 tx_ring->txbufs[wr_idx].skb = NULL;
952 tx_ring->txbufs[wr_idx].dma_addr = 0;
953 tx_ring->txbufs[wr_idx].fidx = -2;
955 nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
956 nfp_net_tx_xmit_more_flush(tx_ring);
957 u64_stats_update_begin(&r_vec->tx_sync);
959 u64_stats_update_end(&r_vec->tx_sync);
960 dev_kfree_skb_any(skb);
965 * nfp_net_tx_complete() - Handle completed TX packets
966 * @tx_ring: TX ring structure
967 * @budget: NAPI budget (only used as bool to determine if in NAPI context)
969 static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
971 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
972 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
973 const struct skb_frag_struct *frag;
974 struct netdev_queue *nd_q;
975 u32 done_pkts = 0, done_bytes = 0;
982 if (tx_ring->wr_p == tx_ring->rd_p)
985 /* Work out how many descriptors have been transmitted */
986 qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
988 if (qcp_rd_p == tx_ring->qcp_rd_p)
991 todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
994 idx = D_IDX(tx_ring, tx_ring->rd_p++);
996 skb = tx_ring->txbufs[idx].skb;
1000 nr_frags = skb_shinfo(skb)->nr_frags;
1001 fidx = tx_ring->txbufs[idx].fidx;
1005 dma_unmap_single(dp->dev, tx_ring->txbufs[idx].dma_addr,
1006 skb_headlen(skb), DMA_TO_DEVICE);
1008 done_pkts += tx_ring->txbufs[idx].pkt_cnt;
1009 done_bytes += tx_ring->txbufs[idx].real_len;
1011 /* unmap fragment */
1012 frag = &skb_shinfo(skb)->frags[fidx];
1013 dma_unmap_page(dp->dev, tx_ring->txbufs[idx].dma_addr,
1014 skb_frag_size(frag), DMA_TO_DEVICE);
1017 /* check for last gather fragment */
1018 if (fidx == nr_frags - 1)
1019 napi_consume_skb(skb, budget);
1021 tx_ring->txbufs[idx].dma_addr = 0;
1022 tx_ring->txbufs[idx].skb = NULL;
1023 tx_ring->txbufs[idx].fidx = -2;
1026 tx_ring->qcp_rd_p = qcp_rd_p;
1028 u64_stats_update_begin(&r_vec->tx_sync);
1029 r_vec->tx_bytes += done_bytes;
1030 r_vec->tx_pkts += done_pkts;
1031 u64_stats_update_end(&r_vec->tx_sync);
1036 nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
1037 netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
1038 if (nfp_net_tx_ring_should_wake(tx_ring)) {
1039 /* Make sure TX thread will see updated tx_ring->rd_p */
1042 if (unlikely(netif_tx_queue_stopped(nd_q)))
1043 netif_tx_wake_queue(nd_q);
1046 WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
1047 "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
1048 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
1051 static bool nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
1053 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
1054 u32 done_pkts = 0, done_bytes = 0;
1059 /* Work out how many descriptors have been transmitted */
1060 qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
1062 if (qcp_rd_p == tx_ring->qcp_rd_p)
1065 todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
1067 done_all = todo <= NFP_NET_XDP_MAX_COMPLETE;
1068 todo = min(todo, NFP_NET_XDP_MAX_COMPLETE);
1070 tx_ring->qcp_rd_p = D_IDX(tx_ring, tx_ring->qcp_rd_p + todo);
1074 idx = D_IDX(tx_ring, tx_ring->rd_p);
1077 done_bytes += tx_ring->txbufs[idx].real_len;
1080 u64_stats_update_begin(&r_vec->tx_sync);
1081 r_vec->tx_bytes += done_bytes;
1082 r_vec->tx_pkts += done_pkts;
1083 u64_stats_update_end(&r_vec->tx_sync);
1085 WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
1086 "XDP TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
1087 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
1093 * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
1094 * @dp: NFP Net data path struct
1095 * @tx_ring: TX ring structure
1097 * Assumes that the device is stopped, must be idempotent.
1100 nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
1102 const struct skb_frag_struct *frag;
1103 struct netdev_queue *nd_q;
1105 while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
1106 struct nfp_net_tx_buf *tx_buf;
1107 struct sk_buff *skb;
1110 idx = D_IDX(tx_ring, tx_ring->rd_p);
1111 tx_buf = &tx_ring->txbufs[idx];
1113 skb = tx_ring->txbufs[idx].skb;
1114 nr_frags = skb_shinfo(skb)->nr_frags;
1116 if (tx_buf->fidx == -1) {
1118 dma_unmap_single(dp->dev, tx_buf->dma_addr,
1119 skb_headlen(skb), DMA_TO_DEVICE);
1121 /* unmap fragment */
1122 frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
1123 dma_unmap_page(dp->dev, tx_buf->dma_addr,
1124 skb_frag_size(frag), DMA_TO_DEVICE);
1127 /* check for last gather fragment */
1128 if (tx_buf->fidx == nr_frags - 1)
1129 dev_kfree_skb_any(skb);
1131 tx_buf->dma_addr = 0;
1135 tx_ring->qcp_rd_p++;
1139 memset(tx_ring->txds, 0, tx_ring->size);
1142 tx_ring->qcp_rd_p = 0;
1143 tx_ring->wr_ptr_add = 0;
1145 if (tx_ring->is_xdp || !dp->netdev)
1148 nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
1149 netdev_tx_reset_queue(nd_q);
1152 static void nfp_net_tx_timeout(struct net_device *netdev)
1154 struct nfp_net *nn = netdev_priv(netdev);
1157 for (i = 0; i < nn->dp.netdev->real_num_tx_queues; i++) {
1158 if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
1160 nn_warn(nn, "TX timeout on ring: %d\n", i);
1162 nn_warn(nn, "TX watchdog timeout\n");
1165 /* Receive processing
1168 nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
1170 unsigned int fl_bufsz;
1172 fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
1173 fl_bufsz += dp->rx_dma_off;
1174 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
1175 fl_bufsz += NFP_NET_MAX_PREPEND;
1177 fl_bufsz += dp->rx_offset;
1178 fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;
1180 fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
1181 fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
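/* Put differently, the freelist buffer must hold the DMA headroom, the
 * worst case metadata prepend, an Ethernet header with two VLAN tags plus
 * the MTU, and still leave room for the skb_shared_info which build_skb()
 * places at the end of the buffer.
 */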
1187 nfp_net_free_frag(void *frag, bool xdp)
1190 skb_free_frag(frag);
1192 __free_page(virt_to_page(frag));
1196 * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
1197 * @dp: NFP Net data path struct
1198 * @dma_addr: Pointer to storage for DMA address (output param)
1200 * This function will allocate a new page frag and map it for DMA.
1202 * Return: allocated page frag or NULL on failure.
1204 static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
1208 if (!dp->xdp_prog) {
1209 frag = netdev_alloc_frag(dp->fl_bufsz);
1213 page = alloc_page(GFP_KERNEL);
1214 frag = page ? page_address(page) : NULL;
1217 nn_dp_warn(dp, "Failed to alloc receive page frag\n");
1221 *dma_addr = nfp_net_dma_map_rx(dp, frag);
1222 if (dma_mapping_error(dp->dev, *dma_addr)) {
1223 nfp_net_free_frag(frag, dp->xdp_prog);
1224 nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
1231 static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
1235 if (!dp->xdp_prog) {
1236 frag = napi_alloc_frag(dp->fl_bufsz);
1237 if (unlikely(!frag))
1242 page = dev_alloc_page();
1243 if (unlikely(!page))
1245 frag = page_address(page);
1248 *dma_addr = nfp_net_dma_map_rx(dp, frag);
1249 if (dma_mapping_error(dp->dev, *dma_addr)) {
1250 nfp_net_free_frag(frag, dp->xdp_prog);
1251 nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
1259 * nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings
1260 * @dp: NFP Net data path struct
1261 * @rx_ring: RX ring structure
1262 * @frag: page fragment buffer
1263 * @dma_addr: DMA address of skb mapping
1265 static void nfp_net_rx_give_one(const struct nfp_net_dp *dp,
1266 struct nfp_net_rx_ring *rx_ring,
1267 void *frag, dma_addr_t dma_addr)
1269 unsigned int wr_idx;
1271 wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
1273 nfp_net_dma_sync_dev_rx(dp, dma_addr);
1275 /* Stash SKB and DMA address away */
1276 rx_ring->rxbufs[wr_idx].frag = frag;
1277 rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
1279 /* Fill freelist descriptor */
1280 rx_ring->rxds[wr_idx].fld.reserved = 0;
1281 rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
1282 nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
1283 dma_addr + dp->rx_dma_off);
1286 if (!(rx_ring->wr_p % NFP_NET_FL_BATCH)) {
1287 /* Update write pointer of the freelist queue. Make
1288 * sure all writes are flushed before telling the hardware.
1291 nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, NFP_NET_FL_BATCH);
1296 * nfp_net_rx_ring_reset() - Reset the SW state of the freelist after the device is disabled
1297 * @rx_ring: RX ring structure
1299 * Assumes that the device is stopped, must be idempotent.
1301 static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
1303 unsigned int wr_idx, last_idx;
1305 /* wr_p == rd_p means ring was never fed FL bufs. RX rings are always
1306 * kept at cnt - 1 FL bufs.
1308 if (rx_ring->wr_p == 0 && rx_ring->rd_p == 0)
1311 /* Move the empty entry to the end of the list */
1312 wr_idx = D_IDX(rx_ring, rx_ring->wr_p);
1313 last_idx = rx_ring->cnt - 1;
1314 rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
1315 rx_ring->rxbufs[wr_idx].frag = rx_ring->rxbufs[last_idx].frag;
1316 rx_ring->rxbufs[last_idx].dma_addr = 0;
1317 rx_ring->rxbufs[last_idx].frag = NULL;
1319 memset(rx_ring->rxds, 0, rx_ring->size);
1325 * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
1326 * @dp: NFP Net data path struct
1327 * @rx_ring: RX ring to remove buffers from
1329 * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
1330 * entries. After device is disabled nfp_net_rx_ring_reset() must be called
1331 * to restore required ring geometry.
1334 nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
1335 struct nfp_net_rx_ring *rx_ring)
1339 for (i = 0; i < rx_ring->cnt - 1; i++) {
1340 /* NULL skb can only happen when initial filling of the ring
1341 * fails to allocate enough buffers and calls here to free
1342 * already allocated ones.
1344 if (!rx_ring->rxbufs[i].frag)
1347 nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
1348 nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
1349 rx_ring->rxbufs[i].dma_addr = 0;
1350 rx_ring->rxbufs[i].frag = NULL;
1355 * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
1356 * @dp: NFP Net data path struct
1357 * @rx_ring: RX ring to allocate buffers for
1360 nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
1361 struct nfp_net_rx_ring *rx_ring)
1363 struct nfp_net_rx_buf *rxbufs;
1366 rxbufs = rx_ring->rxbufs;
1368 for (i = 0; i < rx_ring->cnt - 1; i++) {
1369 rxbufs[i].frag = nfp_net_rx_alloc_one(dp, &rxbufs[i].dma_addr);
1370 if (!rxbufs[i].frag) {
1371 nfp_net_rx_ring_bufs_free(dp, rx_ring);
1380 * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW
1381 * @dp: NFP Net data path struct
1382 * @rx_ring: RX ring to fill
1385 nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
1386 struct nfp_net_rx_ring *rx_ring)
1390 for (i = 0; i < rx_ring->cnt - 1; i++)
1391 nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
1392 rx_ring->rxbufs[i].dma_addr);
1396 * nfp_net_rx_csum_has_errors() - group check if rxd has any csum errors
1397 * @flags: RX descriptor flags field in CPU byte order
1399 static int nfp_net_rx_csum_has_errors(u16 flags)
1401 u16 csum_all_checked, csum_all_ok;
1403 csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
1404 csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;
1406 return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
1410 * nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags
1411 * @dp: NFP Net data path struct
1412 * @r_vec: per-ring structure
1413 * @rxd: Pointer to RX descriptor
1414 * @meta: Parsed metadata prepend
1415 * @skb: Pointer to SKB
1417 static void nfp_net_rx_csum(struct nfp_net_dp *dp,
1418 struct nfp_net_r_vector *r_vec,
1419 struct nfp_net_rx_desc *rxd,
1420 struct nfp_meta_parsed *meta, struct sk_buff *skb)
1422 skb_checksum_none_assert(skb);
1424 if (!(dp->netdev->features & NETIF_F_RXCSUM))
1427 if (meta->csum_type) {
1428 skb->ip_summed = meta->csum_type;
1429 skb->csum = meta->csum;
1430 u64_stats_update_begin(&r_vec->rx_sync);
1431 r_vec->hw_csum_rx_complete++;
1432 u64_stats_update_end(&r_vec->rx_sync);
1436 if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
1437 u64_stats_update_begin(&r_vec->rx_sync);
1438 r_vec->hw_csum_rx_error++;
1439 u64_stats_update_end(&r_vec->rx_sync);
1443 /* Assume that the firmware will never report inner CSUM_OK unless outer
1444 * L4 headers were successfully parsed. FW will always report zero UDP
1445 * checksum as CSUM_OK.
1447 if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
1448 rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
1449 __skb_incr_checksum_unnecessary(skb);
1450 u64_stats_update_begin(&r_vec->rx_sync);
1451 r_vec->hw_csum_rx_ok++;
1452 u64_stats_update_end(&r_vec->rx_sync);
1455 if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
1456 rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
1457 __skb_incr_checksum_unnecessary(skb);
1458 u64_stats_update_begin(&r_vec->rx_sync);
1459 r_vec->hw_csum_rx_inner_ok++;
1460 u64_stats_update_end(&r_vec->rx_sync);
1465 nfp_net_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
1466 unsigned int type, __be32 *hash)
1468 if (!(netdev->features & NETIF_F_RXHASH))
1472 case NFP_NET_RSS_IPV4:
1473 case NFP_NET_RSS_IPV6:
1474 case NFP_NET_RSS_IPV6_EX:
1475 meta->hash_type = PKT_HASH_TYPE_L3;
1478 meta->hash_type = PKT_HASH_TYPE_L4;
1482 meta->hash = get_unaligned_be32(hash);
1486 nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
1487 void *data, struct nfp_net_rx_desc *rxd)
1489 struct nfp_net_rx_hash *rx_hash = data;
1491 if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
1494 nfp_net_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type),
1499 nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
1500 void *data, int meta_len)
1504 meta_info = get_unaligned_be32(data);
1508 switch (meta_info & NFP_NET_META_FIELD_MASK) {
1509 case NFP_NET_META_HASH:
1510 meta_info >>= NFP_NET_META_FIELD_SIZE;
1511 nfp_net_set_hash(netdev, meta,
1512 meta_info & NFP_NET_META_FIELD_MASK,
1516 case NFP_NET_META_MARK:
1517 meta->mark = get_unaligned_be32(data);
1520 case NFP_NET_META_PORTID:
1521 meta->portid = get_unaligned_be32(data);
1524 case NFP_NET_META_CSUM:
1525 meta->csum_type = CHECKSUM_COMPLETE;
1527 (__force __wsum)__get_unaligned_cpu32(data);
1534 meta_info >>= NFP_NET_META_FIELD_SIZE;
1541 nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
1542 struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
1543 struct sk_buff *skb)
1545 u64_stats_update_begin(&r_vec->rx_sync);
1547 /* If we have both skb and rxbuf the replacement buffer allocation
1548 * must have failed, count this as an alloc failure.
1551 r_vec->rx_replace_buf_alloc_fail++;
1552 u64_stats_update_end(&r_vec->rx_sync);
1554 /* skb is built based on the frag, free_skb() would free the frag
1555 * so to be able to reuse it we need an extra ref.
1557 if (skb && rxbuf && skb->head == rxbuf->frag)
1558 page_ref_inc(virt_to_head_page(rxbuf->frag));
1560 nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
1562 dev_kfree_skb_any(skb);
1566 nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
1567 struct nfp_net_tx_ring *tx_ring,
1568 struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
1569 unsigned int pkt_len, bool *completed)
1571 struct nfp_net_tx_buf *txbuf;
1572 struct nfp_net_tx_desc *txd;
1575 if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
1577 nfp_net_xdp_complete(tx_ring);
1581 if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
1582 nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf,
1588 wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
1590 /* Stash the soft descriptor of the head then initialize it */
1591 txbuf = &tx_ring->txbufs[wr_idx];
1593 nfp_net_rx_give_one(dp, rx_ring, txbuf->frag, txbuf->dma_addr);
1595 txbuf->frag = rxbuf->frag;
1596 txbuf->dma_addr = rxbuf->dma_addr;
1599 txbuf->real_len = pkt_len;
1601 dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
1602 pkt_len, DMA_BIDIRECTIONAL);
1604 /* Build TX descriptor */
1605 txd = &tx_ring->txds[wr_idx];
1606 txd->offset_eop = PCIE_DESC_TX_EOP;
1607 txd->dma_len = cpu_to_le16(pkt_len);
1608 nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off);
1609 txd->data_len = cpu_to_le16(pkt_len);
1613 txd->lso_hdrlen = 0;
1616 tx_ring->wr_ptr_add++;
1621 * nfp_net_rx() - receive up to @budget packets on @rx_ring
1622 * @rx_ring: RX ring to receive from
1623 * @budget: NAPI budget
1625 * Note, this function is separated out from the napi poll function to
1626 * more cleanly separate packet receive code from other bookkeeping
1627 * functions performed in the napi poll function.
1629 * Return: Number of packets received.
1631 static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
1633 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
1634 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
1635 struct nfp_net_tx_ring *tx_ring;
1636 struct bpf_prog *xdp_prog;
1637 bool xdp_tx_cmpl = false;
1638 unsigned int true_bufsz;
1639 struct sk_buff *skb;
1640 int pkts_polled = 0;
1641 struct xdp_buff xdp;
1645 xdp_prog = READ_ONCE(dp->xdp_prog);
1646 true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
1647 xdp.rxq = &rx_ring->xdp_rxq;
1648 tx_ring = r_vec->xdp_ring;
1650 while (pkts_polled < budget) {
1651 unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
1652 struct nfp_net_rx_buf *rxbuf;
1653 struct nfp_net_rx_desc *rxd;
1654 struct nfp_meta_parsed meta;
1655 struct net_device *netdev;
1656 dma_addr_t new_dma_addr;
1657 u32 meta_len_xdp = 0;
1660 idx = D_IDX(rx_ring, rx_ring->rd_p);
1662 rxd = &rx_ring->rxds[idx];
1663 if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
1666 /* Memory barrier to ensure that we won't do other reads
1667 * before the DD bit.
1671 memset(&meta, 0, sizeof(meta));
1676 rxbuf = &rx_ring->rxbufs[idx];
1678 * <-- [rx_offset] -->
1679 * ---------------------------------------------------------
1680 * | [XX] | metadata | packet | XXXX |
1681 * ---------------------------------------------------------
1682 * <---------------- data_len --------------->
1684 * The rx_offset is fixed for all packets, the meta_len can vary
1685 * on a packet by packet basis. If rx_offset is set to zero
1686 * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
1687 * buffer and is immediately followed by the packet (no [XX]).
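/* Worked example (values illustrative only): with a fixed rx_offset of 32
 * and an 8 byte metadata prepend, the packet starts at
 * pkt_off = NFP_NET_RX_BUF_HEADROOM + rx_dma_off + 32 and the metadata at
 * meta_off = pkt_off - 8, matching the calculations below.
 */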
1689 meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
1690 data_len = le16_to_cpu(rxd->rxd.data_len);
1691 pkt_len = data_len - meta_len;
1693 pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
1694 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
1695 pkt_off += meta_len;
1697 pkt_off += dp->rx_offset;
1698 meta_off = pkt_off - meta_len;
1701 u64_stats_update_begin(&r_vec->rx_sync);
1703 r_vec->rx_bytes += pkt_len;
1704 u64_stats_update_end(&r_vec->rx_sync);
1706 if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
1707 (dp->rx_offset && meta_len > dp->rx_offset))) {
1708 nn_dp_warn(dp, "oversized RX packet metadata %u\n",
1710 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
1714 nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
1717 if (!dp->chained_metadata_format) {
1718 nfp_net_set_hash_desc(dp->netdev, &meta,
1719 rxbuf->frag + meta_off, rxd);
1720 } else if (meta_len) {
1723 end = nfp_net_parse_meta(dp->netdev, &meta,
1724 rxbuf->frag + meta_off,
1726 if (unlikely(end != rxbuf->frag + pkt_off)) {
1727 nn_dp_warn(dp, "invalid RX packet metadata\n");
1728 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
1734 if (xdp_prog && !meta.portid) {
1735 void *orig_data = rxbuf->frag + pkt_off;
1736 unsigned int dma_off;
1739 xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
1740 xdp.data = orig_data;
1741 xdp.data_meta = orig_data;
1742 xdp.data_end = orig_data + pkt_len;
1744 act = bpf_prog_run_xdp(xdp_prog, &xdp);
1746 pkt_len = xdp.data_end - xdp.data;
1747 pkt_off += xdp.data - orig_data;
1751 meta_len_xdp = xdp.data - xdp.data_meta;
1754 dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
1755 if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring,
1760 trace_xdp_exception(dp->netdev,
1764 bpf_warn_invalid_xdp_action(act);
1767 trace_xdp_exception(dp->netdev, xdp_prog, act);
1770 nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
1776 if (likely(!meta.portid)) {
1777 netdev = dp->netdev;
1778 } else if (meta.portid == NFP_META_PORT_ID_CTRL) {
1779 struct nfp_net *nn = netdev_priv(dp->netdev);
1781 nfp_app_ctrl_rx_raw(nn->app, rxbuf->frag + pkt_off,
1783 nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
1789 nn = netdev_priv(dp->netdev);
1790 netdev = nfp_app_repr_get(nn->app, meta.portid);
1791 if (unlikely(!netdev)) {
1792 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
1796 nfp_repr_inc_rx_stats(netdev, pkt_len);
1799 skb = build_skb(rxbuf->frag, true_bufsz);
1800 if (unlikely(!skb)) {
1801 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
1804 new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
1805 if (unlikely(!new_frag)) {
1806 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
1810 nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
1812 nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
1814 skb_reserve(skb, pkt_off);
1815 skb_put(skb, pkt_len);
1817 skb->mark = meta.mark;
1818 skb_set_hash(skb, meta.hash, meta.hash_type);
1820 skb_record_rx_queue(skb, rx_ring->idx);
1821 skb->protocol = eth_type_trans(skb, netdev);
1823 nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb);
1825 if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
1826 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1827 le16_to_cpu(rxd->rxd.vlan));
1829 skb_metadata_set(skb, meta_len_xdp);
1831 napi_gro_receive(&rx_ring->r_vec->napi, skb);
1835 if (tx_ring->wr_ptr_add)
1836 nfp_net_tx_xmit_more_flush(tx_ring);
1837 else if (unlikely(tx_ring->wr_p != tx_ring->rd_p) &&
1839 if (!nfp_net_xdp_complete(tx_ring))
1840 pkts_polled = budget;
1848 * nfp_net_poll() - napi poll function
1849 * @napi: NAPI structure
1850 * @budget: NAPI budget
1852 * Return: number of packets polled.
1854 static int nfp_net_poll(struct napi_struct *napi, int budget)
1856 struct nfp_net_r_vector *r_vec =
1857 container_of(napi, struct nfp_net_r_vector, napi);
1858 unsigned int pkts_polled = 0;
1861 nfp_net_tx_complete(r_vec->tx_ring, budget);
1863 pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
1865 if (pkts_polled < budget)
1866 if (napi_complete_done(napi, pkts_polled))
1867 nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
1872 /* Control device data path
1876 nfp_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
1877 struct sk_buff *skb, bool old)
1879 unsigned int real_len = skb->len, meta_len = 0;
1880 struct nfp_net_tx_ring *tx_ring;
1881 struct nfp_net_tx_buf *txbuf;
1882 struct nfp_net_tx_desc *txd;
1883 struct nfp_net_dp *dp;
1884 dma_addr_t dma_addr;
1887 dp = &r_vec->nfp_net->dp;
1888 tx_ring = r_vec->tx_ring;
1890 if (WARN_ON_ONCE(skb_shinfo(skb)->nr_frags)) {
1891 nn_dp_warn(dp, "Driver's CTRL TX does not implement gather\n");
1895 if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
1896 u64_stats_update_begin(&r_vec->tx_sync);
1898 u64_stats_update_end(&r_vec->tx_sync);
1900 __skb_queue_tail(&r_vec->queue, skb);
1902 __skb_queue_head(&r_vec->queue, skb);
1906 if (nfp_app_ctrl_has_meta(nn->app)) {
1907 if (unlikely(skb_headroom(skb) < 8)) {
1908 nn_dp_warn(dp, "CTRL TX on skb without headroom\n");
1912 put_unaligned_be32(NFP_META_PORT_ID_CTRL, skb_push(skb, 4));
1913 put_unaligned_be32(NFP_NET_META_PORTID, skb_push(skb, 4));
1916 /* Start with the head skbuf */
1917 dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
1919 if (dma_mapping_error(dp->dev, dma_addr))
1922 wr_idx = D_IDX(tx_ring, tx_ring->wr_p);
1924 /* Stash the soft descriptor of the head then initialize it */
1925 txbuf = &tx_ring->txbufs[wr_idx];
1927 txbuf->dma_addr = dma_addr;
1930 txbuf->real_len = real_len;
1932 /* Build TX descriptor */
1933 txd = &tx_ring->txds[wr_idx];
1934 txd->offset_eop = meta_len | PCIE_DESC_TX_EOP;
1935 txd->dma_len = cpu_to_le16(skb_headlen(skb));
1936 nfp_desc_set_dma_addr(txd, dma_addr);
1937 txd->data_len = cpu_to_le16(skb->len);
1941 txd->lso_hdrlen = 0;
1944 tx_ring->wr_ptr_add++;
1945 nfp_net_tx_xmit_more_flush(tx_ring);
1950 nn_dp_warn(dp, "Failed to DMA map TX CTRL buffer\n");
1952 u64_stats_update_begin(&r_vec->tx_sync);
1954 u64_stats_update_end(&r_vec->tx_sync);
1955 dev_kfree_skb_any(skb);
1959 bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
1961 struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
1963 return nfp_ctrl_tx_one(nn, r_vec, skb, false);
1966 bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
1968 struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
1971 spin_lock_bh(&r_vec->lock);
1972 ret = nfp_ctrl_tx_one(nn, r_vec, skb, false);
1973 spin_unlock_bh(&r_vec->lock);
1978 static void __nfp_ctrl_tx_queued(struct nfp_net_r_vector *r_vec)
1980 struct sk_buff *skb;
1982 while ((skb = __skb_dequeue(&r_vec->queue)))
1983 if (nfp_ctrl_tx_one(r_vec->nfp_net, r_vec, skb, true))
1988 nfp_ctrl_meta_ok(struct nfp_net *nn, void *data, unsigned int meta_len)
1990 u32 meta_type, meta_tag;
1992 if (!nfp_app_ctrl_has_meta(nn->app))
1998 meta_type = get_unaligned_be32(data);
1999 meta_tag = get_unaligned_be32(data + 4);
2001 return (meta_type == NFP_NET_META_PORTID &&
2002 meta_tag == NFP_META_PORT_ID_CTRL);
2006 nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
2007 struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring)
2009 unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
2010 struct nfp_net_rx_buf *rxbuf;
2011 struct nfp_net_rx_desc *rxd;
2012 dma_addr_t new_dma_addr;
2013 struct sk_buff *skb;
2017 idx = D_IDX(rx_ring, rx_ring->rd_p);
2019 rxd = &rx_ring->rxds[idx];
2020 if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
2023 /* Memory barrier to ensure that we won't do other reads
2024 * before the DD bit.
2030 rxbuf = &rx_ring->rxbufs[idx];
2031 meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
2032 data_len = le16_to_cpu(rxd->rxd.data_len);
2033 pkt_len = data_len - meta_len;
2035 pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
2036 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
2037 pkt_off += meta_len;
2039 pkt_off += dp->rx_offset;
2040 meta_off = pkt_off - meta_len;
2043 u64_stats_update_begin(&r_vec->rx_sync);
2045 r_vec->rx_bytes += pkt_len;
2046 u64_stats_update_end(&r_vec->rx_sync);
2048 nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off, data_len);
2050 if (unlikely(!nfp_ctrl_meta_ok(nn, rxbuf->frag + meta_off, meta_len))) {
2051 nn_dp_warn(dp, "incorrect metadata for ctrl packet (%d)\n",
2053 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
2057 skb = build_skb(rxbuf->frag, dp->fl_bufsz);
2058 if (unlikely(!skb)) {
2059 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
2062 new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
2063 if (unlikely(!new_frag)) {
2064 nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
2068 nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
2070 nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
2072 skb_reserve(skb, pkt_off);
2073 skb_put(skb, pkt_len);
2075 nfp_app_ctrl_rx(nn->app, skb);
2080 static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
2082 struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
2083 struct nfp_net *nn = r_vec->nfp_net;
2084 struct nfp_net_dp *dp = &nn->dp;
2085 unsigned int budget = 512;
2087 while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
2093 static void nfp_ctrl_poll(unsigned long arg)
2095 struct nfp_net_r_vector *r_vec = (void *)arg;
2097 spin_lock(&r_vec->lock);
2098 nfp_net_tx_complete(r_vec->tx_ring, 0);
2099 __nfp_ctrl_tx_queued(r_vec);
2100 spin_unlock(&r_vec->lock);
2102 if (nfp_ctrl_rx(r_vec)) {
2103 nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
2105 tasklet_schedule(&r_vec->tasklet);
2106 nn_dp_warn(&r_vec->nfp_net->dp,
2107 "control message budget exceeded!\n");
2111 /* Setup and Configuration
2115 * nfp_net_vecs_init() - Assign IRQs and set up rvecs.
2116 * @nn: NFP Network structure
2118 static void nfp_net_vecs_init(struct nfp_net *nn)
2120 struct nfp_net_r_vector *r_vec;
2123 nn->lsc_handler = nfp_net_irq_lsc;
2124 nn->exn_handler = nfp_net_irq_exn;
2126 for (r = 0; r < nn->max_r_vecs; r++) {
2127 struct msix_entry *entry;
2129 entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r];
2131 r_vec = &nn->r_vecs[r];
2132 r_vec->nfp_net = nn;
2133 r_vec->irq_entry = entry->entry;
2134 r_vec->irq_vector = entry->vector;
2136 if (nn->dp.netdev) {
2137 r_vec->handler = nfp_net_irq_rxtx;
2139 r_vec->handler = nfp_ctrl_irq_rxtx;
2141 __skb_queue_head_init(&r_vec->queue);
2142 spin_lock_init(&r_vec->lock);
2143 tasklet_init(&r_vec->tasklet, nfp_ctrl_poll,
2144 (unsigned long)r_vec);
2145 tasklet_disable(&r_vec->tasklet);
2148 cpumask_set_cpu(r, &r_vec->affinity_mask);
2153 * nfp_net_tx_ring_free() - Free resources allocated to a TX ring
2154 * @tx_ring: TX ring to free
2156 static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
2158 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
2159 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
2161 kvfree(tx_ring->txbufs);
2164 dma_free_coherent(dp->dev, tx_ring->size,
2165 tx_ring->txds, tx_ring->dma);
2168 tx_ring->txbufs = NULL;
2169 tx_ring->txds = NULL;
2175 * nfp_net_tx_ring_alloc() - Allocate resources for a TX ring
2176 * @dp: NFP Net data path struct
2177 * @tx_ring: TX Ring structure to allocate
2179 * Return: 0 on success, negative errno otherwise.
2182 nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
2184 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
2186 tx_ring->cnt = dp->txd_cnt;
2188 tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
2189 tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size,
2191 GFP_KERNEL | __GFP_NOWARN);
2192 if (!tx_ring->txds) {
2193 netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
2198 tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs),
2200 if (!tx_ring->txbufs)
2203 if (!tx_ring->is_xdp && dp->netdev)
2204 netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
2210 nfp_net_tx_ring_free(tx_ring);
2215 nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
2216 struct nfp_net_tx_ring *tx_ring)
2220 if (!tx_ring->is_xdp)
2223 for (i = 0; i < tx_ring->cnt; i++) {
2224 if (!tx_ring->txbufs[i].frag)
2227 nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr);
2228 __free_page(virt_to_page(tx_ring->txbufs[i].frag));
2233 nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
2234 struct nfp_net_tx_ring *tx_ring)
2236 struct nfp_net_tx_buf *txbufs = tx_ring->txbufs;
2239 if (!tx_ring->is_xdp)
2242 for (i = 0; i < tx_ring->cnt; i++) {
2243 txbufs[i].frag = nfp_net_rx_alloc_one(dp, &txbufs[i].dma_addr);
2244 if (!txbufs[i].frag) {
2245 nfp_net_tx_ring_bufs_free(dp, tx_ring);
2253 static int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
2257 dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
2262 for (r = 0; r < dp->num_tx_rings; r++) {
2265 if (r >= dp->num_stack_tx_rings)
2266 bias = dp->num_stack_tx_rings;
2268 nfp_net_tx_ring_init(&dp->tx_rings[r], &nn->r_vecs[r - bias],
2271 if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r]))
2274 if (nfp_net_tx_ring_bufs_alloc(dp, &dp->tx_rings[r]))
2282 nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
2284 nfp_net_tx_ring_free(&dp->tx_rings[r]);
2286 kfree(dp->tx_rings);
2290 static void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
2294 for (r = 0; r < dp->num_tx_rings; r++) {
2295 nfp_net_tx_ring_bufs_free(dp, &dp->tx_rings[r]);
2296 nfp_net_tx_ring_free(&dp->tx_rings[r]);
2299 kfree(dp->tx_rings);
2303 * nfp_net_rx_ring_free() - Free resources allocated to a RX ring
2304 * @rx_ring: RX ring to free
2306 static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
2308 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
2309 struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
2312 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
2313 kvfree(rx_ring->rxbufs);
2316 dma_free_coherent(dp->dev, rx_ring->size,
2317 rx_ring->rxds, rx_ring->dma);
2320 rx_ring->rxbufs = NULL;
2321 rx_ring->rxds = NULL;
2327 * nfp_net_rx_ring_alloc() - Allocate resources for an RX ring
2328 * @dp: NFP Net data path struct
2329 * @rx_ring: RX ring to allocate
2331 * Return: 0 on success, negative errno otherwise.
2334 nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
2339 err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
2345 rx_ring->cnt = dp->rxd_cnt;
2346 rx_ring->size = array_size(rx_ring->cnt, sizeof(*rx_ring->rxds));
2347 rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size,
2349 &rx_ring->dma, GFP_KERNEL | __GFP_NOWARN);
2350 if (!rx_ring->rxds) {
2351 netdev_warn(dp->netdev, "failed to allocate RX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
2356 rx_ring->rxbufs = kvcalloc(rx_ring->cnt, sizeof(*rx_ring->rxbufs),
2358 if (!rx_ring->rxbufs)
2364 nfp_net_rx_ring_free(rx_ring);
2368 static int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
2372 dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),
2377 for (r = 0; r < dp->num_rx_rings; r++) {
2378 nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);
2380 if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
2383 if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
2391 nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
2393 nfp_net_rx_ring_free(&dp->rx_rings[r]);
2395 kfree(dp->rx_rings);
2399 static void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
2403 for (r = 0; r < dp->num_rx_rings; r++) {
2404 nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
2405 nfp_net_rx_ring_free(&dp->rx_rings[r]);
2408 kfree(dp->rx_rings);
2412 nfp_net_vector_assign_rings(struct nfp_net_dp *dp,
2413 struct nfp_net_r_vector *r_vec, int idx)
2415 r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL;
2417 r_vec->tx_ring = idx < dp->num_stack_tx_rings ? &dp->tx_rings[idx] : NULL;
2419 r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ?
2420 &dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL;
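/* Ring-to-vector layout implied above: each r_vec owns at most one RX ring,
 * one stack TX ring and one XDP TX ring, with the XDP rings stored after the
 * stack rings in the tx_rings array:
 *
 *	r_vec[i].rx_ring  = &rx_rings[i]                       (i < num_rx_rings)
 *	r_vec[i].tx_ring  = &tx_rings[i]                       (i < num_stack_tx_rings)
 *	r_vec[i].xdp_ring = &tx_rings[num_stack_tx_rings + i]  (i < num_tx_rings - num_stack_tx_rings)
 */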
2424 nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
2431 netif_napi_add(nn->dp.netdev, &r_vec->napi,
2432 nfp_net_poll, NAPI_POLL_WEIGHT);
2434 tasklet_enable(&r_vec->tasklet);
2436 snprintf(r_vec->name, sizeof(r_vec->name),
2437 "%s-rxtx-%d", nfp_net_name(nn), idx);
2438 err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
2442 netif_napi_del(&r_vec->napi);
2444 tasklet_disable(&r_vec->tasklet);
2446 nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
2449 disable_irq(r_vec->irq_vector);
2451 irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);
2453 nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, r_vec->irq_vector,
2460 nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
2462 irq_set_affinity_hint(r_vec->irq_vector, NULL);
2464 netif_napi_del(&r_vec->napi);
2466 tasklet_disable(&r_vec->tasklet);
2468 free_irq(r_vec->irq_vector, r_vec);
2472 * nfp_net_rss_write_itbl() - Write RSS indirection table to device
2473 * @nn: NFP Net device to reconfigure
2475 void nfp_net_rss_write_itbl(struct nfp_net *nn)
2479 for (i = 0; i < NFP_NET_CFG_RSS_ITBL_SZ; i += 4)
2480 nn_writel(nn, NFP_NET_CFG_RSS_ITBL + i,
2481 get_unaligned_le32(nn->rss_itbl + i));
2485 * nfp_net_rss_write_key() - Write RSS hash key to device
2486 * @nn: NFP Net device to reconfigure
2488 void nfp_net_rss_write_key(struct nfp_net *nn)
2492 for (i = 0; i < nfp_net_rss_key_sz(nn); i += 4)
2493 nn_writel(nn, NFP_NET_CFG_RSS_KEY + i,
2494 get_unaligned_le32(nn->rss_key + i));
2498 * nfp_net_coalesce_write_cfg() - Write irq coalescence configuration to HW
2499 * @nn: NFP Net device to reconfigure
2501 void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
2507 /* Compute factor used to convert coalesce '_usecs' parameters to
2508 * ME timestamp ticks. There are 16 ME clock cycles for each timestamp tick.
2511 factor = nn->tlv_caps.me_freq_mhz / 16;
2513 /* copy RX interrupt coalesce parameters */
2514 value = (nn->rx_coalesce_max_frames << 16) |
2515 (factor * nn->rx_coalesce_usecs);
2516 for (i = 0; i < nn->dp.num_rx_rings; i++)
2517 nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);
2519 /* copy TX interrupt coalesce parameters */
2520 value = (nn->tx_coalesce_max_frames << 16) |
2521 (factor * nn->tx_coalesce_usecs);
2522 for (i = 0; i < nn->dp.num_tx_rings; i++)
2523 nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
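/* Each *_IRQ_MOD register packs the frame threshold in the upper 16 bits and
 * the usecs threshold, converted to ME timestamp ticks, in the lower 16 bits.
 * Roughly, for the RX side:
 *
 *	value = (rx_coalesce_max_frames << 16) |
 *		(rx_coalesce_usecs * me_freq_mhz / 16);
 */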
2527 * nfp_net_write_mac_addr() - Write mac address to the device control BAR
2528 * @nn: NFP Net device to reconfigure
2529 * @addr: MAC address to write
2531 * Writes the MAC address from the netdev to the device control BAR. Does not
2532 * perform the required reconfig. We do a bit of byte swapping dance because
 * the firmware is little-endian.
2535 static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *addr)
2537 nn_writel(nn, NFP_NET_CFG_MACADDR + 0, get_unaligned_be32(addr));
2538 nn_writew(nn, NFP_NET_CFG_MACADDR + 6, get_unaligned_be16(addr + 4));
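/* The six MAC bytes are written as two pieces: bytes 0-3 with a 32-bit write
 * at NFP_NET_CFG_MACADDR + 0 and bytes 4-5 with a 16-bit write at
 * NFP_NET_CFG_MACADDR + 6; the get_unaligned_be*() helpers provide the byte
 * order the byte-swapping note above refers to.
 */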
2541 static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
2543 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
2544 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
2545 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);
2547 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
2548 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
2549 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
2553 * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
2554 * @nn: NFP Net device to reconfigure
2556 * Warning: must be fully idempotent.
2558 static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
2560 u32 new_ctrl, update;
2564 new_ctrl = nn->dp.ctrl;
2565 new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE;
2566 update = NFP_NET_CFG_UPDATE_GEN;
2567 update |= NFP_NET_CFG_UPDATE_MSIX;
2568 update |= NFP_NET_CFG_UPDATE_RING;
2570 if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
2571 new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
2573 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
2574 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
2576 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2577 err = nfp_net_reconfig(nn, update);
2579 nn_err(nn, "Could not disable device: %d\n", err);
2581 for (r = 0; r < nn->dp.num_rx_rings; r++)
2582 nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]);
2583 for (r = 0; r < nn->dp.num_tx_rings; r++)
2584 nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]);
2585 for (r = 0; r < nn->dp.num_r_vecs; r++)
2586 nfp_net_vec_clear_ring_data(nn, r);
2588 nn->dp.ctrl = new_ctrl;
2592 nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
2593 struct nfp_net_rx_ring *rx_ring, unsigned int idx)
2595 /* Write the DMA address, size and MSI-X info to the device */
2596 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
2597 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
2598 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry);
2602 nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
2603 struct nfp_net_tx_ring *tx_ring, unsigned int idx)
2605 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
2606 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
2607 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
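/* Ring sizes are programmed as ilog2(cnt) by the two helpers above, so the
 * descriptor counts are expected to be powers of two; the ethtool ring-size
 * paths are assumed to enforce this before a ring is ever written to the BAR.
 */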
2611 * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
2612 * @nn: NFP Net device to reconfigure
2614 static int nfp_net_set_config_and_enable(struct nfp_net *nn)
2616 u32 bufsz, new_ctrl, update = 0;
2620 new_ctrl = nn->dp.ctrl;
2622 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_RSS_ANY) {
2623 nfp_net_rss_write_key(nn);
2624 nfp_net_rss_write_itbl(nn);
2625 nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg);
2626 update |= NFP_NET_CFG_UPDATE_RSS;
2629 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_IRQMOD) {
2630 nfp_net_coalesce_write_cfg(nn);
2631 update |= NFP_NET_CFG_UPDATE_IRQMOD;
2634 for (r = 0; r < nn->dp.num_tx_rings; r++)
2635 nfp_net_tx_ring_hw_cfg_write(nn, &nn->dp.tx_rings[r], r);
2636 for (r = 0; r < nn->dp.num_rx_rings; r++)
2637 nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r);
2639 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->dp.num_tx_rings == 64 ?
2640 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_tx_rings) - 1);
2642 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ?
2643 0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1);
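/* The enable masks above are plain "lowest N bits set" values, with the
 * 64-ring case special-cased because shifting a u64 by 64 is undefined in C,
 * e.g. 4 rings -> 0xf, 64 rings -> 0xffffffffffffffff.
 */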
2646 nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
2648 nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.mtu);
2650 bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA;
2651 nn_writel(nn, NFP_NET_CFG_FLBUFSZ, bufsz);
2654 new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
2655 update |= NFP_NET_CFG_UPDATE_GEN;
2656 update |= NFP_NET_CFG_UPDATE_MSIX;
2657 update |= NFP_NET_CFG_UPDATE_RING;
2658 if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
2659 new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
2661 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2662 err = nfp_net_reconfig(nn, update);
2664 nfp_net_clear_config_and_disable(nn);
2668 nn->dp.ctrl = new_ctrl;
2670 for (r = 0; r < nn->dp.num_rx_rings; r++)
2671 nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]);
2673 /* Since reconfiguration requests while NFP is down are ignored we
2674 * have to wipe the entire VXLAN configuration and reinitialize it.
2676 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN) {
2677 memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
2678 memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
2679 udp_tunnel_get_rx_info(nn->dp.netdev);
2686 * nfp_net_close_stack() - Quiesce the stack (part of close)
2687 * @nn: NFP Net device to reconfigure
2689 static void nfp_net_close_stack(struct nfp_net *nn)
2693 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2694 netif_carrier_off(nn->dp.netdev);
2695 nn->link_up = false;
2697 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2698 disable_irq(nn->r_vecs[r].irq_vector);
2699 napi_disable(&nn->r_vecs[r].napi);
2702 netif_tx_disable(nn->dp.netdev);
2706 * nfp_net_close_free_all() - Free all runtime resources
2707 * @nn: NFP Net device to reconfigure
2709 static void nfp_net_close_free_all(struct nfp_net *nn)
2713 nfp_net_tx_rings_free(&nn->dp);
2714 nfp_net_rx_rings_free(&nn->dp);
2716 for (r = 0; r < nn->dp.num_r_vecs; r++)
2717 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
2719 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
2720 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
2724 * nfp_net_netdev_close() - Called when the device is downed
2725 * @netdev: netdev structure
2727 static int nfp_net_netdev_close(struct net_device *netdev)
2729 struct nfp_net *nn = netdev_priv(netdev);
2731 /* Step 1: Disable RX and TX rings from the Linux kernel perspective
2733 nfp_net_close_stack(nn);
2737 nfp_net_clear_config_and_disable(nn);
2738 nfp_port_configure(netdev, false);
2740 /* Step 3: Free resources
2742 nfp_net_close_free_all(nn);
2744 nn_dbg(nn, "%s down", netdev->name);
2748 void nfp_ctrl_close(struct nfp_net *nn)
2754 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2755 disable_irq(nn->r_vecs[r].irq_vector);
2756 tasklet_disable(&nn->r_vecs[r].tasklet);
2759 nfp_net_clear_config_and_disable(nn);
2761 nfp_net_close_free_all(nn);
2767 * nfp_net_open_stack() - Start the device from stack's perspective
2768 * @nn: NFP Net device to reconfigure
2770 static void nfp_net_open_stack(struct nfp_net *nn)
2774 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2775 napi_enable(&nn->r_vecs[r].napi);
2776 enable_irq(nn->r_vecs[r].irq_vector);
2779 netif_tx_wake_all_queues(nn->dp.netdev);
2781 enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2782 nfp_net_read_link_status(nn);
2785 static int nfp_net_open_alloc_all(struct nfp_net *nn)
2789 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
2790 nn->exn_name, sizeof(nn->exn_name),
2791 NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
2794 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc",
2795 nn->lsc_name, sizeof(nn->lsc_name),
2796 NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
2799 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2801 for (r = 0; r < nn->dp.num_r_vecs; r++) {
2802 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
2804 goto err_cleanup_vec_p;
2807 err = nfp_net_rx_rings_prepare(nn, &nn->dp);
2809 goto err_cleanup_vec;
2811 err = nfp_net_tx_rings_prepare(nn, &nn->dp);
2813 goto err_free_rx_rings;
2815 for (r = 0; r < nn->max_r_vecs; r++)
2816 nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
2821 nfp_net_rx_rings_free(&nn->dp);
2823 r = nn->dp.num_r_vecs;
2826 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
2827 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
2829 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
2833 static int nfp_net_netdev_open(struct net_device *netdev)
2835 struct nfp_net *nn = netdev_priv(netdev);
2838 /* Step 1: Allocate resources for rings and the like
2839 * - Request interrupts
2840 * - Allocate RX and TX ring resources
2841 * - Setup initial RSS table
2843 err = nfp_net_open_alloc_all(nn);
2847 err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings);
2851 err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings);
2855 /* Step 2: Configure the NFP
2856 * - Ifup the physical interface if it exists
2857 * - Enable rings from 0 to tx_rings/rx_rings - 1.
2858 * - Write MAC address (in case it changed)
2860 * - Set the Freelist buffer size
2863 err = nfp_port_configure(netdev, true);
2867 err = nfp_net_set_config_and_enable(nn);
2869 goto err_port_disable;
2871 /* Step 3: Enable for kernel
2872 * - put some freelist descriptors on each RX ring
2873 * - enable NAPI on each ring
2874 * - enable all TX queues
2877 nfp_net_open_stack(nn);
2882 nfp_port_configure(netdev, false);
2884 nfp_net_close_free_all(nn);
2888 int nfp_ctrl_open(struct nfp_net *nn)
2892 /* ring dumping depends on vNICs being opened/closed under rtnl */
2895 err = nfp_net_open_alloc_all(nn);
2899 err = nfp_net_set_config_and_enable(nn);
2903 for (r = 0; r < nn->dp.num_r_vecs; r++)
2904 enable_irq(nn->r_vecs[r].irq_vector);
2911 nfp_net_close_free_all(nn);
2917 static void nfp_net_set_rx_mode(struct net_device *netdev)
2919 struct nfp_net *nn = netdev_priv(netdev);
2922 new_ctrl = nn->dp.ctrl;
2924 if (!netdev_mc_empty(netdev) || netdev->flags & IFF_ALLMULTI)
2925 new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_L2MC;
2927 new_ctrl &= ~NFP_NET_CFG_CTRL_L2MC;
2929 if (netdev->flags & IFF_PROMISC) {
2930 if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
2931 new_ctrl |= NFP_NET_CFG_CTRL_PROMISC;
2933 nn_warn(nn, "FW does not support promiscuous mode\n");
2935 new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC;
2938 if (new_ctrl == nn->dp.ctrl)
2941 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2942 nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);
2944 nn->dp.ctrl = new_ctrl;
2947 static void nfp_net_rss_init_itbl(struct nfp_net *nn)
2951 for (i = 0; i < sizeof(nn->rss_itbl); i++)
2953 nn->rss_itbl[i] = ethtool_rxfh_indir_default(i, nn->dp.num_rx_rings);
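/* ethtool_rxfh_indir_default() spreads entries round-robin over the enabled
 * RX rings, so with e.g. 4 rings the table reads 0,1,2,3,0,1,2,3,...  This
 * default is only reapplied while the user has not configured their own
 * indirection table (see the netif_is_rxfh_configured() check in
 * nfp_net_dp_swap() below).
 */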
2956 static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp)
2958 struct nfp_net_dp new_dp = *dp;
2963 nn->dp.netdev->mtu = new_dp.mtu;
2965 if (!netif_is_rxfh_configured(nn->dp.netdev))
2966 nfp_net_rss_init_itbl(nn);
2969 static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp)
2974 nfp_net_dp_swap(nn, dp);
2976 for (r = 0; r < nn->max_r_vecs; r++)
2977 nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
2979 err = netif_set_real_num_rx_queues(nn->dp.netdev, nn->dp.num_rx_rings);
2983 if (nn->dp.netdev->real_num_tx_queues != nn->dp.num_stack_tx_rings) {
2984 err = netif_set_real_num_tx_queues(nn->dp.netdev,
2985 nn->dp.num_stack_tx_rings);
2990 return nfp_net_set_config_and_enable(nn);
2993 struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn)
2995 struct nfp_net_dp *new;
2997 new = kmalloc(sizeof(*new), GFP_KERNEL);
3003 /* Clear things which need to be recomputed */
3005 new->tx_rings = NULL;
3006 new->rx_rings = NULL;
3007 new->num_r_vecs = 0;
3008 new->num_stack_tx_rings = 0;
3014 nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp,
3015 struct netlink_ext_ack *extack)
3017 /* XDP-enabled tests */
3020 if (dp->fl_bufsz > PAGE_SIZE) {
3021 NL_SET_ERR_MSG_MOD(extack, "MTU too large w/ XDP enabled");
3024 if (dp->num_tx_rings > nn->max_tx_rings) {
3025 NL_SET_ERR_MSG_MOD(extack, "Insufficient number of TX rings w/ XDP enabled");
3032 int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp,
3033 struct netlink_ext_ack *extack)
3037 dp->fl_bufsz = nfp_net_calc_fl_bufsz(dp);
3039 dp->num_stack_tx_rings = dp->num_tx_rings;
3041 dp->num_stack_tx_rings -= dp->num_rx_rings;
3043 dp->num_r_vecs = max(dp->num_rx_rings, dp->num_stack_tx_rings);
3045 err = nfp_net_check_config(nn, dp, extack);
3049 if (!netif_running(dp->netdev)) {
3050 nfp_net_dp_swap(nn, dp);
3055 /* Prepare new rings */
3056 for (r = nn->dp.num_r_vecs; r < dp->num_r_vecs; r++) {
3057 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
3060 goto err_cleanup_vecs;
3064 err = nfp_net_rx_rings_prepare(nn, dp);
3066 goto err_cleanup_vecs;
3068 err = nfp_net_tx_rings_prepare(nn, dp);
3072 /* Stop device, swap in new rings, try to start the firmware */
3073 nfp_net_close_stack(nn);
3074 nfp_net_clear_config_and_disable(nn);
3076 err = nfp_net_dp_swap_enable(nn, dp);
3080 nfp_net_clear_config_and_disable(nn);
3082 /* Try with old configuration and old rings */
3083 err2 = nfp_net_dp_swap_enable(nn, dp);
3085 nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
3088 for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
3089 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
3091 nfp_net_rx_rings_free(dp);
3092 nfp_net_tx_rings_free(dp);
3094 nfp_net_open_stack(nn);
3101 nfp_net_rx_rings_free(dp);
3103 for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
3104 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
3109 static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
3111 struct nfp_net *nn = netdev_priv(netdev);
3112 struct nfp_net_dp *dp;
3115 err = nfp_app_check_mtu(nn->app, netdev, new_mtu);
3119 dp = nfp_net_clone_dp(nn);
3125 return nfp_net_ring_reconfig(nn, dp, NULL);
3129 nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3131 struct nfp_net *nn = netdev_priv(netdev);
3133 /* Priority tagged packets with vlan id 0 are processed by the
3134 * NFP as untagged packets
3139 nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
3140 nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
3143 return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD);
3147 nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3149 struct nfp_net *nn = netdev_priv(netdev);
3151 /* Priority tagged packets with vlan id 0 are processed by the
3152 * NFP as untagged packets
3157 nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
3158 nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
3161 return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
3164 static void nfp_net_stat64(struct net_device *netdev,
3165 struct rtnl_link_stats64 *stats)
3167 struct nfp_net *nn = netdev_priv(netdev);
3170 /* Collect software stats */
3171 for (r = 0; r < nn->max_r_vecs; r++) {
3172 struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
3177 start = u64_stats_fetch_begin(&r_vec->rx_sync);
3178 data[0] = r_vec->rx_pkts;
3179 data[1] = r_vec->rx_bytes;
3180 data[2] = r_vec->rx_drops;
3181 } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
3182 stats->rx_packets += data[0];
3183 stats->rx_bytes += data[1];
3184 stats->rx_dropped += data[2];
3187 start = u64_stats_fetch_begin(&r_vec->tx_sync);
3188 data[0] = r_vec->tx_pkts;
3189 data[1] = r_vec->tx_bytes;
3190 data[2] = r_vec->tx_errors;
3191 } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
3192 stats->tx_packets += data[0];
3193 stats->tx_bytes += data[1];
3194 stats->tx_errors += data[2];
3197 /* Add in device stats */
3198 stats->multicast += nn_readq(nn, NFP_NET_CFG_STATS_RX_MC_FRAMES);
3199 stats->rx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_RX_DISCARDS);
3200 stats->rx_errors += nn_readq(nn, NFP_NET_CFG_STATS_RX_ERRORS);
3202 stats->tx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_TX_DISCARDS);
3203 stats->tx_errors += nn_readq(nn, NFP_NET_CFG_STATS_TX_ERRORS);
3206 static int nfp_net_set_features(struct net_device *netdev,
3207 netdev_features_t features)
3209 netdev_features_t changed = netdev->features ^ features;
3210 struct nfp_net *nn = netdev_priv(netdev);
3214 /* Assume this is not called with features we have not advertised */
3216 new_ctrl = nn->dp.ctrl;
3218 if (changed & NETIF_F_RXCSUM) {
3219 if (features & NETIF_F_RXCSUM)
3220 new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
3222 new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM_ANY;
3225 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
3226 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
3227 new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
3229 new_ctrl &= ~NFP_NET_CFG_CTRL_TXCSUM;
3232 if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
3233 if (features & (NETIF_F_TSO | NETIF_F_TSO6))
3234 new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
3235 NFP_NET_CFG_CTRL_LSO;
3237 new_ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
3240 if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
3241 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3242 new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
3244 new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN;
3247 if (changed & NETIF_F_HW_VLAN_CTAG_TX) {
3248 if (features & NETIF_F_HW_VLAN_CTAG_TX)
3249 new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
3251 new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN;
3254 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
3255 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
3256 new_ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
3258 new_ctrl &= ~NFP_NET_CFG_CTRL_CTAG_FILTER;
3261 if (changed & NETIF_F_SG) {
3262 if (features & NETIF_F_SG)
3263 new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
3265 new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
3268 err = nfp_port_set_features(netdev, features);
3272 nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
3273 netdev->features, features, changed);
3275 if (new_ctrl == nn->dp.ctrl)
3278 nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->dp.ctrl, new_ctrl);
3279 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
3280 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
3284 nn->dp.ctrl = new_ctrl;
3289 static netdev_features_t
3290 nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
3291 netdev_features_t features)
3295 /* We can't do TSO over double tagged packets (802.1AD) */
3296 features &= vlan_features_check(skb, features);
3298 if (!skb->encapsulation)
3301 /* Ensure that inner L4 header offset fits into TX descriptor field */
3302 if (skb_is_gso(skb)) {
3305 hdrlen = skb_inner_transport_header(skb) - skb->data +
3306 inner_tcp_hdrlen(skb);
3308 if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ))
3309 features &= ~NETIF_F_GSO_MASK;
3312 /* VXLAN/GRE check */
3313 switch (vlan_get_protocol(skb)) {
3314 case htons(ETH_P_IP):
3315 l4_hdr = ip_hdr(skb)->protocol;
3317 case htons(ETH_P_IPV6):
3318 l4_hdr = ipv6_hdr(skb)->nexthdr;
3321 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3324 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
3325 skb->inner_protocol != htons(ETH_P_TEB) ||
3326 (l4_hdr != IPPROTO_UDP && l4_hdr != IPPROTO_GRE) ||
3327 (l4_hdr == IPPROTO_UDP &&
3328 (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
3329 sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
3330 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
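/* In short: checksum and GSO offloads are kept for encapsulated traffic only
 * when the inner protocol is Ethernet (TEB) carried over GRE, or over UDP
 * with a standard VXLAN framing (8-byte UDP header plus 8-byte VXLAN header
 * between the outer transport and inner MAC headers); anything else falls
 * back to software.
 */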
3336 nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
3338 struct nfp_net *nn = netdev_priv(netdev);
3342 return nfp_port_get_phys_port_name(netdev, name, len);
3344 if (nn->dp.is_vf || nn->vnic_no_name)
3347 n = snprintf(name, len, "n%d", nn->id);
3355 * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW
3356 * @nn: NFP Net device to reconfigure
3357 * @idx: Index into the port table where new port should be written
3358 * @port: UDP port to configure (pass zero to remove VXLAN port)
3360 static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port)
3364 nn->vxlan_ports[idx] = port;
3366 if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN))
3369 BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
3370 for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2)
3371 nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port),
3372 be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 |
3373 be16_to_cpu(nn->vxlan_ports[i]));
3375 nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_VXLAN);
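/* Ports are flushed to the BAR two per 32-bit register, the odd-indexed port
 * in the upper half-word:
 *
 *	reg = (be16_to_cpu(vxlan_ports[i + 1]) << 16) | be16_to_cpu(vxlan_ports[i]);
 *
 * which is why NFP_NET_N_VXLAN_PORTS is asserted to be even via the
 * BUILD_BUG_ON() above.
 */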
3379 * nfp_net_find_vxlan_idx() - find table entry of the port or a free one
3380 * @nn: NFP Network structure
3381 * @port: UDP port to look for
3383 * Return: if the port is already in the table -- its position;
3384 * if the port is not in the table -- free position to use;
3385 * if the table is full -- -ENOSPC.
3387 static int nfp_net_find_vxlan_idx(struct nfp_net *nn, __be16 port)
3389 int i, free_idx = -ENOSPC;
3391 for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
3392 if (nn->vxlan_ports[i] == port)
3394 if (!nn->vxlan_usecnt[i])
3401 static void nfp_net_add_vxlan_port(struct net_device *netdev,
3402 struct udp_tunnel_info *ti)
3404 struct nfp_net *nn = netdev_priv(netdev);
3407 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3410 idx = nfp_net_find_vxlan_idx(nn, ti->port);
3414 if (!nn->vxlan_usecnt[idx]++)
3415 nfp_net_set_vxlan_port(nn, idx, ti->port);
3418 static void nfp_net_del_vxlan_port(struct net_device *netdev,
3419 struct udp_tunnel_info *ti)
3421 struct nfp_net *nn = netdev_priv(netdev);
3424 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3427 idx = nfp_net_find_vxlan_idx(nn, ti->port);
3428 if (idx == -ENOSPC || !nn->vxlan_usecnt[idx])
3431 if (!--nn->vxlan_usecnt[idx])
3432 nfp_net_set_vxlan_port(nn, idx, 0);
3435 static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf)
3437 struct bpf_prog *prog = bpf->prog;
3438 struct nfp_net_dp *dp;
3441 if (!xdp_attachment_flags_ok(&nn->xdp, bpf))
3444 if (!prog == !nn->dp.xdp_prog) {
3445 WRITE_ONCE(nn->dp.xdp_prog, prog);
3446 xdp_attachment_setup(&nn->xdp, bpf);
3450 dp = nfp_net_clone_dp(nn);
3454 dp->xdp_prog = prog;
3455 dp->num_tx_rings += prog ? nn->dp.num_rx_rings : -nn->dp.num_rx_rings;
3456 dp->rx_dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
3457 dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0;
3459 /* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
3460 err = nfp_net_ring_reconfig(nn, dp, bpf->extack);
3464 xdp_attachment_setup(&nn->xdp, bpf);
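/* Attaching a driver-mode XDP program is a full datapath reconfig: one extra
 * TX ring is added per RX ring to carry XDP_TX traffic, RX buffers are
 * remapped DMA_BIDIRECTIONAL so the program may rewrite packet data, and
 * XDP_PACKET_HEADROOM worth of headroom is reserved in front of the packet
 * data.  Detaching reverses all three.
 */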
3468 static int nfp_net_xdp_setup_hw(struct nfp_net *nn, struct netdev_bpf *bpf)
3472 if (!xdp_attachment_flags_ok(&nn->xdp_hw, bpf))
3475 err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack);
3479 xdp_attachment_setup(&nn->xdp_hw, bpf);
3483 static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
3485 struct nfp_net *nn = netdev_priv(netdev);
3487 switch (xdp->command) {
3488 case XDP_SETUP_PROG:
3489 return nfp_net_xdp_setup_drv(nn, xdp);
3490 case XDP_SETUP_PROG_HW:
3491 return nfp_net_xdp_setup_hw(nn, xdp);
3492 case XDP_QUERY_PROG:
3493 return xdp_attachment_query(&nn->xdp, xdp);
3494 case XDP_QUERY_PROG_HW:
3495 return xdp_attachment_query(&nn->xdp_hw, xdp);
3497 return nfp_app_bpf(nn->app, nn, xdp);
3501 static int nfp_net_set_mac_address(struct net_device *netdev, void *addr)
3503 struct nfp_net *nn = netdev_priv(netdev);
3504 struct sockaddr *saddr = addr;
3507 err = eth_prepare_mac_addr_change(netdev, addr);
3511 nfp_net_write_mac_addr(nn, saddr->sa_data);
3513 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MACADDR);
3517 eth_commit_mac_addr_change(netdev, addr);
3522 const struct net_device_ops nfp_net_netdev_ops = {
3523 .ndo_init = nfp_app_ndo_init,
3524 .ndo_uninit = nfp_app_ndo_uninit,
3525 .ndo_open = nfp_net_netdev_open,
3526 .ndo_stop = nfp_net_netdev_close,
3527 .ndo_start_xmit = nfp_net_tx,
3528 .ndo_get_stats64 = nfp_net_stat64,
3529 .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid,
3530 .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
3531 .ndo_set_vf_mac = nfp_app_set_vf_mac,
3532 .ndo_set_vf_vlan = nfp_app_set_vf_vlan,
3533 .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
3534 .ndo_get_vf_config = nfp_app_get_vf_config,
3535 .ndo_set_vf_link_state = nfp_app_set_vf_link_state,
3536 .ndo_setup_tc = nfp_port_setup_tc,
3537 .ndo_tx_timeout = nfp_net_tx_timeout,
3538 .ndo_set_rx_mode = nfp_net_set_rx_mode,
3539 .ndo_change_mtu = nfp_net_change_mtu,
3540 .ndo_set_mac_address = nfp_net_set_mac_address,
3541 .ndo_set_features = nfp_net_set_features,
3542 .ndo_features_check = nfp_net_features_check,
3543 .ndo_get_phys_port_name = nfp_net_get_phys_port_name,
3544 .ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
3545 .ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
3546 .ndo_bpf = nfp_net_xdp,
3550 * nfp_net_info() - Print general info about the NIC
3551 * @nn: NFP Net device to reconfigure
3553 void nfp_net_info(struct nfp_net *nn)
3555 nn_info(nn, "Netronome NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
3556 nn->dp.is_vf ? "VF " : "",
3557 nn->dp.num_tx_rings, nn->max_tx_rings,
3558 nn->dp.num_rx_rings, nn->max_rx_rings);
3559 nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
3560 nn->fw_ver.resv, nn->fw_ver.class,
3561 nn->fw_ver.major, nn->fw_ver.minor,
3563 nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
3565 nn->cap, nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
3566 nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
3567 nn->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
3568 nn->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
3569 nn->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
3570 nn->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
3571 nn->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
3572 nn->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
3573 nn->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
3574 nn->cap & NFP_NET_CFG_CTRL_LSO ? "TSO1 " : "",
3575 nn->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSO2 " : "",
3576 nn->cap & NFP_NET_CFG_CTRL_RSS ? "RSS1 " : "",
3577 nn->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSS2 " : "",
3578 nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER ? "CTAG_FILTER " : "",
3579 nn->cap & NFP_NET_CFG_CTRL_L2SWITCH ? "L2SWITCH " : "",
3580 nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
3581 nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
3582 nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
3583 nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "",
3584 nn->cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ?
3585 "RXCSUM_COMPLETE " : "",
3586 nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
3587 nfp_app_extra_cap(nn->app, nn));
3591 * nfp_net_alloc() - Allocate netdev and related structure
3593 * @needs_netdev: Whether to allocate a netdev for this vNIC
3594 * @max_tx_rings: Maximum number of TX rings supported by device
3595 * @max_rx_rings: Maximum number of RX rings supported by device
3597 * This function allocates a netdev device and fills in the initial
3598 * part of the @struct nfp_net structure. In the case of a control device
3599 * the nfp_net structure is allocated without a netdev.
3601 * Return: NFP Net device structure, or ERR_PTR on error.
3603 struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
3604 unsigned int max_tx_rings,
3605 unsigned int max_rx_rings)
3610 struct net_device *netdev;
3612 netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
3613 max_tx_rings, max_rx_rings);
3615 return ERR_PTR(-ENOMEM);
3617 SET_NETDEV_DEV(netdev, &pdev->dev);
3618 nn = netdev_priv(netdev);
3619 nn->dp.netdev = netdev;
3621 nn = vzalloc(sizeof(*nn));
3623 return ERR_PTR(-ENOMEM);
3626 nn->dp.dev = &pdev->dev;
3629 nn->max_tx_rings = max_tx_rings;
3630 nn->max_rx_rings = max_rx_rings;
3632 nn->dp.num_tx_rings = min_t(unsigned int,
3633 max_tx_rings, num_online_cpus());
3634 nn->dp.num_rx_rings = min_t(unsigned int, max_rx_rings,
3635 netif_get_num_default_rss_queues());
3637 nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings);
3638 nn->dp.num_r_vecs = min_t(unsigned int,
3639 nn->dp.num_r_vecs, num_online_cpus());
3641 nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
3642 nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
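/* A rough default-sizing sketch, assuming an 8-CPU host and a device exposing
 * more rings than that: num_tx_rings = 8 (one per online CPU), num_rx_rings
 * capped by netif_get_num_default_rss_queues(), and num_r_vecs = one vector
 * per ring pair, further capped by the online CPU count.  Descriptor counts
 * start at the *_DESCS_DEFAULT values above and can typically be resized
 * later via ethtool -G.
 */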
3644 spin_lock_init(&nn->reconfig_lock);
3645 spin_lock_init(&nn->link_status_lock);
3647 timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0);
3653 * nfp_net_free() - Undo what @nfp_net_alloc() did
3654 * @nn: NFP Net device to reconfigure
3656 void nfp_net_free(struct nfp_net *nn)
3658 WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
3660 free_netdev(nn->dp.netdev);
3666 * nfp_net_rss_key_sz() - Get current size of the RSS key
3667 * @nn: NFP Net device instance
3669 * Return: size of the RSS key for the currently selected hash function.
3671 unsigned int nfp_net_rss_key_sz(struct nfp_net *nn)
3673 switch (nn->rss_hfunc) {
3674 case ETH_RSS_HASH_TOP:
3675 return NFP_NET_CFG_RSS_KEY_SZ;
3676 case ETH_RSS_HASH_XOR:
3678 case ETH_RSS_HASH_CRC32:
3682 nn_warn(nn, "Unknown hash function: %u\n", nn->rss_hfunc);
3687 * nfp_net_rss_init() - Set the initial RSS parameters
3688 * @nn: NFP Net device to reconfigure
3690 static void nfp_net_rss_init(struct nfp_net *nn)
3692 unsigned long func_bit, rss_cap_hfunc;
3695 /* Read the RSS function capability and select first supported func */
3696 reg = nn_readl(nn, NFP_NET_CFG_RSS_CAP);
3697 rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC, reg);
3699 rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC,
3700 NFP_NET_CFG_RSS_TOEPLITZ);
3702 func_bit = find_first_bit(&rss_cap_hfunc, NFP_NET_CFG_RSS_HFUNCS);
3703 if (func_bit == NFP_NET_CFG_RSS_HFUNCS) {
3704 dev_warn(nn->dp.dev,
3705 "Bad RSS config, defaulting to Toeplitz hash\n");
3706 func_bit = ETH_RSS_HASH_TOP_BIT;
3708 nn->rss_hfunc = 1 << func_bit;
3710 netdev_rss_key_fill(nn->rss_key, nfp_net_rss_key_sz(nn));
3712 nfp_net_rss_init_itbl(nn);
3714 /* Enable IPv4/IPv6 TCP by default */
3715 nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP |
3716 NFP_NET_CFG_RSS_IPV6_TCP |
3717 FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc) |
3718 NFP_NET_CFG_RSS_MASK;
3722 * nfp_net_irqmod_init() - Set the initial IRQ moderation parameters
3723 * @nn: NFP Net device to reconfigure
3725 static void nfp_net_irqmod_init(struct nfp_net *nn)
3727 nn->rx_coalesce_usecs = 50;
3728 nn->rx_coalesce_max_frames = 64;
3729 nn->tx_coalesce_usecs = 50;
3730 nn->tx_coalesce_max_frames = 64;
3733 static void nfp_net_netdev_init(struct nfp_net *nn)
3735 struct net_device *netdev = nn->dp.netdev;
3737 nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
3739 netdev->mtu = nn->dp.mtu;
3741 /* Advertise/enable offloads based on capabilities
3743 * Note: netdev->features shows the currently enabled features
3744 * and netdev->hw_features advertises which features are
3745 * supported. By default we enable most features.
3747 if (nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)
3748 netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
3750 netdev->hw_features = NETIF_F_HIGHDMA;
3751 if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY) {
3752 netdev->hw_features |= NETIF_F_RXCSUM;
3753 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
3755 if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
3756 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3757 nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
3759 if (nn->cap & NFP_NET_CFG_CTRL_GATHER) {
3760 netdev->hw_features |= NETIF_F_SG;
3761 nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER;
3763 if ((nn->cap & NFP_NET_CFG_CTRL_LSO && nn->fw_ver.major > 2) ||
3764 nn->cap & NFP_NET_CFG_CTRL_LSO2) {
3765 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
3766 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
3767 NFP_NET_CFG_CTRL_LSO;
3769 if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)
3770 netdev->hw_features |= NETIF_F_RXHASH;
3771 if (nn->cap & NFP_NET_CFG_CTRL_VXLAN) {
3772 if (nn->cap & NFP_NET_CFG_CTRL_LSO)
3773 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
3774 nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN;
3776 if (nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
3777 if (nn->cap & NFP_NET_CFG_CTRL_LSO)
3778 netdev->hw_features |= NETIF_F_GSO_GRE;
3779 nn->dp.ctrl |= NFP_NET_CFG_CTRL_NVGRE;
3781 if (nn->cap & (NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE))
3782 netdev->hw_enc_features = netdev->hw_features;
3784 netdev->vlan_features = netdev->hw_features;
3786 if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) {
3787 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
3788 nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
3790 if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) {
3791 if (nn->cap & NFP_NET_CFG_CTRL_LSO2) {
3792 nn_warn(nn, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
3794 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
3795 nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
3798 if (nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER) {
3799 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3800 nn->dp.ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
3803 netdev->features = netdev->hw_features;
3805 if (nfp_app_has_tc(nn->app) && nn->port)
3806 netdev->hw_features |= NETIF_F_HW_TC;
3808 /* Advertise but disable TSO by default. */
3809 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
3810 nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
3812 /* Finalise the netdev setup */
3813 netdev->netdev_ops = &nfp_net_netdev_ops;
3814 netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
3816 SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops);
3818 /* MTU range: 68 - hw-specific max */
3819 netdev->min_mtu = ETH_MIN_MTU;
3820 netdev->max_mtu = nn->max_mtu;
3822 netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
3824 netif_carrier_off(netdev);
3826 nfp_net_set_ethtool_ops(netdev);
3829 static int nfp_net_read_caps(struct nfp_net *nn)
3831 /* Get some of the read-only fields from the BAR */
3832 nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
3833 nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
3835 /* ABI 4.x and ctrl vNIC always use chained metadata, in other cases
3836 * we allow use of non-chained metadata if RSS(v1) is the only
3837 * advertised capability requiring metadata.
3839 nn->dp.chained_metadata_format = nn->fw_ver.major == 4 ||
3841 !(nn->cap & NFP_NET_CFG_CTRL_RSS) ||
3842 nn->cap & NFP_NET_CFG_CTRL_CHAIN_META;
3843 /* RSS(v1) uses non-chained metadata format, except in ABI 4.x where
3844 * it has the same meaning as RSSv2.
3846 if (nn->dp.chained_metadata_format && nn->fw_ver.major != 4)
3847 nn->cap &= ~NFP_NET_CFG_CTRL_RSS;
3849 /* Determine RX packet/metadata boundary offset */
3850 if (nn->fw_ver.major >= 2) {
3853 reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
3854 if (reg > NFP_NET_MAX_PREPEND) {
3855 nn_err(nn, "Invalid rx offset: %d\n", reg);
3858 nn->dp.rx_offset = reg;
3860 nn->dp.rx_offset = NFP_NET_RX_OFFSET;
3863 /* For control vNICs mask out the capabilities app doesn't want. */
3865 nn->cap &= nn->app->type->ctrl_cap_mask;
3871 * nfp_net_init() - Initialise/finalise the nfp_net structure
3872 * @nn: NFP Net device structure
3874 * Return: 0 on success or negative errno on error.
3876 int nfp_net_init(struct nfp_net *nn)
3880 nn->dp.rx_dma_dir = DMA_FROM_DEVICE;
3882 err = nfp_net_read_caps(nn);
3886 /* Set default MTU and Freelist buffer size */
3887 if (!nfp_net_is_data_vnic(nn) && nn->app->ctrl_mtu) {
3888 if (nn->app->ctrl_mtu <= nn->max_mtu) {
3889 nn->dp.mtu = nn->app->ctrl_mtu;
3891 if (nn->app->ctrl_mtu != NFP_APP_CTRL_MTU_MAX)
3892 nn_warn(nn, "app requested MTU above max supported %u > %u\n",
3893 nn->app->ctrl_mtu, nn->max_mtu);
3894 nn->dp.mtu = nn->max_mtu;
3896 } else if (nn->max_mtu < NFP_NET_DEFAULT_MTU) {
3897 nn->dp.mtu = nn->max_mtu;
3899 nn->dp.mtu = NFP_NET_DEFAULT_MTU;
3901 nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);
3903 if (nfp_app_ctrl_uses_data_vnics(nn->app))
3904 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_CMSG_DATA;
3906 if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) {
3907 nfp_net_rss_init(nn);
3908 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?:
3909 NFP_NET_CFG_CTRL_RSS;
3912 /* Allow L2 Broadcast and Multicast through by default, if supported */
3913 if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
3914 nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC;
3916 /* Allow IRQ moderation, if supported */
3917 if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
3918 nfp_net_irqmod_init(nn);
3919 nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
3922 err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
3928 nfp_net_netdev_init(nn);
3930 /* Stash the re-configuration queue away. First odd queue in TX Bar */
3931 nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
3933 /* Make sure the FW knows the netdev is supposed to be disabled here */
3934 nn_writel(nn, NFP_NET_CFG_CTRL, 0);
3935 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
3936 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
3937 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RING |
3938 NFP_NET_CFG_UPDATE_GEN);
3942 nfp_net_vecs_init(nn);
3946 return register_netdev(nn->dp.netdev);
3950 * nfp_net_clean() - Undo what nfp_net_init() did.
3951 * @nn: NFP Net device structure
3953 void nfp_net_clean(struct nfp_net *nn)
3958 unregister_netdev(nn->dp.netdev);
3959 nfp_net_reconfig_wait_posted(nn);