/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/prefetch.h>
#include <linux/irq.h>
#include <linux/iommu.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>

#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-nicvf"
#define DRV_VERSION	"1.0"
/* Supported devices */
static const struct pci_device_id nicvf_id_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_88XX_NIC_VF) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_81XX_NIC_VF) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
			 PCI_DEVICE_ID_THUNDER_NIC_VF,
			 PCI_VENDOR_ID_CAVIUM,
			 PCI_SUBSYS_DEVID_83XX_NIC_VF) },
	{ 0, }	/* end of table */
};

MODULE_AUTHOR("Sunil Goutham");
MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, nicvf_id_table);
static int debug = 0x00;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug message level bitmap");

static int cpi_alg = CPI_ALG_NONE;
module_param(cpi_alg, int, S_IRUGO);
MODULE_PARM_DESC(cpi_alg,
		 "CPI algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
{
	if (nic->sqs_mode)
		return qidx + ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);
	else
		return qidx;
}

/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation.  All accesses to the device
 * registers on this platform are implicitly strongly ordered with respect
 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
 * with no memory barriers in this driver.  The readq()/writeq() functions add
 * explicit ordering operation which in this case are redundant, and only
 * add overhead.
 */
/* Register read/write APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
{
	writeq_relaxed(val, nic->reg_base + offset);
}

u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
{
	return readq_relaxed(nic->reg_base + offset);
}

void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val)
{
	void __iomem *addr = nic->reg_base + offset;

	writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
}

u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
{
	void __iomem *addr = nic->reg_base + offset;

	return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
}
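
/* Note on the handshake below: nicvf_write_to_mbx() copies the 16-byte
 * message into the two 64-bit mailbox registers; the PF's reply arrives
 * as a mailbox interrupt which nicvf_handle_mbx_intr() decodes, setting
 * pf_acked or pf_nacked. nicvf_send_msg_to_pf() simply polls those flags
 * until the PF responds or the timeout expires.
 */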
/* VF -> PF mailbox communication */
static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
{
	u64 *msg = (u64 *)mbx;

	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
}

int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
	int timeout = NIC_MBOX_MSG_TIMEOUT;
	int sleep = 10;

	nic->pf_acked = false;
	nic->pf_nacked = false;

	nicvf_write_to_mbx(nic, mbx);

	/* Wait for previous message to be acked, timeout 2sec */
	while (!nic->pf_acked) {
		if (nic->pf_nacked) {
			netdev_err(nic->netdev,
				   "PF NACK to mbox msg 0x%02x from VF%d\n",
				   (mbx->msg.msg & 0xFF), nic->vf_id);
			return -EINVAL;
		}
		msleep(sleep);
		if (nic->pf_acked)
			break;
		timeout -= sleep;
		if (!timeout) {
			netdev_err(nic->netdev,
				   "PF didn't ACK to mbox msg 0x%02x from VF%d\n",
				   (mbx->msg.msg & 0xFF), nic->vf_id);
			return -EBUSY;
		}
	}
	return 0;
}
/* Checks if VF is able to communicate with PF
 * and also gets the VNIC number this VF is associated to.
 */
static int nicvf_check_pf_ready(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_READY;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		netdev_err(nic->netdev,
			   "PF didn't respond to READY msg\n");
		return 0;
	}

	return 1;
}

static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
{
	if (bgx->rx)
		nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
	else
		nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
}
static void nicvf_handle_mbx_intr(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	u64 *mbx_data;
	u64 mbx_addr;
	int i;

	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
	mbx_data = (u64 *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nicvf_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(u64);
	}

	netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic->pf_acked = true;
		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
		nic->node = mbx.nic_cfg.node_id;
		if (!nic->set_mac_pending)
			ether_addr_copy(nic->netdev->dev_addr,
					mbx.nic_cfg.mac_addr);
		nic->sqs_mode = mbx.nic_cfg.sqs_mode;
		nic->loopback_supported = mbx.nic_cfg.loopback_supported;
		nic->link_up = false;
		nic->duplex = 0;
		nic->speed = 0;
		break;
	case NIC_MBOX_MSG_ACK:
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_NACK:
		nic->pf_nacked = true;
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_STATS:
		nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
		nic->pf_acked = true;
		nic->link_up = mbx.link_status.link_up;
		nic->duplex = mbx.link_status.duplex;
		nic->speed = mbx.link_status.speed;
		nic->mac_type = mbx.link_status.mac_type;
		if (nic->link_up) {
			netdev_info(nic->netdev, "Link is Up %d Mbps %s duplex\n",
				    nic->speed,
				    nic->duplex == DUPLEX_FULL ?
				    "Full" : "Half");
			netif_carrier_on(nic->netdev);
			netif_tx_start_all_queues(nic->netdev);
		} else {
			netdev_info(nic->netdev, "Link is Down\n");
			netif_carrier_off(nic->netdev);
			netif_tx_stop_all_queues(nic->netdev);
		}
		break;
	case NIC_MBOX_MSG_ALLOC_SQS:
		nic->sqs_count = mbx.sqs_alloc.qs_count;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_SNICVF_PTR:
		/* Primary VF: make note of secondary VF's pointer
		 * to be used while packet transmission.
		 */
		nic->snicvf[mbx.nicvf.sqs_id] =
			(struct nicvf *)mbx.nicvf.nicvf;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_PNICVF_PTR:
		/* Secondary VF/Qset: make note of primary VF's pointer
		 * to be used while packet reception, to handover packet
		 * to primary VF's netdev.
		 */
		nic->pnicvf = (struct nicvf *)mbx.nicvf.nicvf;
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_PFC:
		nic->pfc.autoneg = mbx.pfc.autoneg;
		nic->pfc.fc_rx = mbx.pfc.fc_rx;
		nic->pfc.fc_tx = mbx.pfc.fc_tx;
		nic->pf_acked = true;
		break;
	default:
		netdev_err(nic->netdev,
			   "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
		break;
	}
	nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
}
static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev)
{
	union nic_mbx mbx = {};

	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
	mbx.mac.vf_id = nic->vf_id;
	ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);

	return nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_config_cpi(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
	mbx.cpi_cfg.vf_id = nic->vf_id;
	mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
	mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;

	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_get_rss_size(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.vf_id = nic->vf_id;
	nicvf_send_msg_to_pf(nic, &mbx);
}
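
/* The RSS indirection table is larger than what fits in one mailbox
 * message, so it is sent in chunks of RSS_IND_TBL_LEN_PER_MBX_MSG
 * entries: the first chunk goes as NIC_MBOX_MSG_RSS_CFG and the rest
 * as NIC_MBOX_MSG_RSS_CFG_CONT. For example (illustrative only), a
 * 64-entry table with 8 entries per message takes one RSS_CFG plus
 * seven RSS_CFG_CONT messages.
 */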
void nicvf_config_rss(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	struct nicvf_rss_info *rss = &nic->rss_info;
	int ind_tbl_len = rss->rss_size;
	int i, nextq = 0;

	mbx.rss_cfg.vf_id = nic->vf_id;
	mbx.rss_cfg.hash_bits = rss->hash_bits;
	while (ind_tbl_len) {
		mbx.rss_cfg.tbl_offset = nextq;
		mbx.rss_cfg.tbl_len = min(ind_tbl_len,
					  RSS_IND_TBL_LEN_PER_MBX_MSG);
		mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
			  NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;

		for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
			mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];

		nicvf_send_msg_to_pf(nic, &mbx);

		ind_tbl_len -= mbx.rss_cfg.tbl_len;
	}
}
void nicvf_set_rss_key(struct nicvf *nic)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	u64 key_addr = NIC_VNIC_RSS_KEY_0_4;
	int idx;

	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
		nicvf_reg_write(nic, key_addr, rss->key[idx]);
		key_addr += sizeof(u64);
	}
}
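
/* hash_bits below is derived from the indirection table size reported
 * by the PF: ilog2(rounddown_pow_of_two(rss_size)). E.g. an rss_size
 * of 64 gives hash_bits = 6; a non-power-of-two size such as 5 is
 * rounded down first, giving ilog2(4) = 2.
 */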
static int nicvf_rss_init(struct nicvf *nic)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	int idx;

	nicvf_get_rss_size(nic);

	if (cpi_alg != CPI_ALG_NONE) {
		rss->enable = false;
		rss->hash_bits = 0;
		return 0;
	}

	rss->enable = true;

	netdev_rss_key_fill(rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
	nicvf_set_rss_key(nic);

	rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);

	rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size));

	for (idx = 0; idx < rss->rss_size; idx++)
		rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
							       nic->rx_queues);
	nicvf_config_rss(nic);
	return 1;
}
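
/* Queue spill-over example (illustrative, assuming 8 Rx/Tx queues per
 * Qset as in nicvf_queues.h): with 12 Rx queues requested, the primary
 * Qset keeps 8 and rx_queues below becomes 4, which the loop assigns
 * to the first secondary Qset; remaining secondaries get 0.
 */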
/* Request PF to allocate additional Qsets */
static void nicvf_request_sqs(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	int sqs;
	int sqs_count = nic->sqs_count;
	int rx_queues = 0, tx_queues = 0;

	/* Only primary VF should request */
	if (nic->sqs_mode || !nic->sqs_count)
		return;

	mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
	mbx.sqs_alloc.vf_id = nic->vf_id;
	mbx.sqs_alloc.qs_count = nic->sqs_count;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		/* No response from PF */
		nic->sqs_count = 0;
		return;
	}

	/* Return if no Secondary Qsets available */
	if (!nic->sqs_count)
		return;

	if (nic->rx_queues > MAX_RCV_QUEUES_PER_QS)
		rx_queues = nic->rx_queues - MAX_RCV_QUEUES_PER_QS;

	tx_queues = nic->tx_queues + nic->xdp_tx_queues;
	if (tx_queues > MAX_SND_QUEUES_PER_QS)
		tx_queues = tx_queues - MAX_SND_QUEUES_PER_QS;

	/* Set no of Rx/Tx queues in each of the SQsets */
	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
		mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
		mbx.nicvf.vf_id = nic->vf_id;
		mbx.nicvf.sqs_id = sqs;
		nicvf_send_msg_to_pf(nic, &mbx);

		nic->snicvf[sqs]->sqs_id = sqs;
		if (rx_queues > MAX_RCV_QUEUES_PER_QS) {
			nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS;
			rx_queues -= MAX_RCV_QUEUES_PER_QS;
		} else {
			nic->snicvf[sqs]->qs->rq_cnt = rx_queues;
			rx_queues = 0;
		}

		if (tx_queues > MAX_SND_QUEUES_PER_QS) {
			nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS;
			tx_queues -= MAX_SND_QUEUES_PER_QS;
		} else {
			nic->snicvf[sqs]->qs->sq_cnt = tx_queues;
			tx_queues = 0;
		}

		nic->snicvf[sqs]->qs->cq_cnt =
		max(nic->snicvf[sqs]->qs->rq_cnt, nic->snicvf[sqs]->qs->sq_cnt);

		/* Initialize secondary Qset's queues and its interrupts */
		nicvf_open(nic->snicvf[sqs]->netdev);
	}

	/* Update stack with actual Rx/Tx queue count allocated */
	if (sqs_count != nic->sqs_count)
		nicvf_set_real_num_queues(nic->netdev,
					  nic->tx_queues, nic->rx_queues);
}
/* Send this Qset's nicvf pointer to PF.
 * PF in turn sends primary VF's nicvf struct to secondary Qsets/VFs
 * so that packets received by these Qsets can use primary VF's netdev
 */
static void nicvf_send_vf_struct(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_NICVF_PTR;
	mbx.nicvf.sqs_mode = nic->sqs_mode;
	mbx.nicvf.nicvf = (u64)nic;
	nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_get_primary_vf_struct(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
	nicvf_send_msg_to_pf(nic, &mbx);
}
int nicvf_set_real_num_queues(struct net_device *netdev,
			      int tx_queues, int rx_queues)
{
	int err = 0;

	err = netif_set_real_num_tx_queues(netdev, tx_queues);
	if (err) {
		netdev_err(netdev,
			   "Failed to set no of Tx queues: %d\n", tx_queues);
		return err;
	}

	err = netif_set_real_num_rx_queues(netdev, rx_queues);
	if (err)
		netdev_err(netdev,
			   "Failed to set no of Rx queues: %d\n", rx_queues);
	return err;
}

static int nicvf_init_resources(struct nicvf *nic)
{
	int err;

	/* Enable Qset */
	nicvf_qset_config(nic, true);

	/* Initialize queues and HW for data transfer */
	err = nicvf_config_data_transfer(nic, true);
	if (err) {
		netdev_err(nic->netdev,
			   "Failed to alloc/config VF's QSet resources\n");
		return err;
	}

	return 0;
}
static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
				struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
				struct sk_buff **skb)
{
	struct xdp_buff xdp;
	struct page *page;
	u32 action;
	u16 len, offset = 0;
	u64 dma_addr, cpu_addr;
	void *orig_data;

	/* Retrieve packet buffer's DMA address and length */
	len = *((u16 *)((void *)cqe_rx + (3 * sizeof(u64))));
	dma_addr = *((u64 *)((void *)cqe_rx + (7 * sizeof(u64))));

	cpu_addr = nicvf_iova_to_phys(nic, dma_addr);
	if (!cpu_addr)
		return false;
	cpu_addr = (u64)phys_to_virt(cpu_addr);
	page = virt_to_page((void *)cpu_addr);

	xdp.data_hard_start = page_address(page);
	xdp.data = (void *)cpu_addr;
	xdp.data_end = xdp.data + len;
	orig_data = xdp.data;

	rcu_read_lock();
	action = bpf_prog_run_xdp(prog, &xdp);
	rcu_read_unlock();

	/* Check if XDP program has changed headers */
	if (orig_data != xdp.data) {
		len = xdp.data_end - xdp.data;
		offset = orig_data - xdp.data;
		dma_addr -= offset;
	}

	switch (action) {
	case XDP_PASS:
		/* Check if it's a recycled page, if not
		 * unmap the DMA mapping.
		 *
		 * Recycled page holds an extra reference.
		 */
		if (page_ref_count(page) == 1) {
			dma_addr &= PAGE_MASK;
			dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
					     RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
		}

		/* Build SKB and pass on packet to network stack */
		*skb = build_skb(xdp.data,
				 RCV_FRAG_LEN - cqe_rx->align_pad + offset);
		if (!*skb)
			put_page(page);
		else
			skb_put(*skb, len);
		return false;
	case XDP_TX:
		nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
		return true;
	default:
		bpf_warn_invalid_xdp_action(action);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(nic->netdev, prog, action);
		/* fall through */
	case XDP_DROP:
		/* Check if it's a recycled page, if not
		 * unmap the DMA mapping.
		 *
		 * Recycled page holds an extra reference.
		 */
		if (page_ref_count(page) == 1) {
			dma_addr &= PAGE_MASK;
			dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
					     RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
		}
		put_page(page);
		return true;
	}
	return false;
}
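
/* Tx completion handler. For XDP queues the cookie stored in
 * sq->xdp_page is a page pointer; for regular queues sq->skbuff holds
 * the transmitted SKB. On 88xx with HW TSO, a dummy header subdesc
 * with 'dont_send' set points (via rsvd2) at the real TSO descriptors
 * that must be unmapped and freed here.
 */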
static void nicvf_snd_pkt_handler(struct net_device *netdev,
				  struct cqe_send_t *cqe_tx,
				  int budget, int *subdesc_cnt,
				  unsigned int *tx_pkts, unsigned int *tx_bytes)
{
	struct sk_buff *skb = NULL;
	struct page *page;
	struct nicvf *nic = netdev_priv(netdev);
	struct snd_queue *sq;
	struct sq_hdr_subdesc *hdr;
	struct sq_hdr_subdesc *tso_sqe;

	sq = &nic->qs->sq[cqe_tx->sq_idx];

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
		return;

	/* Check for errors */
	if (cqe_tx->send_status)
		nicvf_check_cqe_tx_errs(nic->pnicvf, cqe_tx);

	/* Is this a XDP designated Tx queue */
	if (sq->is_xdp) {
		page = (struct page *)sq->xdp_page[cqe_tx->sqe_ptr];
		/* Check if it's recycled page or else unmap DMA mapping */
		if (page && (page_ref_count(page) == 1))
			nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
						 hdr->subdesc_cnt);

		/* Release page reference for recycling */
		if (page)
			put_page(page);
		sq->xdp_page[cqe_tx->sqe_ptr] = (u64)NULL;
		*subdesc_cnt += hdr->subdesc_cnt + 1;
		return;
	}

	skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
	if (skb) {
		/* Check for dummy descriptor used for HW TSO offload on 88xx */
		if (hdr->dont_send) {
			/* Get actual TSO descriptors and free them */
			tso_sqe =
			 (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
			nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
						 tso_sqe->subdesc_cnt);
			*subdesc_cnt += tso_sqe->subdesc_cnt + 1;
		} else {
			nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
						 hdr->subdesc_cnt);
		}
		*subdesc_cnt += hdr->subdesc_cnt + 1;
		prefetch(skb);
		(*tx_pkts)++;
		*tx_bytes += skb->len;
		napi_consume_skb(skb, budget);
		sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
	} else {
		/* In case of SW TSO on 88xx, only last segment will have
		 * a SKB attached, so just free SQEs here.
		 */
		if (!nic->hw_tso)
			*subdesc_cnt += hdr->subdesc_cnt + 1;
	}
}
static inline void nicvf_set_rxhash(struct net_device *netdev,
				    struct cqe_rx_t *cqe_rx,
				    struct sk_buff *skb)
{
	u8 hash_type;
	u32 hash;

	if (!(netdev->features & NETIF_F_RXHASH))
		return;

	switch (cqe_rx->rss_alg) {
	case RSS_ALG_TCP_IP:
	case RSS_ALG_UDP_IP:
		hash_type = PKT_HASH_TYPE_L4;
		hash = cqe_rx->rss_tag;
		break;
	case RSS_ALG_IP:
		hash_type = PKT_HASH_TYPE_L3;
		hash = cqe_rx->rss_tag;
		break;
	default:
		hash_type = PKT_HASH_TYPE_NONE;
		hash = 0;
	}

	skb_set_hash(skb, hash, hash_type);
}
static void nicvf_rcv_pkt_handler(struct net_device *netdev,
				  struct napi_struct *napi,
				  struct cqe_rx_t *cqe_rx, struct snd_queue *sq)
{
	struct sk_buff *skb = NULL;
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf *snic = nic;
	int err = 0;
	int rq_idx;

	rq_idx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);

	if (nic->sqs_mode) {
		/* Use primary VF's 'nicvf' struct */
		nic = nic->pnicvf;
		netdev = nic->netdev;
	}

	/* Check for errors */
	if (cqe_rx->err_level || cqe_rx->err_opcode) {
		err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
		if (err && !cqe_rx->rb_cnt)
			return;
	}

	/* For XDP, ignore pkts spanning multiple pages */
	if (nic->xdp_prog && (cqe_rx->rb_cnt == 1)) {
		/* Packet consumed by XDP */
		if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, &skb))
			return;
	} else {
		skb = nicvf_get_rcv_skb(snic, cqe_rx,
					nic->xdp_prog ? true : false);
	}

	if (!skb)
		return;

	if (netif_msg_pktdata(nic)) {
		netdev_info(nic->netdev, "skb 0x%p, len=%d\n", skb, skb->len);
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
			       skb->data, skb->len, true);
	}

	/* If error packet, drop it here */
	if (err) {
		dev_kfree_skb_any(skb);
		return;
	}

	nicvf_set_rxhash(netdev, cqe_rx, skb);

	skb_record_rx_queue(skb, rq_idx);
	if (netdev->hw_features & NETIF_F_RXCSUM) {
		/* HW by default verifies TCP/UDP/SCTP checksums */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		skb_checksum_none_assert(skb);
	}

	skb->protocol = eth_type_trans(skb, netdev);

	/* Check for stripped VLAN */
	if (cqe_rx->vlan_found && cqe_rx->vlan_stripped)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       ntohs((__force __be16)cqe_rx->vlan_tci));

	if (napi && (netdev->features & NETIF_F_GRO))
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
}
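
/* Completion queue processing: the CQ STATUS register gives the count
 * of valid CQEs and the HEAD register the first one to service. Each
 * processed CQE is returned to hardware by writing the processed count
 * to the CQ doorbell. Rx work is bounded by the NAPI budget; Tx (SEND)
 * completions are always drained so transmit resources are reclaimed.
 */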
static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
				 struct napi_struct *napi, int budget)
{
	int processed_cqe, work_done = 0, tx_done = 0;
	int cqe_count, cqe_head;
	int subdesc_cnt = 0;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct cmp_queue *cq = &qs->cq[cq_idx];
	struct cqe_rx_t *cq_desc;
	struct netdev_queue *txq;
	struct snd_queue *sq = &qs->sq[cq_idx];
	unsigned int tx_pkts = 0, tx_bytes = 0, txq_idx;

	spin_lock_bh(&cq->lock);
loop:
	processed_cqe = 0;
	/* Get no of valid CQ entries to process */
	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
	cqe_count &= CQ_CQE_COUNT;
	if (!cqe_count)
		goto done;

	/* Get head of the valid CQ entries */
	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
	cqe_head &= 0xFFFF;

	while (processed_cqe < cqe_count) {
		/* Get the CQ descriptor */
		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
		cqe_head++;
		cqe_head &= (cq->dmem.q_len - 1);
		/* Initiate prefetch for next descriptor */
		prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));

		if ((work_done >= budget) && napi &&
		    (cq_desc->cqe_type != CQE_TYPE_SEND)) {
			break;
		}

		switch (cq_desc->cqe_type) {
		case CQE_TYPE_RX:
			nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq);
			work_done++;
		break;
		case CQE_TYPE_SEND:
			nicvf_snd_pkt_handler(netdev, (void *)cq_desc,
					      budget, &subdesc_cnt,
					      &tx_pkts, &tx_bytes);
			tx_done++;
		break;
		case CQE_TYPE_INVALID:
		case CQE_TYPE_RX_SPLIT:
		case CQE_TYPE_RX_TCP:
		case CQE_TYPE_SEND_PTP:
			/* Ignore for now */
		break;
		}
		processed_cqe++;
	}

	/* Ring doorbell to inform H/W to reuse processed CQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
			      cq_idx, processed_cqe);

	if ((work_done < budget) && napi)
		goto loop;

done:
	/* Update SQ's descriptor free count */
	if (subdesc_cnt)
		nicvf_put_sq_desc(sq, subdesc_cnt);

	txq_idx = nicvf_netdev_qidx(nic, cq_idx);
	/* Handle XDP TX queues */
	if (nic->pnicvf->xdp_prog) {
		if (txq_idx < nic->pnicvf->xdp_tx_queues) {
			nicvf_xdp_sq_doorbell(nic, sq, cq_idx);
			goto out;
		}
		nic = nic->pnicvf;
		txq_idx -= nic->pnicvf->xdp_tx_queues;
	}

	/* Wakeup TXQ if its stopped earlier due to SQ full */
	if (tx_done ||
	    (atomic_read(&sq->free_cnt) >= MIN_SQ_DESC_PER_PKT_XMIT)) {
		netdev = nic->pnicvf->netdev;
		txq = netdev_get_tx_queue(netdev, txq_idx);
		if (tx_pkts)
			netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

		/* To read updated queue and carrier status */
		smp_mb();
		if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
			netif_tx_wake_queue(txq);
			nic = nic->pnicvf;
			this_cpu_inc(nic->drv_stats->txq_wake);
			netif_warn(nic, tx_err, netdev,
				   "Transmit queue wakeup SQ%d\n", txq_idx);
		}
	}

out:
	spin_unlock_bh(&cq->lock);
	return work_done;
}
static int nicvf_poll(struct napi_struct *napi, int budget)
{
	u64 cq_head;
	int work_done = 0;
	struct net_device *netdev = napi->dev;
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_cq_poll *cq;

	cq = container_of(napi, struct nicvf_cq_poll, napi);
	work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);

	if (work_done < budget) {
		/* Slow packet rate, exit polling */
		napi_complete_done(napi, work_done);
		/* Re-enable interrupts */
		cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
					       cq->cq_idx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
		nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD,
				      cq->cq_idx, cq_head);
		nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
	}
	return work_done;
}
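
/* Note: in nicvf_poll() above, the current head is written back to
 * NIC_QSET_CQ_0_7_HEAD before the CQ interrupt is re-enabled. This is
 * presumably what re-arms the CQ interrupt for entries that arrived
 * while polling; treat that as an assumption about the hardware, as
 * the register-level behaviour is not documented in this file.
 */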
/* Qset error interrupt handler
 *
 * As of now only CQ errors are handled
 */
static void nicvf_handle_qs_err(unsigned long data)
{
	struct nicvf *nic = (struct nicvf *)data;
	struct queue_set *qs = nic->qs;
	int qidx;
	u64 status;

	netif_tx_disable(nic->netdev);

	/* Check if it is CQ err */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
					      qidx);
		if (!(status & CQ_ERR_MASK))
			continue;
		/* Process already queued CQEs and reconfig CQ */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_sq_disable(nic, qidx);
		nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0);
		nicvf_cmp_queue_config(nic, qs, qidx, true);
		nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);

		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
	}

	netif_tx_start_all_queues(nic->netdev);
	/* Re-enable Qset error interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
}
static void nicvf_dump_intr_status(struct nicvf *nic)
{
	netif_info(nic, intr, nic->netdev, "interrupt status 0x%llx\n",
		   nicvf_reg_read(nic, NIC_VF_INT));
}

static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;
	u64 intr;

	nicvf_dump_intr_status(nic);

	intr = nicvf_reg_read(nic, NIC_VF_INT);
	/* Check for spurious interrupt */
	if (!(intr & NICVF_INTR_MBOX_MASK))
		return IRQ_HANDLED;

	nicvf_handle_mbx_intr(nic);

	return IRQ_HANDLED;
}

static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq)
{
	struct nicvf_cq_poll *cq_poll = (struct nicvf_cq_poll *)cq_irq;
	struct nicvf *nic = cq_poll->nicvf;
	int qidx = cq_poll->cq_idx;

	nicvf_dump_intr_status(nic);

	/* Disable interrupts */
	nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Schedule NAPI */
	napi_schedule_irqoff(&cq_poll->napi);

	/* Clear interrupt */
	nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);

	return IRQ_HANDLED;
}

static irqreturn_t nicvf_rbdr_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;
	u8 qidx;

	nicvf_dump_intr_status(nic);

	/* Disable RBDR interrupt and schedule softirq */
	for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
		if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
			continue;
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		tasklet_hi_schedule(&nic->rbdr_task);
		/* Clear interrupt */
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}

	return IRQ_HANDLED;
}

static irqreturn_t nicvf_qs_err_intr_handler(int irq, void *nicvf_irq)
{
	struct nicvf *nic = (struct nicvf *)nicvf_irq;

	nicvf_dump_intr_status(nic);

	/* Disable Qset err interrupt and schedule softirq */
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	tasklet_hi_schedule(&nic->qs_err_task);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	return IRQ_HANDLED;
}
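
/* MSI-X vector layout (per the NICVF_INTR_ID_* defines in nic.h): CQ
 * vectors come first, then SQ, RBDR, MISC (mailbox) and QS_ERR. That
 * ordering is what the 'vec < NICVF_INTR_ID_SQ' test below relies on
 * to pin only CQ vectors to per-queue CPUs, leaving CPU0 for the rest.
 */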
static void nicvf_set_irq_affinity(struct nicvf *nic)
{
	int vec, cpu;

	for (vec = 0; vec < nic->num_vec; vec++) {
		if (!nic->irq_allocated[vec])
			continue;

		if (!zalloc_cpumask_var(&nic->affinity_mask[vec], GFP_KERNEL))
			return;

		if (vec < NICVF_INTR_ID_SQ)
			/* Leave CPU0 for RBDR and other interrupts */
			cpu = nicvf_netdev_qidx(nic, vec) + 1;
		else
			cpu = 0;

		cpumask_set_cpu(cpumask_local_spread(cpu, nic->node),
				nic->affinity_mask[vec]);
		irq_set_affinity_hint(pci_irq_vector(nic->pdev, vec),
				      nic->affinity_mask[vec]);
	}
}
static int nicvf_register_interrupts(struct nicvf *nic)
{
	int irq, ret = 0;

	for_each_cq_irq(irq)
		sprintf(nic->irq_name[irq], "%s-rxtx-%d",
			nic->pnicvf->netdev->name,
			nicvf_netdev_qidx(nic, irq));

	for_each_sq_irq(irq)
		sprintf(nic->irq_name[irq], "%s-sq-%d",
			nic->pnicvf->netdev->name,
			nicvf_netdev_qidx(nic, irq - NICVF_INTR_ID_SQ));

	for_each_rbdr_irq(irq)
		sprintf(nic->irq_name[irq], "%s-rbdr-%d",
			nic->pnicvf->netdev->name,
			nic->sqs_mode ? (nic->sqs_id + 1) : 0);

	/* Register CQ interrupts */
	for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
		ret = request_irq(pci_irq_vector(nic->pdev, irq),
				  nicvf_intr_handler,
				  0, nic->irq_name[irq], nic->napi[irq]);
		if (ret)
			goto err;
		nic->irq_allocated[irq] = true;
	}

	/* Register RBDR interrupt */
	for (irq = NICVF_INTR_ID_RBDR;
	     irq < (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt); irq++) {
		ret = request_irq(pci_irq_vector(nic->pdev, irq),
				  nicvf_rbdr_intr_handler,
				  0, nic->irq_name[irq], nic);
		if (ret)
			goto err;
		nic->irq_allocated[irq] = true;
	}

	/* Register QS error interrupt */
	sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR], "%s-qset-err-%d",
		nic->pnicvf->netdev->name,
		nic->sqs_mode ? (nic->sqs_id + 1) : 0);
	irq = NICVF_INTR_ID_QS_ERR;
	ret = request_irq(pci_irq_vector(nic->pdev, irq),
			  nicvf_qs_err_intr_handler,
			  0, nic->irq_name[irq], nic);
	if (ret)
		goto err;

	nic->irq_allocated[irq] = true;

	/* Set IRQ affinities */
	nicvf_set_irq_affinity(nic);

err:
	if (ret)
		netdev_err(nic->netdev, "request_irq failed, vector %d\n", irq);

	return ret;
}
static void nicvf_unregister_interrupts(struct nicvf *nic)
{
	struct pci_dev *pdev = nic->pdev;
	int irq;

	/* Free registered interrupts */
	for (irq = 0; irq < nic->num_vec; irq++) {
		if (!nic->irq_allocated[irq])
			continue;

		irq_set_affinity_hint(pci_irq_vector(pdev, irq), NULL);
		free_cpumask_var(nic->affinity_mask[irq]);

		if (irq < NICVF_INTR_ID_SQ)
			free_irq(pci_irq_vector(pdev, irq), nic->napi[irq]);
		else
			free_irq(pci_irq_vector(pdev, irq), nic);

		nic->irq_allocated[irq] = false;
	}

	/* Disable MSI-X */
	pci_free_irq_vectors(pdev);
	nic->num_vec = 0;
}
/* Initialize MSIX vectors and register MISC interrupt.
 * Send READY message to PF to check if its alive
 */
static int nicvf_register_misc_interrupt(struct nicvf *nic)
{
	int ret = 0;
	int irq = NICVF_INTR_ID_MISC;

	/* Return if mailbox interrupt is already registered */
	if (nic->pdev->msix_enabled)
		return 0;

	/* Enable MSI-X */
	nic->num_vec = pci_msix_vec_count(nic->pdev);
	ret = pci_alloc_irq_vectors(nic->pdev, nic->num_vec, nic->num_vec,
				    PCI_IRQ_MSIX);
	if (ret < 0) {
		netdev_err(nic->netdev,
			   "Req for #%d msix vectors failed\n", nic->num_vec);
		return 1;
	}

	sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
	/* Register Misc interrupt */
	ret = request_irq(pci_irq_vector(nic->pdev, irq),
			  nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);

	if (ret)
		return ret;
	nic->irq_allocated[irq] = true;

	/* Enable mailbox interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);

	/* Check if VF is able to communicate with PF */
	if (!nicvf_check_pf_ready(nic)) {
		nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
		nicvf_unregister_interrupts(nic);
		return 1;
	}

	return 0;
}
static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct nicvf *nic = netdev_priv(netdev);
	int qid = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);
	struct nicvf *snic;
	struct snd_queue *sq;
	int tmp;

	/* Check for minimum packet length */
	if (skb->len <= ETH_HLEN) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* In XDP case, initial HW tx queues are used for XDP,
	 * but stack's queue mapping starts at '0', so skip the
	 * Tx queues attached to Rx queues for XDP.
	 */
	if (nic->xdp_prog)
		qid += nic->xdp_tx_queues;

	snic = nic;
	/* Get secondary Qset's SQ structure */
	if (qid >= MAX_SND_QUEUES_PER_QS) {
		tmp = qid / MAX_SND_QUEUES_PER_QS;
		snic = (struct nicvf *)nic->snicvf[tmp - 1];
		if (!snic) {
			netdev_warn(nic->netdev,
				    "Secondary Qset#%d's ptr not initialized\n",
				    tmp - 1);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		qid = qid % MAX_SND_QUEUES_PER_QS;
	}

	sq = &snic->qs->sq[qid];
	if (!netif_tx_queue_stopped(txq) &&
	    !nicvf_sq_append_skb(snic, sq, skb, qid)) {
		netif_tx_stop_queue(txq);

		/* Barrier, so that stop_queue visible to other cpus */
		smp_mb();

		/* Check again, in case another cpu freed descriptors */
		if (atomic_read(&sq->free_cnt) > MIN_SQ_DESC_PER_PKT_XMIT) {
			netif_tx_wake_queue(txq);
		} else {
			this_cpu_inc(nic->drv_stats->txq_stop);
			netif_warn(nic, tx_err, netdev,
				   "Transmit ring full, stopping SQ%d\n", qid);
		}
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}
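
/* The stop/wake protocol above pairs with nicvf_cq_intr_handler(): the
 * xmit path stops the queue when the SQ fills, issues smp_mb(), then
 * re-checks free_cnt in case completions freed descriptors meanwhile;
 * the completion path does the mirror-image smp_mb() before testing
 * netif_tx_queue_stopped() and waking the queue. This is the standard
 * lock-free ring producer/consumer handshake used by netdev drivers.
 */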
static inline void nicvf_free_cq_poll(struct nicvf *nic)
{
	struct nicvf_cq_poll *cq_poll;
	int qidx;

	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		nic->napi[qidx] = NULL;
		kfree(cq_poll);
	}
}
int nicvf_stop(struct net_device *netdev)
{
	int irq, qidx;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
	nicvf_send_msg_to_pf(nic, &mbx);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(nic->netdev);
	nic->link_up = false;

	/* Teardown secondary qsets first */
	if (!nic->sqs_mode) {
		for (qidx = 0; qidx < nic->sqs_count; qidx++) {
			if (!nic->snicvf[qidx])
				continue;
			nicvf_stop(nic->snicvf[qidx]->netdev);
			nic->snicvf[qidx] = NULL;
		}
	}

	/* Disable RBDR & QS error interrupts */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
		nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
	}
	nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
	nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Wait for pending IRQ handlers to finish */
	for (irq = 0; irq < nic->num_vec; irq++)
		synchronize_irq(pci_irq_vector(nic->pdev, irq));

	tasklet_kill(&nic->rbdr_task);
	tasklet_kill(&nic->qs_err_task);
	if (nic->rb_work_scheduled)
		cancel_delayed_work_sync(&nic->rbdr_work);

	for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		napi_synchronize(&cq_poll->napi);
		/* CQ intr is enabled while napi_complete,
		 * so disable it now
		 */
		nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
		nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
	}

	netif_tx_disable(netdev);

	for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));

	/* Free resources */
	nicvf_config_data_transfer(nic, false);

	/* Disable HW Qset */
	nicvf_qset_config(nic, false);

	/* disable mailbox interrupt */
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);

	nicvf_unregister_interrupts(nic);

	nicvf_free_cq_poll(nic);

	/* Clear multiqset info */
	nic->pnicvf = nic;

	return 0;
}
static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
{
	union nic_mbx mbx = {};

	mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
	mbx.frs.max_frs = mtu;
	mbx.frs.vf_id = nic->vf_id;

	return nicvf_send_msg_to_pf(nic, &mbx);
}
int nicvf_open(struct net_device *netdev)
{
	int cpu, err, qidx;
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	struct nicvf_cq_poll *cq_poll = NULL;
	union nic_mbx mbx = {};

	netif_carrier_off(netdev);

	err = nicvf_register_misc_interrupt(nic);
	if (err)
		return err;

	/* Register NAPI handler for processing CQEs */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);
		if (!cq_poll) {
			err = -ENOMEM;
			goto napi_del;
		}
		cq_poll->cq_idx = qidx;
		cq_poll->nicvf = nic;
		netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&cq_poll->napi);
		nic->napi[qidx] = cq_poll;
	}

	/* Check if we got MAC address from PF or else generate a random MAC */
	if (!nic->sqs_mode && is_zero_ether_addr(netdev->dev_addr)) {
		eth_hw_addr_random(netdev);
		nicvf_hw_set_mac_addr(nic, netdev);
	}

	if (nic->set_mac_pending) {
		nic->set_mac_pending = false;
		nicvf_hw_set_mac_addr(nic, netdev);
	}

	/* Init tasklet for handling Qset err interrupt */
	tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
		     (unsigned long)nic);

	/* Init RBDR tasklet which will refill RBDR */
	tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
		     (unsigned long)nic);
	INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);

	/* Configure CPI algorithm */
	nic->cpi_alg = cpi_alg;
	if (!nic->sqs_mode)
		nicvf_config_cpi(nic);

	nicvf_request_sqs(nic);
	if (nic->sqs_mode)
		nicvf_get_primary_vf_struct(nic);

	/* Configure receive side scaling and MTU */
	if (!nic->sqs_mode) {
		nicvf_rss_init(nic);
		err = nicvf_update_hw_max_frs(nic, netdev->mtu);
		if (err)
			goto cleanup;

		/* Clear percpu stats */
		for_each_possible_cpu(cpu)
			memset(per_cpu_ptr(nic->drv_stats, cpu), 0,
			       sizeof(struct nicvf_drv_stats));
	}

	err = nicvf_register_interrupts(nic);
	if (err)
		goto cleanup;

	/* Initialize the queues */
	err = nicvf_init_resources(nic);
	if (err)
		goto cleanup;

	/* Make sure queue initialization is written */
	wmb();

	nicvf_reg_write(nic, NIC_VF_INT, -1);
	/* Enable Qset err interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Enable completion queue interrupt */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Enable RBDR threshold interrupt */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);

	/* Send VF config done msg to PF */
	mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
	nicvf_write_to_mbx(nic, &mbx);

	return 0;
cleanup:
	nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
	nicvf_unregister_interrupts(nic);
	tasklet_kill(&nic->qs_err_task);
	tasklet_kill(&nic->rbdr_task);
napi_del:
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		cq_poll = nic->napi[qidx];
		if (!cq_poll)
			continue;
		napi_disable(&cq_poll->napi);
		netif_napi_del(&cq_poll->napi);
	}
	nicvf_free_cq_poll(nic);
	return err;
}
static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct nicvf *nic = netdev_priv(netdev);
	int orig_mtu = netdev->mtu;

	netdev->mtu = new_mtu;

	if (!netif_running(netdev))
		return 0;

	if (nicvf_update_hw_max_frs(nic, new_mtu)) {
		netdev->mtu = orig_mtu;
		return -EINVAL;
	}

	return 0;
}

static int nicvf_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;
	struct nicvf *nic = netdev_priv(netdev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	if (nic->pdev->msix_enabled) {
		if (nicvf_hw_set_mac_addr(nic, netdev))
			return -EBUSY;
	} else {
		nic->set_mac_pending = true;
	}

	return 0;
}
void nicvf_update_lmac_stats(struct nicvf *nic)
{
	int stat = 0;
	union nic_mbx mbx = {};

	if (!netif_running(nic->netdev))
		return;

	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
	mbx.bgx_stats.vf_id = nic->vf_id;
	/* Rx stats */
	mbx.bgx_stats.rx = 1;
	while (stat < BGX_RX_STATS_COUNT) {
		mbx.bgx_stats.idx = stat;
		if (nicvf_send_msg_to_pf(nic, &mbx))
			return;
		stat++;
	}

	stat = 0;

	/* Tx stats */
	mbx.bgx_stats.rx = 0;
	while (stat < BGX_TX_STATS_COUNT) {
		mbx.bgx_stats.idx = stat;
		if (nicvf_send_msg_to_pf(nic, &mbx))
			return;
		stat++;
	}
}
void nicvf_update_stats(struct nicvf *nic)
{
	int qidx, cpu;
	u64 tmp_stats = 0;
	struct nicvf_hw_stats *stats = &nic->hw_stats;
	struct nicvf_drv_stats *drv_stats;
	struct queue_set *qs = nic->qs;

#define GET_RX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
#define GET_TX_STATS(reg) \
	nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))

	stats->rx_bytes = GET_RX_STATS(RX_OCTS);
	stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
	stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
	stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
	stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
	stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
	stats->rx_drop_red = GET_RX_STATS(RX_RED);
	stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
	stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
	stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
	stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
	stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
	stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
	stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);

	stats->tx_bytes = GET_TX_STATS(TX_OCTS);
	stats->tx_ucast_frames = GET_TX_STATS(TX_UCAST);
	stats->tx_bcast_frames = GET_TX_STATS(TX_BCAST);
	stats->tx_mcast_frames = GET_TX_STATS(TX_MCAST);
	stats->tx_drops = GET_TX_STATS(TX_DROP);

	/* On T88 pass 2.0, the dummy SQE added for TSO notification
	 * via CQE has 'dont_send' set. Hence HW drops the pkt pointed
	 * to by the dummy SQE and results in tx_drops counter being
	 * incremented. Subtracting it from tx_tso counter will give
	 * exact tx_drops counter.
	 */
	if (nic->t88 && nic->hw_tso) {
		for_each_possible_cpu(cpu) {
			drv_stats = per_cpu_ptr(nic->drv_stats, cpu);
			tmp_stats += drv_stats->tx_tso;
		}
		stats->tx_drops = tmp_stats - stats->tx_drops;
	}
	stats->tx_frames = stats->tx_ucast_frames +
			   stats->tx_bcast_frames +
			   stats->tx_mcast_frames;
	stats->rx_frames = stats->rx_ucast_frames +
			   stats->rx_bcast_frames +
			   stats->rx_mcast_frames;
	stats->rx_drops = stats->rx_drop_red +
			  stats->rx_drop_overrun;

	/* Update RQ and SQ stats */
	for (qidx = 0; qidx < qs->rq_cnt; qidx++)
		nicvf_update_rq_stats(nic, qidx);
	for (qidx = 0; qidx < qs->sq_cnt; qidx++)
		nicvf_update_sq_stats(nic, qidx);
}
static void nicvf_get_stats64(struct net_device *netdev,
			      struct rtnl_link_stats64 *stats)
{
	struct nicvf *nic = netdev_priv(netdev);
	struct nicvf_hw_stats *hw_stats = &nic->hw_stats;

	nicvf_update_stats(nic);

	stats->rx_bytes = hw_stats->rx_bytes;
	stats->rx_packets = hw_stats->rx_frames;
	stats->rx_dropped = hw_stats->rx_drops;
	stats->multicast = hw_stats->rx_mcast_frames;

	stats->tx_bytes = hw_stats->tx_bytes;
	stats->tx_packets = hw_stats->tx_frames;
	stats->tx_dropped = hw_stats->tx_drops;
}
static void nicvf_tx_timeout(struct net_device *dev)
{
	struct nicvf *nic = netdev_priv(dev);

	netif_warn(nic, tx_err, dev, "Transmit timed out, resetting\n");

	this_cpu_inc(nic->drv_stats->tx_timeout);
	schedule_work(&nic->reset_task);
}

static void nicvf_reset_task(struct work_struct *work)
{
	struct nicvf *nic;

	nic = container_of(work, struct nicvf, reset_task);

	if (!netif_running(nic->netdev))
		return;

	nicvf_stop(nic->netdev);
	nicvf_open(nic->netdev);
	netif_trans_update(nic->netdev);
}
static int nicvf_config_loopback(struct nicvf *nic,
				 netdev_features_t features)
{
	union nic_mbx mbx = {};

	mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;
	mbx.lbk.vf_id = nic->vf_id;
	mbx.lbk.enable = (features & NETIF_F_LOOPBACK) != 0;

	return nicvf_send_msg_to_pf(nic, &mbx);
}

static netdev_features_t nicvf_fix_features(struct net_device *netdev,
					    netdev_features_t features)
{
	struct nicvf *nic = netdev_priv(netdev);

	if ((features & NETIF_F_LOOPBACK) &&
	    netif_running(netdev) && !nic->loopback_supported)
		features &= ~NETIF_F_LOOPBACK;

	return features;
}

static int nicvf_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct nicvf *nic = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		nicvf_config_vlan_stripping(nic, features);

	if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
		return nicvf_config_loopback(nic, features);

	return 0;
}
static void nicvf_set_xdp_queues(struct nicvf *nic, bool bpf_attached)
{
	u8 cq_count, txq_count;

	/* Set XDP Tx queue count same as Rx queue count */
	if (!bpf_attached)
		nic->xdp_tx_queues = 0;
	else
		nic->xdp_tx_queues = nic->rx_queues;

	/* If queue count > MAX_CMP_QUEUES_PER_QS, then additional qsets
	 * needs to be allocated, check how many.
	 */
	txq_count = nic->xdp_tx_queues + nic->tx_queues;
	cq_count = max(nic->rx_queues, txq_count);
	if (cq_count > MAX_CMP_QUEUES_PER_QS) {
		nic->sqs_count = roundup(cq_count, MAX_CMP_QUEUES_PER_QS);
		nic->sqs_count = (nic->sqs_count / MAX_CMP_QUEUES_PER_QS) - 1;
	} else {
		nic->sqs_count = 0;
	}

	/* Set primary Qset's resources */
	nic->qs->rq_cnt = min_t(u8, nic->rx_queues, MAX_RCV_QUEUES_PER_QS);
	nic->qs->sq_cnt = min_t(u8, txq_count, MAX_SND_QUEUES_PER_QS);
	nic->qs->cq_cnt = max_t(u8, nic->qs->rq_cnt, nic->qs->sq_cnt);

	/* Update stack */
	nicvf_set_real_num_queues(nic->netdev, nic->tx_queues, nic->rx_queues);
}
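
/* Example of the sizing above (illustrative, assuming 8 queues per
 * Qset as in nicvf_queues.h): with 8 Rx and 8 Tx queues and XDP
 * attached, xdp_tx_queues = 8, txq_count = 16 and cq_count = 16, so
 * sqs_count = roundup(16, 8) / 8 - 1 = 1 secondary Qset is requested;
 * the primary Qset keeps 8 RQs, 8 SQs and 8 CQs.
 */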
static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
{
	struct net_device *dev = nic->netdev;
	bool if_up = netif_running(nic->netdev);
	struct bpf_prog *old_prog;
	bool bpf_attached = false;

	/* For now just support only the usual MTU sized frames */
	if (prog && (dev->mtu > 1500)) {
		netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
			    dev->mtu);
		return -EOPNOTSUPP;
	}

	/* ALL SQs attached to CQs i.e same as RQs, are treated as
	 * XDP Tx queues and more Tx queues are allocated for
	 * network stack to send pkts out.
	 *
	 * No of Tx queues are either same as Rx queues or whatever
	 * is left in max no of queues possible.
	 */
	if ((nic->rx_queues + nic->tx_queues) > nic->max_queues) {
		netdev_warn(dev,
			    "Failed to attach BPF prog, RXQs + TXQs > Max %d\n",
			    nic->max_queues);
		return -ENOMEM;
	}

	if (if_up)
		nicvf_stop(nic->netdev);

	old_prog = xchg(&nic->xdp_prog, prog);
	/* Detach old prog, if any */
	if (old_prog)
		bpf_prog_put(old_prog);

	if (nic->xdp_prog) {
		/* Attach BPF program */
		nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
		if (!IS_ERR(nic->xdp_prog))
			bpf_attached = true;
	}

	/* Calculate Tx queues needed for XDP and network stack */
	nicvf_set_xdp_queues(nic, bpf_attached);

	if (if_up) {
		/* Reinitialize interface, clean slate */
		nicvf_open(nic->netdev);
		netif_trans_update(nic->netdev);
	}

	return 0;
}
static int nicvf_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
{
	struct nicvf *nic = netdev_priv(netdev);

	/* To avoid checks while retrieving buffer address from CQE_RX,
	 * do not support XDP for T88 pass1.x silicons which are anyway
	 * not in use widely.
	 */
	if (pass1_silicon(nic->pdev))
		return -EOPNOTSUPP;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return nicvf_xdp_setup(nic, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_attached = !!nic->xdp_prog;
		xdp->prog_id = nic->xdp_prog ? nic->xdp_prog->aux->id : 0;
		return 0;
	default:
		return -EINVAL;
	}
}
static const struct net_device_ops nicvf_netdev_ops = {
	.ndo_open		= nicvf_open,
	.ndo_stop		= nicvf_stop,
	.ndo_start_xmit		= nicvf_xmit,
	.ndo_change_mtu		= nicvf_change_mtu,
	.ndo_set_mac_address	= nicvf_set_mac_address,
	.ndo_get_stats64	= nicvf_get_stats64,
	.ndo_tx_timeout		= nicvf_tx_timeout,
	.ndo_fix_features	= nicvf_fix_features,
	.ndo_set_features	= nicvf_set_features,
	.ndo_xdp		= nicvf_xdp,
};
static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct nicvf *nic;
	int err, qcount;
	u16 sdevid;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n");
		goto err_release_regions;
	}

	qcount = netif_get_num_default_rss_queues();

	/* Restrict multiqset support only for host bound VFs */
	if (pdev->is_virtfn) {
		/* Set max number of queues per VF */
		qcount = min_t(int, num_online_cpus(),
			       (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
	}

	netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount, qcount);
	if (!netdev) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	nic = netdev_priv(netdev);
	nic->netdev = netdev;
	nic->pdev = pdev;
	nic->pnicvf = nic;
	nic->max_queues = qcount;

	/* MAP VF's configuration registers */
	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!nic->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto err_free_netdev;
	}

	nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats);
	if (!nic->drv_stats) {
		err = -ENOMEM;
		goto err_free_netdev;
	}

	err = nicvf_set_qset_resources(nic);
	if (err)
		goto err_free_netdev;

	/* Check if PF is alive and get MAC address for this VF */
	err = nicvf_register_misc_interrupt(nic);
	if (err)
		goto err_free_netdev;

	nicvf_send_vf_struct(nic);

	if (!pass1_silicon(nic->pdev))
		nic->hw_tso = true;

	/* Get iommu domain for iova to physical addr conversion */
	nic->iommu_domain = iommu_get_domain_for_dev(dev);

	pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
	if (sdevid == 0xA134)
		nic->t88 = true;

	/* Check if this VF is in QS only mode */
	if (nic->sqs_mode)
		return 0;

	err = nicvf_set_real_num_queues(netdev, nic->tx_queues, nic->rx_queues);
	if (err)
		goto err_unregister_interrupts;

	netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_SG |
			       NETIF_F_TSO | NETIF_F_GRO | NETIF_F_TSO6 |
			       NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			       NETIF_F_HW_VLAN_CTAG_RX);

	netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= NETIF_F_LOOPBACK;

	netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM |
				NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6;

	netdev->netdev_ops = &nicvf_netdev_ops;
	netdev->watchdog_timeo = NICVF_TX_TIMEOUT;

	/* MTU range: 64 - 9200 */
	netdev->min_mtu = NIC_HW_MIN_FRS;
	netdev->max_mtu = NIC_HW_MAX_FRS;

	INIT_WORK(&nic->reset_task, nicvf_reset_task);

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Failed to register netdevice\n");
		goto err_unregister_interrupts;
	}

	nic->msg_enable = debug;

	nicvf_set_ethtool_ops(netdev);

	return 0;

err_unregister_interrupts:
	nicvf_unregister_interrupts(nic);
err_free_netdev:
	pci_set_drvdata(pdev, NULL);
	if (nic->drv_stats)
		free_percpu(nic->drv_stats);
	free_netdev(netdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	return err;
}
static void nicvf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nicvf *nic;
	struct net_device *pnetdev;

	if (!netdev)
		return;

	nic = netdev_priv(netdev);
	pnetdev = nic->pnicvf->netdev;

	/* Check if this Qset is assigned to different VF.
	 * If yes, clean primary and all secondary Qsets.
	 */
	if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
		unregister_netdev(pnetdev);
	nicvf_unregister_interrupts(nic);
	pci_set_drvdata(pdev, NULL);
	if (nic->drv_stats)
		free_percpu(nic->drv_stats);
	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static void nicvf_shutdown(struct pci_dev *pdev)
{
	nicvf_remove(pdev);
}
static struct pci_driver nicvf_driver = {
	.name = DRV_NAME,
	.id_table = nicvf_id_table,
	.probe = nicvf_probe,
	.remove = nicvf_remove,
	.shutdown = nicvf_shutdown,
};

static int __init nicvf_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&nicvf_driver);
}

static void __exit nicvf_cleanup_module(void)
{
	pci_unregister_driver(&nicvf_driver);
}

module_init(nicvf_init_module);
module_exit(nicvf_cleanup_module);