/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>

#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)
#define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");

#define BNXT_RX_OFFSET		(NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET	NET_SKB_PAD
#define BNXT_RX_COPY_THRESH	256

#define BNXT_TX_PUSH_THRESH	164
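
/* RX buffers are offset by NET_SKB_PAD + NET_IP_ALIGN so the IP header
 * lands on an aligned boundary; received frames no larger than
 * BNXT_RX_COPY_THRESH bytes are copied into a fresh skb instead of
 * giving up the data buffer.  BNXT_TX_PUSH_THRESH is the largest frame
 * the xmit path will "push" inline through the TX doorbell (only when
 * the ring is otherwise empty) instead of letting the NIC DMA it.
 */
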
	NETXTREME_E_P5_VF_HV,

/* indexed by enum above */
static const struct {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
	[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
	[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
	[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
	[NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
	[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_CFA_L2_FILTER_ALLOC,

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
	ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
	ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
	ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,

static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF);

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)	\
	writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)	\
	writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)	\
	writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)	\
	writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)	\
	writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)
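
/* Completion/notification queue doorbells.  Legacy chips use 32-bit
 * writel() doorbells keyed by DB_KEY_CP; P5 chips use 64-bit writeq()
 * doorbells built from db_key64 plus a DBR_TYPE_* opcode.  The helpers
 * below pick the right format based on BNXT_FLAG_CHIP_P5.
 */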
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_P5(db, idx);

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_ARM_P5(db, idx);
		BNXT_DB_CQ_ARM(db, idx);

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
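
/* TX length hint for the BD flags: the array is indexed by the packet
 * length in 512-byte units, so entry 0 covers frames up to 512 bytes
 * and everything from 2 KB up maps to LHINT_2048_AND_LARGER.
 */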
const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,

static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)

	return md_dst->u.port_info.port_id;
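
/* Main transmit routine.  Small frames on an otherwise empty ring take
 * the inline "push" path below; everything else is DMA-mapped and
 * described by a long TX BD plus one BD per fragment, with LSO and
 * checksum offload flags filled into the extended BD.
 */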
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;

	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];

	tx_buf->nr_frags = last_frag;

	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;

	if (unlikely(skb->no_fcs)) {
		lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void __iomem *db = txr->tx_db.doorbell;
		void *pdata = tx_push_buf->data;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
				    TX_BD_TYPE_LONG_TX_BD |
				    TX_BD_FLAGS_LHINT_512_AND_SMALLER |
				    TX_BD_FLAGS_COAL_NOW |
				    TX_BD_FLAGS_PACKET_END |
				    (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
				cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;

		skb_copy_from_linear_data(skb, pdata, len);
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];

			fptr = skb_frag_address_safe(frag);

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);

		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
			__iowrite64_copy(db, tx_push_buf, 16);
			__iowrite32_copy(db + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
			__iowrite64_copy(db, tx_push_buf, push_len);

	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed. */
		length = BNXT_MIN_PKT_SIZE;

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = lflags;
	if (skb_is_gso(skb)) {

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				  skb_inner_network_header_len(skb) +
				  inner_tcp_hdrlen(skb);
			hdr_len = skb_transport_offset(skb) +

		txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags |=
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;

	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",

	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
		cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating doorbell */

	prod = NEXT_TX(prod);

	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		bnxt_db_write(bp, &txr->tx_db, prod);

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		if (netdev_xmit_more() && !tx_buf->is_push)
			bnxt_db_write(bp, &txr->tx_db, prod);

		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnxt_tx_avail() below, because in
		 * bnxt_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);

	/* start back at beginning and unmap skb */
	tx_buf = &txr->tx_buf_ring[prod];
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),

	dev_kfree_skb_any(skb);
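
/* TX completion: walk the nr_pkts packets reported by the completion
 * ring, unmap and free each skb (push packets have nothing mapped),
 * report the byte/packet totals to BQL, and re-wake the queue once
 * enough descriptors are free again.
 */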
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);

		if (tx_buf->is_push) {

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
				       dma_unmap_addr(tx_buf, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[j]),

		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
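
/* RX buffer allocation.  In page mode (typically used with XDP) whole
 * pages come from the per-ring page_pool; otherwise the receive buffer
 * is a kmalloc'd data area that can later be turned directly into an
 * skb via build_skb().
 */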
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr,
	struct device *dev = &bp->pdev->dev;

	page = page_pool_dev_alloc_pages(rxr->page_pool);

	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
				      DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(dev, *mapping)) {
		page_pool_recycle_direct(rxr->page_pool, page);
	*mapping += bp->rx_dma_offset;

static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
	struct pci_dev *pdev = bp->pdev;

	data = kmalloc(bp->rx_buf_size, gfp);

	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
					bp->rx_buf_use_size, bp->rx_dir,
					DMA_ATTR_WEAK_ORDERING);

	if (dma_mapping_error(&pdev->dev, *mapping)) {

int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];

	if (BNXT_RX_PAGE_MODE(bp)) {
			__bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);

		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
		u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

		rx_buf->data_ptr = data + bp->rx_offset;
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);

void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
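
/* Aggregation ring buffers hold the payload pages that the chip chains
 * onto a packet beyond its first buffer (jumbo frames and TPA).  The
 * software producer index is tracked with a bitmap because completions
 * can hand aggregation buffers back out of order.
 */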
static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
			page = alloc_page(gfp);
			rxr->rx_page_offset = 0;
		offset = rxr->rx_page_offset;
		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
		if (rxr->rx_page_offset == PAGE_SIZE)
		page = alloc_page(gfp);

	mapping = dma_map_page_attrs(&pdev->dev, page, offset,
				     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(&pdev->dev, mapping)) {

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;

static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
				       struct bnxt_cp_ring_info *cpr,
				       u16 cp_cons, u16 curr)
	struct rx_agg_cmp *agg;

	cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 agg_id, u16 curr)
	struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

	return &tpa_info->agg_arr[curr];

static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
				   u16 start, u32 agg_bufs, bool tpa)
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)

	for (i = 0; i < agg_bufs; i++) {
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;

			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
			agg = bnxt_get_agg(bp, cpr, idx, start + i);
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);

	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;

static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					unsigned int offset_and_len)
	unsigned int payload = offset_and_len >> 16;
	unsigned int len = offset_and_len & 0xffff;
	struct page *page = data;
	u16 prod = rxr->rx_prod;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
		bnxt_reuse_rx_data(rxr, cons, data);

	dma_addr -= bp->rx_dma_offset;
	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
			     DMA_ATTR_WEAK_ORDERING);
	page_pool_release_page(rxr->page_pool, page);

	if (unlikely(!payload))
		payload = eth_get_headlen(bp->dev, data_ptr, len);

	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);

	off = (void *)data_ptr - page_address(page);
	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
	       payload + NET_IP_ALIGN);

	frag = &skb_shinfo(skb)->frags[0];
	skb_frag_size_sub(frag, payload);
	skb_frag_off_add(frag, payload);
	skb->data_len -= payload;
	skb->tail += payload;

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   unsigned int offset_and_len)
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);

	skb = build_skb(data, 0);
	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);

	skb_reserve(skb, bp->rx_offset);
	skb_put(skb, offset_and_len & 0xffff);

static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
				     struct bnxt_cp_ring_info *cpr,
				     struct sk_buff *skb, u16 idx,
				     u32 agg_bufs, bool tpa)
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	bool p5_tpa = false;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)

	for (i = 0; i < agg_bufs; i++) {
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;

			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
			agg = bnxt_get_agg(bp, cpr, idx, i);
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_fill_page_desc(skb, i, cons_rx_buf->page,
				   cons_rx_buf->offset, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = cons_rx_buf->mapping;
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			struct skb_shared_info *shinfo;
			unsigned int nr_frags;

			shinfo = skb_shinfo(skb);
			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);

		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
				     DMA_ATTR_WEAK_ORDERING);

		skb->data_len += frag_len;
		skb->len += frag_len;
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
	rxr->rx_agg_prod = prod;

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);

static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);

	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);
	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			   u32 *raw_cons, void *cmp)
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		if (bp->flags & BNXT_FLAG_CHIP_P5)

		agg_bufs = TPA_END_AGG_BUFS(tpa_end);

		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
	*raw_cons = tmp_raw_cons;

static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))

		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
		schedule_delayed_work(&bp->fw_reset_task, delay);

static void bnxt_queue_sp_work(struct bnxt *bp)
		queue_work(bnxt_pf_wq, &bp->sp_task);
		schedule_work(&bp->sp_task);

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		if (bp->flags & BNXT_FLAG_CHIP_P5)
			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	rxr->rx_next_cons = 0xffff;

static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
	u16 idx = agg_id & MAX_TPA_P5_MASK;

	if (test_bit(idx, map->agg_idx_bmap))
		idx = find_first_zero_bit(map->agg_idx_bmap,
					  BNXT_AGG_IDX_BMAP_SIZE);
	__set_bit(idx, map->agg_idx_bmap);
	map->agg_id_tbl[agg_id] = idx;

static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	__clear_bit(idx, map->agg_idx_bmap);

static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	return map->agg_id_tbl[agg_id];
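
/* Hardware TPA (TCP packet aggregation, i.e. receive coalescing done by
 * the NIC) is signalled with three completion types: TPA_START stashes
 * the first buffer and flow info in rx_tpa[], TPA_AGG completions queue
 * the payload pages, and TPA_END builds the final skb.  On P5 chips the
 * firmware agg_id is remapped through bnxt_alloc_agg_idx() and
 * bnxt_lookup_agg_idx() above.
 */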
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct bnxt_tpa_info *tpa_info;
	u16 cons, prod, agg_id;
	struct rx_bd *prod_bd;

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		agg_id = TPA_START_AGG_ID_P5(tpa_start);
		agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
		agg_id = TPA_START_AGG_ID(tpa_start);
	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons ||
		     TPA_START_ERROR(tpa_start))) {
		netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
			    cons, rxr->rx_next_cons,
			    TPA_START_ERROR_CODE(tpa_start1));
		bnxt_sched_reset(bp, rxr);

	/* Store cfa_code in tpa_info to use in tpa_end
	 * completion processing.
	 */
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
			RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
	tpa_info->agg_count = 0;

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;

static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
		bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);

static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
	struct udphdr *uh = NULL;

	if (ip_proto == htons(ETH_P_IP)) {
		struct iphdr *iph = (struct iphdr *)skb->data;

		if (iph->protocol == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

		if (iph->nexthdr == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
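
/* Per-chip GRO fixup hooks (bp->gro_func): before an aggregated packet
 * is handed to tcp_gro_complete(), rewrite the network and transport
 * header offsets and the TCP pseudo-header checksum the way the stack
 * expects, using the header offsets reported in the TPA completion.
 */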
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
		/* We only support inner IPv4/IPv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))

		/* internal loopback packet, subtract all offsets by 4 */

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -

		bnxt_gro_tunnel(skb, proto);

static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	int iphdr_len, nw_off;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
		    sizeof(struct ipv6hdr) : sizeof(struct iphdr);
	skb_set_transport_header(skb, nw_off + iphdr_len);

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -

		bnxt_gro_tunnel(skb, proto);

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
	int len, nw_off, tcp_opt_len = 0;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
		skb_set_network_header(skb, nw_off);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
		dev_kfree_skb_any(skb);

	if (nw_off) /* tunnel */
		bnxt_gro_tunnel(skb, skb->protocol);

static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)

	segs = TPA_END_TPA_SEGS(tpa_end);

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
		payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
		tcp_gro_complete(skb);

/* Given the cfa_code of a received packet, determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

	/* if the vf-rep dev is NULL, the packet must belong to the PF */
	return dev ? dev : bp->dev;

static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
					   struct bnxt_cp_ring_info *cpr,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u8 *data_ptr, agg_bufs;
	struct bnxt_tpa_info *tpa_info;
	struct sk_buff *skb;
	u16 idx = 0, agg_id;

	if (unlikely(bnapi->in_reset)) {
		int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);

			return ERR_PTR(-EBUSY);

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		agg_id = TPA_END_AGG_ID_P5(tpa_end);
		agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
		agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
		tpa_info = &rxr->rx_tpa[agg_id];
		if (unlikely(agg_bufs != tpa_info->agg_count)) {
			netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
				    agg_bufs, tpa_info->agg_count);
			agg_bufs = tpa_info->agg_count;
		tpa_info->agg_count = 0;
		*event |= BNXT_AGG_EVENT;
		bnxt_free_agg_idx(rxr, agg_id);
		gro = !!(bp->flags & BNXT_FLAG_GRO);
		agg_id = TPA_END_AGG_ID(tpa_end);
		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
		tpa_info = &rxr->rx_tpa[agg_id];
		idx = RING_CMP(*raw_cons);
			if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
				return ERR_PTR(-EBUSY);

			*event |= BNXT_AGG_EVENT;
			idx = NEXT_CMP(idx);
		gro = !!TPA_END_GRO(tpa_end);
	data = tpa_info->data;
	data_ptr = tpa_info->data_ptr;
	len = tpa_info->len;
	mapping = tpa_info->mapping;

	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
		bnxt_abort_tpa(cpr, idx, agg_bufs);
		if (agg_bufs > MAX_SKB_FRAGS)
			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
				    agg_bufs, (int)MAX_SKB_FRAGS);

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
			bnxt_abort_tpa(cpr, idx, agg_bufs);
		dma_addr_t new_mapping;

		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
			bnxt_abort_tpa(cpr, idx, agg_bufs);

		tpa_info->data = new_data;
		tpa_info->data_ptr = new_data + bp->rx_offset;
		tpa_info->mapping = new_mapping;

		skb = build_skb(data, 0);
		dma_unmap_single_attrs(&bp->pdev->dev, mapping,
				       bp->rx_buf_use_size, bp->rx_dir,
				       DMA_ATTR_WEAK_ORDERING);

			bnxt_abort_tpa(cpr, idx, agg_bufs);
		skb_reserve(skb, bp->rx_offset);

		skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);

			/* Page reuse already handled by bnxt_rx_pages(). */

		eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));

	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

	if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
	    (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
		u16 vlan_proto = tpa_info->metadata >>
			RX_CMP_FLAGS2_METADATA_TPID_SFT;
		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);

	skb_checksum_none_assert(skb);
	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;

		skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);

static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			 struct rx_agg_cmp *rx_agg)
	u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
	struct bnxt_tpa_info *tpa_info;

	agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
	tpa_info = &rxr->rx_tpa[agg_id];
	BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
	tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;

static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
			     struct sk_buff *skb)
	if (skb->dev != bp->dev) {
		/* this packet belongs to a vf-rep */
		bnxt_vf_rep_rx(bp, skb);
	skb_record_rx_queue(skb, bnapi->index);
	napi_gro_receive(&bnapi->napi, skb);

/* returns the following:
 * 1       - 1 packet successfully received
 * 0       - successful TPA_START, packet not completed yet
 * -EBUSY  - completion ring does not have all the agg buffers yet
 * -ENOMEM - packet aborted due to out of memory
 * -EIO    - packet aborted due to hw error indicated in BD
 */
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
		       u32 *raw_cons, u8 *event)
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct net_device *dev = bp->dev;
	struct rx_cmp *rxcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 tmp_raw_cons = *raw_cons;
	u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
	struct bnxt_sw_rx_bd *rx_buf;
	u8 *data_ptr, agg_bufs, cmp_type;
	dma_addr_t dma_addr;
	struct sk_buff *skb;

	rxcmp = (struct rx_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
		bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
		goto next_rx_no_prod_no_len;

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))

	prod = rxr->rx_prod;

	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
			       (struct rx_tpa_start_cmp_ext *)rxcmp1);

		*event |= BNXT_RX_EVENT;
		goto next_rx_no_prod_no_len;

	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
				   (struct rx_tpa_end_cmp *)rxcmp,
				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);

			bnxt_deliver_skb(bp, bnapi, skb);
		*event |= BNXT_RX_EVENT;
		goto next_rx_no_prod_no_len;

	cons = rxcmp->rx_cmp_opaque;
	if (unlikely(cons != rxr->rx_next_cons)) {
		int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);

		/* 0xffff is forced error, don't print it */
		if (rxr->rx_next_cons != 0xffff)
			netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
				    cons, rxr->rx_next_cons);
		bnxt_sched_reset(bp, rxr);
		goto next_rx_no_prod_no_len;
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data;
	data_ptr = rx_buf->data_ptr;

	misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
	agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;

		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))

		cp_cons = NEXT_CMP(cp_cons);
		*event |= BNXT_AGG_EVENT;
	*event |= BNXT_RX_EVENT;

	rx_buf->data = NULL;
	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
		u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);

		bnxt_reuse_rx_data(rxr, cons, data);
			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,

		if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
			bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
			if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
			    !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
				netdev_warn_once(bp->dev, "RX buffer error %x\n",
				bnxt_sched_reset(bp, rxr);
		goto next_rx_no_len;

	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	dma_addr = rx_buf->mapping;

	if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
		bnxt_reuse_rx_data(rxr, cons, data);
				bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,

		if (rx_buf->data_ptr == data_ptr)
			payload = misc & RX_CMP_PAYLOAD_OFFSET;
		skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,

		skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);

	if (RX_CMP_HASH_VALID(rxcmp)) {
		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
		enum pkt_hash_types type = PKT_HASH_TYPE_L4;

		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type != 1 && hash_type != 3)
			type = PKT_HASH_TYPE_L3;
		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);

	cfa_code = RX_CMP_CFA_CODE(rxcmp1);
	skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));

	if ((rxcmp1->rx_cmp_flags2 &
	     cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
	    (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
		u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);

	skb_checksum_none_assert(skb);
	if (RX_CMP_L4_CS_OK(rxcmp1)) {
		if (dev->features & NETIF_F_RXCSUM) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
			if (dev->features & NETIF_F_RXCSUM)
				bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;

	bnxt_deliver_skb(bp, bnapi, skb);

	cpr->rx_packets += 1;
	cpr->rx_bytes += len;

	rxr->rx_prod = NEXT_RX(prod);
	rxr->rx_next_cons = NEXT_RX(cons);

next_rx_no_prod_no_len:
	*raw_cons = tmp_raw_cons;

/* In netpoll mode, if we are using a combined completion ring, we need to
 * discard the rx packets and recycle the buffers.
 */
static int bnxt_force_rx_discard(struct bnxt *bp,
				 struct bnxt_cp_ring_info *cpr,
				 u32 *raw_cons, u8 *event)
	u32 tmp_raw_cons = *raw_cons;
	struct rx_cmp_ext *rxcmp1;
	struct rx_cmp *rxcmp;

	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp = (struct rx_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))

	cmp_type = RX_CMP_TYPE(rxcmp);
	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		rxcmp1->rx_cmp_cfa_code_errors_v2 |=
			cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp_ext *tpa_end1;

		tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
		tpa_end1->rx_tpa_end_cmp_errors_v2 |=
			cpu_to_le32(RX_TPA_END_CMP_ERRORS);
	return bnxt_rx_pkt(bp, cpr, raw_cons, event);
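
/* Read one of the firmware health/recovery registers.  The register
 * descriptor encodes both a window type (PCI config space, GRC, BAR0
 * or BAR1) and an offset, so the same helper works for all of them.
 */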
u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg = fw_health->regs[reg_idx];
	u32 reg_type, reg_off, val = 0;

	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
	case BNXT_FW_HEALTH_REG_TYPE_CFG:
		pci_read_config_dword(bp->pdev, reg_off, &val);
	case BNXT_FW_HEALTH_REG_TYPE_GRC:
		reg_off = fw_health->mapped_regs[reg_idx];
	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
		val = readl(bp->bar0 + reg_off);
	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
		val = readl(bp->bar1 + reg_off);
	if (reg_idx == BNXT_FW_RESET_INPROG_REG)
		val &= fw_health->fw_reset_inprog_reg_mask;

static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)

	for (i = 0; i < bp->rx_nr_rings; i++) {
		u16 grp_idx = bp->rx_ring[i].bnapi->index;
		struct bnxt_ring_grp_info *grp_info;

		grp_info = &bp->grp_info[grp_idx];
		if (grp_info->agg_fw_ring_id == ring_id)
	return INVALID_HW_RING_ID;

#define BNXT_GET_EVENT_PORT(data)	\
	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)

#define BNXT_EVENT_RING_TYPE(data2)	\
	 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)

#define BNXT_EVENT_RING_TYPE_RX(data2)	\
	(BNXT_EVENT_RING_TYPE(data2) ==	\
	 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
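
/* Decode a firmware async event completion: most events just set an
 * sp_event bit and kick the slow-path workqueue; reset and error-
 * recovery notifications also capture the firmware's min/max wait
 * times and health-register state before the work is scheduled.
 */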
1995 static int bnxt_async_event_process(struct bnxt *bp,
1996 struct hwrm_async_event_cmpl *cmpl)
1998 u16 event_id = le16_to_cpu(cmpl->event_id);
1999 u32 data1 = le32_to_cpu(cmpl->event_data1);
2000 u32 data2 = le32_to_cpu(cmpl->event_data2);
2002 /* TODO CHIMP_FW: Define event id's for link change, error etc */
2004 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2005 struct bnxt_link_info *link_info = &bp->link_info;
2008 goto async_event_process_exit;
2010 /* print unsupported speed warning in forced speed mode only */
2011 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2012 (data1 & 0x20000)) {
2013 u16 fw_speed = link_info->force_link_speed;
2014 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2016 if (speed != SPEED_UNKNOWN)
2017 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2020 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2023 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2024 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2025 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2027 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2028 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2030 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2031 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2033 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2034 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2039 if (bp->pf.port_id != port_id)
2042 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2045 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2047 goto async_event_process_exit;
2048 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2050 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2051 char *fatal_str = "non-fatal";
2054 goto async_event_process_exit;
2056 bp->fw_reset_timestamp = jiffies;
2057 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2058 if (!bp->fw_reset_min_dsecs)
2059 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2060 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2061 if (!bp->fw_reset_max_dsecs)
2062 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2063 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2064 fatal_str = "fatal";
2065 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2067 netif_warn(bp, hw, bp->dev,
2068 "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2069 fatal_str, data1, data2,
2070 bp->fw_reset_min_dsecs * 100,
2071 bp->fw_reset_max_dsecs * 100);
2072 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2075 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2076 struct bnxt_fw_health *fw_health = bp->fw_health;
2079 goto async_event_process_exit;
2081 fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
2082 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2083 if (!fw_health->enabled) {
2084 netif_info(bp, drv, bp->dev,
2085 "Error recovery info: error recovery[0]\n");
2088 fw_health->tmr_multiplier =
2089 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2090 bp->current_interval * 10);
2091 fw_health->tmr_counter = fw_health->tmr_multiplier;
2092 fw_health->last_fw_heartbeat =
2093 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2094 fw_health->last_fw_reset_cnt =
2095 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2096 netif_info(bp, drv, bp->dev,
2097 "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
2098 fw_health->master, fw_health->last_fw_reset_cnt,
2099 bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
2100 goto async_event_process_exit;
2102 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2103 netif_notice(bp, hw, bp->dev,
2104 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2106 goto async_event_process_exit;
2107 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2108 struct bnxt_rx_ring_info *rxr;
2111 if (bp->flags & BNXT_FLAG_CHIP_P5)
2112 goto async_event_process_exit;
2114 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2115 BNXT_EVENT_RING_TYPE(data2), data1);
2116 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2117 goto async_event_process_exit;
2119 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2120 if (grp_idx == INVALID_HW_RING_ID) {
2121 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2123 goto async_event_process_exit;
2125 rxr = bp->bnapi[grp_idx]->rx_ring;
2126 bnxt_sched_reset(bp, rxr);
2127 goto async_event_process_exit;
2129 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2130 struct bnxt_fw_health *fw_health = bp->fw_health;
2132 netif_notice(bp, hw, bp->dev,
2133 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2136 fw_health->echo_req_data1 = data1;
2137 fw_health->echo_req_data2 = data2;
2138 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2141 goto async_event_process_exit;
2144 goto async_event_process_exit;
2146 bnxt_queue_sp_work(bp);
2147 async_event_process_exit:
2148 bnxt_ulp_async_events(bp, cmpl);
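/* Dispatch an HWRM-related completion: a DONE completion for a command
 * waiting on interrupt, a forwarded VF request (PF only), or an async
 * event which is handed to bnxt_async_event_process().
 */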
2152 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2154 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2155 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2156 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2157 (struct hwrm_fwd_req_cmpl *)txcmp;
2159 switch (cmpl_type) {
2160 case CMPL_BASE_TYPE_HWRM_DONE:
2161 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2162 if (seq_id == bp->hwrm_intr_seq_id)
2163 bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
2165 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
2168 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2169 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2171 if ((vf_id < bp->pf.first_vf_id) ||
2172 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2173 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2178 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2179 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
2180 bnxt_queue_sp_work(bp);
2183 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2184 bnxt_async_event_process(bp,
2185 (struct hwrm_async_event_cmpl *)txcmp);
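/* MSI-X interrupt handler: prefetch the next completion descriptor and
 * schedule NAPI for this ring group.
 */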
2194 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2196 struct bnxt_napi *bnapi = dev_instance;
2197 struct bnxt *bp = bnapi->bp;
2198 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2199 u32 cons = RING_CMP(cpr->cp_raw_cons);
2202 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2203 napi_schedule(&bnapi->napi);
2207 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2209 u32 raw_cons = cpr->cp_raw_cons;
2210 u16 cons = RING_CMP(raw_cons);
2211 struct tx_cmp *txcmp;
2213 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2215 return TX_CMP_VALID(txcmp, raw_cons);
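/* Legacy INTx interrupt handler: drop spurious interrupts on a shared
 * line, disable the ring IRQ and schedule NAPI.
 */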
2218 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2220 struct bnxt_napi *bnapi = dev_instance;
2221 struct bnxt *bp = bnapi->bp;
2222 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2223 u32 cons = RING_CMP(cpr->cp_raw_cons);
2226 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2228 if (!bnxt_has_work(bp, cpr)) {
2229 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2230 /* return if erroneous interrupt */
2231 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2235 /* disable ring IRQ */
2236 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2238 /* Return here if interrupt is shared and is disabled. */
2239 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2242 napi_schedule(&bnapi->napi);
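/* Core completion ring poll loop.  Consume TX, RX and HWRM completions
 * up to the NAPI budget, note whether more work remains, and accumulate
 * doorbell/TX events for __bnxt_poll_work_done() to flush.
 */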
2246 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2249 struct bnxt_napi *bnapi = cpr->bnapi;
2250 u32 raw_cons = cpr->cp_raw_cons;
2255 struct tx_cmp *txcmp;
2257 cpr->has_more_work = 0;
2258 cpr->had_work_done = 1;
2262 cons = RING_CMP(raw_cons);
2263 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2265 if (!TX_CMP_VALID(txcmp, raw_cons))
2268 /* The validity of the entry must be checked before
2269 * reading any further. */
2272 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2274 /* return full budget so NAPI will complete. */
2275 if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
2277 raw_cons = NEXT_RAW_CMP(raw_cons);
2279 cpr->has_more_work = 1;
2282 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2284 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2286 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2288 if (likely(rc >= 0))
2290 /* Increment rx_pkts when rc is -ENOMEM to count towards
2291 * the NAPI budget. Otherwise, we may potentially loop
2292 * here forever if we consistently cannot allocate rx buffers. */
2295 else if (rc == -ENOMEM && budget)
2297 else if (rc == -EBUSY) /* partial completion */
2299 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2300 CMPL_BASE_TYPE_HWRM_DONE) ||
2301 (TX_CMP_TYPE(txcmp) ==
2302 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2303 (TX_CMP_TYPE(txcmp) ==
2304 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2305 bnxt_hwrm_handler(bp, txcmp);
2307 raw_cons = NEXT_RAW_CMP(raw_cons);
2309 if (rx_pkts && rx_pkts == budget) {
2310 cpr->has_more_work = 1;
2315 if (event & BNXT_REDIRECT_EVENT)
2318 if (event & BNXT_TX_EVENT) {
2319 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2320 u16 prod = txr->tx_prod;
2322 /* Sync BD data before updating doorbell */
2325 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2328 cpr->cp_raw_cons = raw_cons;
2329 bnapi->tx_pkts += tx_pkts;
2330 bnapi->events |= event;
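/* Flush the work accumulated by __bnxt_poll_work(): run the TX
 * completion handler and ring the RX/aggregation doorbells.
 */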
2334 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2336 if (bnapi->tx_pkts) {
2337 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2341 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
2342 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2344 if (bnapi->events & BNXT_AGG_EVENT)
2345 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2346 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2351 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2354 struct bnxt_napi *bnapi = cpr->bnapi;
2357 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2359 /* ACK completion ring before freeing tx ring and producing new
2360 * buffers in rx/agg rings to prevent overflowing the completion ring. */
2363 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2365 __bnxt_poll_work_done(bp, bnapi);
2369 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2371 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2372 struct bnxt *bp = bnapi->bp;
2373 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2374 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2375 struct tx_cmp *txcmp;
2376 struct rx_cmp_ext *rxcmp1;
2377 u32 cp_cons, tmp_raw_cons;
2378 u32 raw_cons = cpr->cp_raw_cons;
2385 cp_cons = RING_CMP(raw_cons);
2386 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2388 if (!TX_CMP_VALID(txcmp, raw_cons))
2391 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2392 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2393 cp_cons = RING_CMP(tmp_raw_cons);
2394 rxcmp1 = (struct rx_cmp_ext *)
2395 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2397 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2400 /* force an error to recycle the buffer */
2401 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2402 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2404 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2405 if (likely(rc == -EIO) && budget)
2407 else if (rc == -EBUSY) /* partial completion */
2409 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2410 CMPL_BASE_TYPE_HWRM_DONE)) {
2411 bnxt_hwrm_handler(bp, txcmp);
2414 "Invalid completion received on special ring\n");
2416 raw_cons = NEXT_RAW_CMP(raw_cons);
2418 if (rx_pkts == budget)
2422 cpr->cp_raw_cons = raw_cons;
2423 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2424 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2426 if (event & BNXT_AGG_EVENT)
2427 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2429 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2430 napi_complete_done(napi, rx_pkts);
2431 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
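/* NAPI poll handler for chips with a combined completion ring.  Poll
 * until the budget is exhausted or no work remains, then complete NAPI,
 * re-arm the completion ring and feed dynamic interrupt moderation.
 */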
2436 static int bnxt_poll(struct napi_struct *napi, int budget)
2438 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2439 struct bnxt *bp = bnapi->bp;
2440 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2443 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2444 napi_complete(napi);
2448 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2450 if (work_done >= budget) {
2452 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2456 if (!bnxt_has_work(bp, cpr)) {
2457 if (napi_complete_done(napi, work_done))
2458 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2462 if (bp->flags & BNXT_FLAG_DIM) {
2463 struct dim_sample dim_sample = {};
2465 dim_update_sample(cpr->event_ctr,
2469 net_dim(&cpr->dim, dim_sample);
2474 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2476 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2477 int i, work_done = 0;
2479 for (i = 0; i < 2; i++) {
2480 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2483 work_done += __bnxt_poll_work(bp, cpr2,
2484 budget - work_done);
2485 cpr->has_more_work |= cpr2->has_more_work;
2491 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2494 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2497 for (i = 0; i < 2; i++) {
2498 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2499 struct bnxt_db_info *db;
2501 if (cpr2 && cpr2->had_work_done) {
2503 writeq(db->db_key64 | dbr_type |
2504 RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2505 cpr2->had_work_done = 0;
2508 __bnxt_poll_work_done(bp, bnapi);
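/* NAPI poll handler for P5 chips.  Drain the notification queue (NQ)
 * and service the RX/TX completion sub-rings that each NQ entry points
 * to, then arm the NQ when all work is done.
 */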
2511 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2513 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2514 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2515 u32 raw_cons = cpr->cp_raw_cons;
2516 struct bnxt *bp = bnapi->bp;
2517 struct nqe_cn *nqcmp;
2521 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2522 napi_complete(napi);
2525 if (cpr->has_more_work) {
2526 cpr->has_more_work = 0;
2527 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2530 cons = RING_CMP(raw_cons);
2531 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2533 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2534 if (cpr->has_more_work)
2537 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
2538 cpr->cp_raw_cons = raw_cons;
2539 if (napi_complete_done(napi, work_done))
2540 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2545 /* The validity of the entry must be checked before
2546 * reading any further. */
2550 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2551 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2552 struct bnxt_cp_ring_info *cpr2;
2554 cpr2 = cpr->cp_ring_arr[idx];
2555 work_done += __bnxt_poll_work(bp, cpr2,
2556 budget - work_done);
2557 cpr->has_more_work |= cpr2->has_more_work;
2559 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2561 raw_cons = NEXT_RAW_CMP(raw_cons);
2563 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
2564 if (raw_cons != cpr->cp_raw_cons) {
2565 cpr->cp_raw_cons = raw_cons;
2566 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
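/* Unmap and free all pending TX buffers (including XDP frames and
 * fragments) on every TX ring and reset the stack's TX queue state.
 */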
2571 static void bnxt_free_tx_skbs(struct bnxt *bp)
2574 struct pci_dev *pdev = bp->pdev;
2579 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2580 for (i = 0; i < bp->tx_nr_rings; i++) {
2581 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2584 for (j = 0; j < max_idx;) {
2585 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2586 struct sk_buff *skb;
2589 if (i < bp->tx_nr_rings_xdp &&
2590 tx_buf->action == XDP_REDIRECT) {
2591 dma_unmap_single(&pdev->dev,
2592 dma_unmap_addr(tx_buf, mapping),
2593 dma_unmap_len(tx_buf, len),
2595 xdp_return_frame(tx_buf->xdpf);
2597 tx_buf->xdpf = NULL;
2610 if (tx_buf->is_push) {
2616 dma_unmap_single(&pdev->dev,
2617 dma_unmap_addr(tx_buf, mapping),
2621 last = tx_buf->nr_frags;
2623 for (k = 0; k < last; k++, j++) {
2624 int ring_idx = j & bp->tx_ring_mask;
2625 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2627 tx_buf = &txr->tx_buf_ring[ring_idx];
2630 dma_unmap_addr(tx_buf, mapping),
2631 skb_frag_size(frag), PCI_DMA_TODEVICE);
2635 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
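/* Free all TPA, RX and aggregation ring buffers for one RX ring,
 * unmapping DMA and returning page-pool pages where applicable.
 */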
2639 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
2641 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
2642 struct pci_dev *pdev = bp->pdev;
2643 struct bnxt_tpa_idx_map *map;
2644 int i, max_idx, max_agg_idx;
2646 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2647 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2649 goto skip_rx_tpa_free;
2651 for (i = 0; i < bp->max_tpa; i++) {
2652 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
2653 u8 *data = tpa_info->data;
2658 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
2659 bp->rx_buf_use_size, bp->rx_dir,
2660 DMA_ATTR_WEAK_ORDERING);
2662 tpa_info->data = NULL;
2668 for (i = 0; i < max_idx; i++) {
2669 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
2670 dma_addr_t mapping = rx_buf->mapping;
2671 void *data = rx_buf->data;
2676 rx_buf->data = NULL;
2677 if (BNXT_RX_PAGE_MODE(bp)) {
2678 mapping -= bp->rx_dma_offset;
2679 dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
2681 DMA_ATTR_WEAK_ORDERING);
2682 page_pool_recycle_direct(rxr->page_pool, data);
2684 dma_unmap_single_attrs(&pdev->dev, mapping,
2685 bp->rx_buf_use_size, bp->rx_dir,
2686 DMA_ATTR_WEAK_ORDERING);
2690 for (i = 0; i < max_agg_idx; i++) {
2691 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
2692 struct page *page = rx_agg_buf->page;
2697 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2698 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
2699 DMA_ATTR_WEAK_ORDERING);
2701 rx_agg_buf->page = NULL;
2702 __clear_bit(i, rxr->rx_agg_bmap);
2707 __free_page(rxr->rx_page);
2708 rxr->rx_page = NULL;
2710 map = rxr->rx_tpa_idx_map;
2712 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2715 static void bnxt_free_rx_skbs(struct bnxt *bp)
2722 for (i = 0; i < bp->rx_nr_rings; i++)
2723 bnxt_free_one_rx_ring_skbs(bp, i);
2726 static void bnxt_free_skbs(struct bnxt *bp)
2728 bnxt_free_tx_skbs(bp);
2729 bnxt_free_rx_skbs(bp);
2732 static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
2734 u8 init_val = mem_init->init_val;
2735 u16 offset = mem_init->offset;
2741 if (offset == BNXT_MEM_INVALID_OFFSET) {
2742 memset(p, init_val, len);
2745 for (i = 0; i < len; i += mem_init->size)
2746 *(p2 + i + offset) = init_val;
2749 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2751 struct pci_dev *pdev = bp->pdev;
2754 for (i = 0; i < rmem->nr_pages; i++) {
2755 if (!rmem->pg_arr[i])
2758 dma_free_coherent(&pdev->dev, rmem->page_size,
2759 rmem->pg_arr[i], rmem->dma_arr[i]);
2761 rmem->pg_arr[i] = NULL;
2764 size_t pg_tbl_size = rmem->nr_pages * 8;
2766 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2767 pg_tbl_size = rmem->page_size;
2768 dma_free_coherent(&pdev->dev, pg_tbl_size,
2769 rmem->pg_tbl, rmem->pg_tbl_map);
2770 rmem->pg_tbl = NULL;
2772 if (rmem->vmem_size && *rmem->vmem) {
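/* Allocate the coherent DMA pages backing a ring described by @rmem,
 * build the page table with PTU PTE bits when more than one page (or an
 * indirection level) is used, and allocate the software vmem area.
 */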
2778 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2780 struct pci_dev *pdev = bp->pdev;
2784 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2785 valid_bit = PTU_PTE_VALID;
2786 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2787 size_t pg_tbl_size = rmem->nr_pages * 8;
2789 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2790 pg_tbl_size = rmem->page_size;
2791 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2798 for (i = 0; i < rmem->nr_pages; i++) {
2799 u64 extra_bits = valid_bit;
2801 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2805 if (!rmem->pg_arr[i])
2809 bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
2811 if (rmem->nr_pages > 1 || rmem->depth > 0) {
2812 if (i == rmem->nr_pages - 2 &&
2813 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2814 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2815 else if (i == rmem->nr_pages - 1 &&
2816 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2817 extra_bits |= PTU_PTE_LAST;
2819 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2823 if (rmem->vmem_size) {
2824 *rmem->vmem = vzalloc(rmem->vmem_size);
2831 static void bnxt_free_tpa_info(struct bnxt *bp)
2835 for (i = 0; i < bp->rx_nr_rings; i++) {
2836 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2838 kfree(rxr->rx_tpa_idx_map);
2839 rxr->rx_tpa_idx_map = NULL;
2841 kfree(rxr->rx_tpa[0].agg_arr);
2842 rxr->rx_tpa[0].agg_arr = NULL;
2849 static int bnxt_alloc_tpa_info(struct bnxt *bp)
2851 int i, j, total_aggs = 0;
2853 bp->max_tpa = MAX_TPA;
2854 if (bp->flags & BNXT_FLAG_CHIP_P5) {
2855 if (!bp->max_tpa_v2)
2857 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
2858 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
2861 for (i = 0; i < bp->rx_nr_rings; i++) {
2862 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2863 struct rx_agg_cmp *agg;
2865 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
2870 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2872 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
2873 rxr->rx_tpa[0].agg_arr = agg;
2876 for (j = 1; j < bp->max_tpa; j++)
2877 rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
2878 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
2880 if (!rxr->rx_tpa_idx_map)
2886 static void bnxt_free_rx_rings(struct bnxt *bp)
2893 bnxt_free_tpa_info(bp);
2894 for (i = 0; i < bp->rx_nr_rings; i++) {
2895 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2896 struct bnxt_ring_struct *ring;
2899 bpf_prog_put(rxr->xdp_prog);
2901 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2902 xdp_rxq_info_unreg(&rxr->xdp_rxq);
2904 page_pool_destroy(rxr->page_pool);
2905 rxr->page_pool = NULL;
2907 kfree(rxr->rx_agg_bmap);
2908 rxr->rx_agg_bmap = NULL;
2910 ring = &rxr->rx_ring_struct;
2911 bnxt_free_ring(bp, &ring->ring_mem);
2913 ring = &rxr->rx_agg_ring_struct;
2914 bnxt_free_ring(bp, &ring->ring_mem);
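/* Create the page_pool for one RX ring, sized to the RX ring and bound
 * to the device's NUMA node.
 */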
2918 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
2919 struct bnxt_rx_ring_info *rxr)
2921 struct page_pool_params pp = { 0 };
2923 pp.pool_size = bp->rx_ring_size;
2924 pp.nid = dev_to_node(&bp->pdev->dev);
2925 pp.dev = &bp->pdev->dev;
2926 pp.dma_dir = DMA_BIDIRECTIONAL;
2928 rxr->page_pool = page_pool_create(&pp);
2929 if (IS_ERR(rxr->page_pool)) {
2930 int err = PTR_ERR(rxr->page_pool);
2932 rxr->page_pool = NULL;
2938 static int bnxt_alloc_rx_rings(struct bnxt *bp)
2940 int i, rc = 0, agg_rings = 0;
2945 if (bp->flags & BNXT_FLAG_AGG_RINGS)
2948 for (i = 0; i < bp->rx_nr_rings; i++) {
2949 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2950 struct bnxt_ring_struct *ring;
2952 ring = &rxr->rx_ring_struct;
2954 rc = bnxt_alloc_rx_page_pool(bp, rxr);
2958 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
2962 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
2966 xdp_rxq_info_unreg(&rxr->xdp_rxq);
2970 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2978 ring = &rxr->rx_agg_ring_struct;
2979 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2984 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2985 mem_size = rxr->rx_agg_bmap_size / 8;
2986 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2987 if (!rxr->rx_agg_bmap)
2991 if (bp->flags & BNXT_FLAG_TPA)
2992 rc = bnxt_alloc_tpa_info(bp);
2996 static void bnxt_free_tx_rings(struct bnxt *bp)
2999 struct pci_dev *pdev = bp->pdev;
3004 for (i = 0; i < bp->tx_nr_rings; i++) {
3005 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3006 struct bnxt_ring_struct *ring;
3009 dma_free_coherent(&pdev->dev, bp->tx_push_size,
3010 txr->tx_push, txr->tx_push_mapping);
3011 txr->tx_push = NULL;
3014 ring = &txr->tx_ring_struct;
3016 bnxt_free_ring(bp, &ring->ring_mem);
3020 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3023 struct pci_dev *pdev = bp->pdev;
3025 bp->tx_push_size = 0;
3026 if (bp->tx_push_thresh) {
3029 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3030 bp->tx_push_thresh);
3032 if (push_size > 256) {
3034 bp->tx_push_thresh = 0;
3037 bp->tx_push_size = push_size;
3040 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3041 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3042 struct bnxt_ring_struct *ring;
3045 ring = &txr->tx_ring_struct;
3047 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3051 ring->grp_idx = txr->bnapi->index;
3052 if (bp->tx_push_size) {
3055 /* One pre-allocated DMA buffer to back up the TX push operation */
3058 txr->tx_push = dma_alloc_coherent(&pdev->dev,
3060 &txr->tx_push_mapping,
3066 mapping = txr->tx_push_mapping +
3067 sizeof(struct tx_push_bd);
3068 txr->data_mapping = cpu_to_le64(mapping);
3070 qidx = bp->tc_to_qidx[j];
3071 ring->queue_id = bp->q_info[qidx].queue_id;
3072 if (i < bp->tx_nr_rings_xdp)
3074 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
3080 static void bnxt_free_cp_rings(struct bnxt *bp)
3087 for (i = 0; i < bp->cp_nr_rings; i++) {
3088 struct bnxt_napi *bnapi = bp->bnapi[i];
3089 struct bnxt_cp_ring_info *cpr;
3090 struct bnxt_ring_struct *ring;
3096 cpr = &bnapi->cp_ring;
3097 ring = &cpr->cp_ring_struct;
3099 bnxt_free_ring(bp, &ring->ring_mem);
3101 for (j = 0; j < 2; j++) {
3102 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3105 ring = &cpr2->cp_ring_struct;
3106 bnxt_free_ring(bp, &ring->ring_mem);
3108 cpr->cp_ring_arr[j] = NULL;
3114 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3116 struct bnxt_ring_mem_info *rmem;
3117 struct bnxt_ring_struct *ring;
3118 struct bnxt_cp_ring_info *cpr;
3121 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3125 ring = &cpr->cp_ring_struct;
3126 rmem = &ring->ring_mem;
3127 rmem->nr_pages = bp->cp_nr_pages;
3128 rmem->page_size = HW_CMPD_RING_SIZE;
3129 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3130 rmem->dma_arr = cpr->cp_desc_mapping;
3131 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3132 rc = bnxt_alloc_ring(bp, rmem);
3134 bnxt_free_ring(bp, rmem);
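/* Allocate the per-NAPI completion ring (the NQ on P5 chips) and, on
 * P5, the RX/TX completion sub-rings, skewing MSI-X map indexes around
 * any vectors reserved for ULPs.
 */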
3141 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3143 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3144 int i, rc, ulp_base_vec, ulp_msix;
3146 ulp_msix = bnxt_get_ulp_msix_num(bp);
3147 ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3148 for (i = 0; i < bp->cp_nr_rings; i++) {
3149 struct bnxt_napi *bnapi = bp->bnapi[i];
3150 struct bnxt_cp_ring_info *cpr;
3151 struct bnxt_ring_struct *ring;
3156 cpr = &bnapi->cp_ring;
3158 ring = &cpr->cp_ring_struct;
3160 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3164 if (ulp_msix && i >= ulp_base_vec)
3165 ring->map_idx = i + ulp_msix;
3169 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3172 if (i < bp->rx_nr_rings) {
3173 struct bnxt_cp_ring_info *cpr2 =
3174 bnxt_alloc_cp_sub_ring(bp);
3176 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3179 cpr2->bnapi = bnapi;
3181 if ((sh && i < bp->tx_nr_rings) ||
3182 (!sh && i >= bp->rx_nr_rings)) {
3183 struct bnxt_cp_ring_info *cpr2 =
3184 bnxt_alloc_cp_sub_ring(bp);
3186 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3189 cpr2->bnapi = bnapi;
3195 static void bnxt_init_ring_struct(struct bnxt *bp)
3199 for (i = 0; i < bp->cp_nr_rings; i++) {
3200 struct bnxt_napi *bnapi = bp->bnapi[i];
3201 struct bnxt_ring_mem_info *rmem;
3202 struct bnxt_cp_ring_info *cpr;
3203 struct bnxt_rx_ring_info *rxr;
3204 struct bnxt_tx_ring_info *txr;
3205 struct bnxt_ring_struct *ring;
3210 cpr = &bnapi->cp_ring;
3211 ring = &cpr->cp_ring_struct;
3212 rmem = &ring->ring_mem;
3213 rmem->nr_pages = bp->cp_nr_pages;
3214 rmem->page_size = HW_CMPD_RING_SIZE;
3215 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3216 rmem->dma_arr = cpr->cp_desc_mapping;
3217 rmem->vmem_size = 0;
3219 rxr = bnapi->rx_ring;
3223 ring = &rxr->rx_ring_struct;
3224 rmem = &ring->ring_mem;
3225 rmem->nr_pages = bp->rx_nr_pages;
3226 rmem->page_size = HW_RXBD_RING_SIZE;
3227 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3228 rmem->dma_arr = rxr->rx_desc_mapping;
3229 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3230 rmem->vmem = (void **)&rxr->rx_buf_ring;
3232 ring = &rxr->rx_agg_ring_struct;
3233 rmem = &ring->ring_mem;
3234 rmem->nr_pages = bp->rx_agg_nr_pages;
3235 rmem->page_size = HW_RXBD_RING_SIZE;
3236 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3237 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3238 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3239 rmem->vmem = (void **)&rxr->rx_agg_ring;
3242 txr = bnapi->tx_ring;
3246 ring = &txr->tx_ring_struct;
3247 rmem = &ring->ring_mem;
3248 rmem->nr_pages = bp->tx_nr_pages;
3249 rmem->page_size = HW_RXBD_RING_SIZE;
3250 rmem->pg_arr = (void **)txr->tx_desc_ring;
3251 rmem->dma_arr = txr->tx_desc_mapping;
3252 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3253 rmem->vmem = (void **)&txr->tx_buf_ring;
3257 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3261 struct rx_bd **rx_buf_ring;
3263 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3264 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3268 rxbd = rx_buf_ring[i];
3272 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3273 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3274 rxbd->rx_bd_opaque = prod;
3279 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
3281 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3282 struct net_device *dev = bp->dev;
3286 prod = rxr->rx_prod;
3287 for (i = 0; i < bp->rx_ring_size; i++) {
3288 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
3289 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3290 ring_nr, i, bp->rx_ring_size);
3293 prod = NEXT_RX(prod);
3295 rxr->rx_prod = prod;
3297 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3300 prod = rxr->rx_agg_prod;
3301 for (i = 0; i < bp->rx_agg_ring_size; i++) {
3302 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
3303 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3304 ring_nr, i, bp->rx_agg_ring_size);
3307 prod = NEXT_RX_AGG(prod);
3309 rxr->rx_agg_prod = prod;
3315 for (i = 0; i < bp->max_tpa; i++) {
3316 data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
3320 rxr->rx_tpa[i].data = data;
3321 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3322 rxr->rx_tpa[i].mapping = mapping;
3328 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3330 struct bnxt_rx_ring_info *rxr;
3331 struct bnxt_ring_struct *ring;
3334 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3335 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3337 if (NET_IP_ALIGN == 2)
3338 type |= RX_BD_FLAGS_SOP;
3340 rxr = &bp->rx_ring[ring_nr];
3341 ring = &rxr->rx_ring_struct;
3342 bnxt_init_rxbd_pages(ring, type);
3344 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3345 bpf_prog_add(bp->xdp_prog, 1);
3346 rxr->xdp_prog = bp->xdp_prog;
3348 ring->fw_ring_id = INVALID_HW_RING_ID;
3350 ring = &rxr->rx_agg_ring_struct;
3351 ring->fw_ring_id = INVALID_HW_RING_ID;
3353 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
3354 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3355 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3357 bnxt_init_rxbd_pages(ring, type);
3360 return bnxt_alloc_one_rx_ring(bp, ring_nr);
3363 static void bnxt_init_cp_rings(struct bnxt *bp)
3367 for (i = 0; i < bp->cp_nr_rings; i++) {
3368 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3369 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3371 ring->fw_ring_id = INVALID_HW_RING_ID;
3372 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3373 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3374 for (j = 0; j < 2; j++) {
3375 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3380 ring = &cpr2->cp_ring_struct;
3381 ring->fw_ring_id = INVALID_HW_RING_ID;
3382 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3383 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3388 static int bnxt_init_rx_rings(struct bnxt *bp)
3392 if (BNXT_RX_PAGE_MODE(bp)) {
3393 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3394 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3396 bp->rx_offset = BNXT_RX_OFFSET;
3397 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3400 for (i = 0; i < bp->rx_nr_rings; i++) {
3401 rc = bnxt_init_one_rx_ring(bp, i);
3409 static int bnxt_init_tx_rings(struct bnxt *bp)
3413 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3416 for (i = 0; i < bp->tx_nr_rings; i++) {
3417 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3418 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3420 ring->fw_ring_id = INVALID_HW_RING_ID;
3426 static void bnxt_free_ring_grps(struct bnxt *bp)
3428 kfree(bp->grp_info);
3429 bp->grp_info = NULL;
3432 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3437 bp->grp_info = kcalloc(bp->cp_nr_rings,
3438 sizeof(struct bnxt_ring_grp_info),
3443 for (i = 0; i < bp->cp_nr_rings; i++) {
3445 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3446 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3447 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3448 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3449 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3454 static void bnxt_free_vnics(struct bnxt *bp)
3456 kfree(bp->vnic_info);
3457 bp->vnic_info = NULL;
3461 static int bnxt_alloc_vnics(struct bnxt *bp)
3465 #ifdef CONFIG_RFS_ACCEL
3466 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3467 num_vnics += bp->rx_nr_rings;
3470 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3473 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3478 bp->nr_vnics = num_vnics;
3482 static void bnxt_init_vnics(struct bnxt *bp)
3486 for (i = 0; i < bp->nr_vnics; i++) {
3487 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3490 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3491 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3492 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3494 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3496 if (bp->vnic_info[i].rss_hash_key) {
3498 prandom_bytes(vnic->rss_hash_key,
3501 memcpy(vnic->rss_hash_key,
3502 bp->vnic_info[0].rss_hash_key,
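/* Return the number of ring pages needed to hold @ring_size descriptors
 * at @desc_per_pg per page, rounded up to a power of two.
 */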
3508 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3512 pages = ring_size / desc_per_pg;
3519 while (pages & (pages - 1))
3525 void bnxt_set_tpa_flags(struct bnxt *bp)
3527 bp->flags &= ~BNXT_FLAG_TPA;
3528 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3530 if (bp->dev->features & NETIF_F_LRO)
3531 bp->flags |= BNXT_FLAG_LRO;
3532 else if (bp->dev->features & NETIF_F_GRO_HW)
3533 bp->flags |= BNXT_FLAG_GRO;
3536 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must be set before calling this function. */
3539 void bnxt_set_ring_params(struct bnxt *bp)
3541 u32 ring_size, rx_size, rx_space, max_rx_cmpl;
3542 u32 agg_factor = 0, agg_ring_size = 0;
3544 /* 8 for CRC and VLAN */
3545 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3547 rx_space = rx_size + NET_SKB_PAD +
3548 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3550 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3551 ring_size = bp->rx_ring_size;
3552 bp->rx_agg_ring_size = 0;
3553 bp->rx_agg_nr_pages = 0;
3555 if (bp->flags & BNXT_FLAG_TPA)
3556 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3558 bp->flags &= ~BNXT_FLAG_JUMBO;
3559 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3562 bp->flags |= BNXT_FLAG_JUMBO;
3563 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3564 if (jumbo_factor > agg_factor)
3565 agg_factor = jumbo_factor;
3567 agg_ring_size = ring_size * agg_factor;
3569 if (agg_ring_size) {
3570 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3572 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3573 u32 tmp = agg_ring_size;
3575 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3576 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3577 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3578 tmp, agg_ring_size);
3580 bp->rx_agg_ring_size = agg_ring_size;
3581 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3582 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3583 rx_space = rx_size + NET_SKB_PAD +
3584 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3587 bp->rx_buf_use_size = rx_size;
3588 bp->rx_buf_size = rx_space;
3590 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3591 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3593 ring_size = bp->tx_ring_size;
3594 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3595 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3597 max_rx_cmpl = bp->rx_ring_size;
3598 /* MAX TPA needs to be added because TPA_START completions are
3599 * immediately recycled, so the TPA completions are not bound by the RX ring size. */
3602 if (bp->flags & BNXT_FLAG_TPA)
3603 max_rx_cmpl += bp->max_tpa;
3604 /* RX and TPA completions are 32-byte, all others are 16-byte */
3605 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
3606 bp->cp_ring_size = ring_size;
3608 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3609 if (bp->cp_nr_pages > MAX_CP_PAGES) {
3610 bp->cp_nr_pages = MAX_CP_PAGES;
3611 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3612 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3613 ring_size, bp->cp_ring_size);
3615 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3616 bp->cp_ring_mask = bp->cp_bit - 1;
3619 /* Changing allocation mode of RX rings.
3620 * TODO: Update when extending xdp_rxq_info to support allocation modes.
3622 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3625 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3628 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3629 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3630 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3631 bp->rx_dir = DMA_BIDIRECTIONAL;
3632 bp->rx_skb_func = bnxt_rx_page_skb;
3633 /* Disable LRO or GRO_HW */
3634 netdev_update_features(bp->dev);
3636 bp->dev->max_mtu = bp->max_mtu;
3637 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3638 bp->rx_dir = DMA_FROM_DEVICE;
3639 bp->rx_skb_func = bnxt_rx_skb;
3644 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3647 struct bnxt_vnic_info *vnic;
3648 struct pci_dev *pdev = bp->pdev;
3653 for (i = 0; i < bp->nr_vnics; i++) {
3654 vnic = &bp->vnic_info[i];
3656 kfree(vnic->fw_grp_ids);
3657 vnic->fw_grp_ids = NULL;
3659 kfree(vnic->uc_list);
3660 vnic->uc_list = NULL;
3662 if (vnic->mc_list) {
3663 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3664 vnic->mc_list, vnic->mc_list_mapping);
3665 vnic->mc_list = NULL;
3668 if (vnic->rss_table) {
3669 dma_free_coherent(&pdev->dev, vnic->rss_table_size,
3671 vnic->rss_table_dma_addr);
3672 vnic->rss_table = NULL;
3675 vnic->rss_hash_key = NULL;
3680 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3682 int i, rc = 0, size;
3683 struct bnxt_vnic_info *vnic;
3684 struct pci_dev *pdev = bp->pdev;
3687 for (i = 0; i < bp->nr_vnics; i++) {
3688 vnic = &bp->vnic_info[i];
3690 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3691 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3694 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3695 if (!vnic->uc_list) {
3702 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3703 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3705 dma_alloc_coherent(&pdev->dev,
3707 &vnic->mc_list_mapping,
3709 if (!vnic->mc_list) {
3715 if (bp->flags & BNXT_FLAG_CHIP_P5)
3716 goto vnic_skip_grps;
3718 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3719 max_rings = bp->rx_nr_rings;
3723 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3724 if (!vnic->fw_grp_ids) {
3729 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3730 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3733 /* Allocate rss table and hash key */
3734 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3735 if (bp->flags & BNXT_FLAG_CHIP_P5)
3736 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
3738 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
3739 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
3740 vnic->rss_table_size,
3741 &vnic->rss_table_dma_addr,
3743 if (!vnic->rss_table) {
3748 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3749 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3757 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3759 struct pci_dev *pdev = bp->pdev;
3761 if (bp->hwrm_cmd_resp_addr) {
3762 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3763 bp->hwrm_cmd_resp_dma_addr);
3764 bp->hwrm_cmd_resp_addr = NULL;
3767 if (bp->hwrm_cmd_kong_resp_addr) {
3768 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3769 bp->hwrm_cmd_kong_resp_addr,
3770 bp->hwrm_cmd_kong_resp_dma_addr);
3771 bp->hwrm_cmd_kong_resp_addr = NULL;
3775 static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
3777 struct pci_dev *pdev = bp->pdev;
3779 if (bp->hwrm_cmd_kong_resp_addr)
3782 bp->hwrm_cmd_kong_resp_addr =
3783 dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3784 &bp->hwrm_cmd_kong_resp_dma_addr,
3786 if (!bp->hwrm_cmd_kong_resp_addr)
3792 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3794 struct pci_dev *pdev = bp->pdev;
3796 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3797 &bp->hwrm_cmd_resp_dma_addr,
3799 if (!bp->hwrm_cmd_resp_addr)
3805 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3807 if (bp->hwrm_short_cmd_req_addr) {
3808 struct pci_dev *pdev = bp->pdev;
3810 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3811 bp->hwrm_short_cmd_req_addr,
3812 bp->hwrm_short_cmd_req_dma_addr);
3813 bp->hwrm_short_cmd_req_addr = NULL;
3817 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3819 struct pci_dev *pdev = bp->pdev;
3821 if (bp->hwrm_short_cmd_req_addr)
3824 bp->hwrm_short_cmd_req_addr =
3825 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3826 &bp->hwrm_short_cmd_req_dma_addr,
3828 if (!bp->hwrm_short_cmd_req_addr)
3834 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
3836 kfree(stats->hw_masks);
3837 stats->hw_masks = NULL;
3838 kfree(stats->sw_stats);
3839 stats->sw_stats = NULL;
3840 if (stats->hw_stats) {
3841 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
3842 stats->hw_stats_map);
3843 stats->hw_stats = NULL;
3847 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
3850 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
3851 &stats->hw_stats_map, GFP_KERNEL);
3852 if (!stats->hw_stats)
3855 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
3856 if (!stats->sw_stats)
3860 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
3861 if (!stats->hw_masks)
3867 bnxt_free_stats_mem(bp, stats);
3871 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
3875 for (i = 0; i < count; i++)
3879 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
3883 for (i = 0; i < count; i++)
3884 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
3887 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
3888 struct bnxt_stats_mem *stats)
3890 struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
3891 struct hwrm_func_qstats_ext_input req = {0};
3895 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
3896 !(bp->flags & BNXT_FLAG_CHIP_P5))
3899 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1);
3900 req.fid = cpu_to_le16(0xffff);
3901 req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
3902 mutex_lock(&bp->hwrm_cmd_lock);
3903 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3907 hw_masks = &resp->rx_ucast_pkts;
3908 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
3911 mutex_unlock(&bp->hwrm_cmd_lock);
3915 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
3916 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
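/* Initialize the counter-width masks for ring, port and extended port
 * statistics, querying the firmware for the exact masks when supported
 * and falling back to fixed-width masks otherwise.
 */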
3918 static void bnxt_init_stats(struct bnxt *bp)
3920 struct bnxt_napi *bnapi = bp->bnapi[0];
3921 struct bnxt_cp_ring_info *cpr;
3922 struct bnxt_stats_mem *stats;
3923 __le64 *rx_stats, *tx_stats;
3924 int rc, rx_count, tx_count;
3925 u64 *rx_masks, *tx_masks;
3929 cpr = &bnapi->cp_ring;
3930 stats = &cpr->stats;
3931 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
3933 if (bp->flags & BNXT_FLAG_CHIP_P5)
3934 mask = (1ULL << 48) - 1;
3937 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
3939 if (bp->flags & BNXT_FLAG_PORT_STATS) {
3940 stats = &bp->port_stats;
3941 rx_stats = stats->hw_stats;
3942 rx_masks = stats->hw_masks;
3943 rx_count = sizeof(struct rx_port_stats) / 8;
3944 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
3945 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
3946 tx_count = sizeof(struct tx_port_stats) / 8;
3948 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
3949 rc = bnxt_hwrm_port_qstats(bp, flags);
3951 mask = (1ULL << 40) - 1;
3953 bnxt_fill_masks(rx_masks, mask, rx_count);
3954 bnxt_fill_masks(tx_masks, mask, tx_count);
3956 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
3957 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
3958 bnxt_hwrm_port_qstats(bp, 0);
3961 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
3962 stats = &bp->rx_port_stats_ext;
3963 rx_stats = stats->hw_stats;
3964 rx_masks = stats->hw_masks;
3965 rx_count = sizeof(struct rx_port_stats_ext) / 8;
3966 stats = &bp->tx_port_stats_ext;
3967 tx_stats = stats->hw_stats;
3968 tx_masks = stats->hw_masks;
3969 tx_count = sizeof(struct tx_port_stats_ext) / 8;
3971 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
3972 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
3974 mask = (1ULL << 40) - 1;
3976 bnxt_fill_masks(rx_masks, mask, rx_count);
3978 bnxt_fill_masks(tx_masks, mask, tx_count);
3980 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
3982 bnxt_copy_hw_masks(tx_masks, tx_stats,
3984 bnxt_hwrm_port_qstats_ext(bp, 0);
3989 static void bnxt_free_port_stats(struct bnxt *bp)
3991 bp->flags &= ~BNXT_FLAG_PORT_STATS;
3992 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
3994 bnxt_free_stats_mem(bp, &bp->port_stats);
3995 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
3996 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
3999 static void bnxt_free_ring_stats(struct bnxt *bp)
4006 for (i = 0; i < bp->cp_nr_rings; i++) {
4007 struct bnxt_napi *bnapi = bp->bnapi[i];
4008 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4010 bnxt_free_stats_mem(bp, &cpr->stats);
4014 static int bnxt_alloc_stats(struct bnxt *bp)
4019 size = bp->hw_ring_stats_size;
4021 for (i = 0; i < bp->cp_nr_rings; i++) {
4022 struct bnxt_napi *bnapi = bp->bnapi[i];
4023 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4025 cpr->stats.len = size;
4026 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4030 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4033 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4036 if (bp->port_stats.hw_stats)
4037 goto alloc_ext_stats;
4039 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
4040 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4044 bp->flags |= BNXT_FLAG_PORT_STATS;
4047 /* Display extended statistics only if FW supports it */
4048 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
4049 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
4052 if (bp->rx_port_stats_ext.hw_stats)
4053 goto alloc_tx_ext_stats;
4055 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
4056 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4057 /* Extended stats are optional */
4062 if (bp->tx_port_stats_ext.hw_stats)
4065 if (bp->hwrm_spec_code >= 0x10902 ||
4066 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4067 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4068 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4069 /* Extended stats are optional */
4073 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4077 static void bnxt_clear_ring_indices(struct bnxt *bp)
4084 for (i = 0; i < bp->cp_nr_rings; i++) {
4085 struct bnxt_napi *bnapi = bp->bnapi[i];
4086 struct bnxt_cp_ring_info *cpr;
4087 struct bnxt_rx_ring_info *rxr;
4088 struct bnxt_tx_ring_info *txr;
4093 cpr = &bnapi->cp_ring;
4094 cpr->cp_raw_cons = 0;
4096 txr = bnapi->tx_ring;
4102 rxr = bnapi->rx_ring;
4105 rxr->rx_agg_prod = 0;
4106 rxr->rx_sw_agg_prod = 0;
4107 rxr->rx_next_cons = 0;
4112 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4114 #ifdef CONFIG_RFS_ACCEL
4117 /* We are under rtnl_lock and all our NAPIs have been disabled, so it is
4118 * safe to delete the hash table. */
4120 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4121 struct hlist_head *head;
4122 struct hlist_node *tmp;
4123 struct bnxt_ntuple_filter *fltr;
4125 head = &bp->ntp_fltr_hash_tbl[i];
4126 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4127 hlist_del(&fltr->hash);
4132 kfree(bp->ntp_fltr_bmap);
4133 bp->ntp_fltr_bmap = NULL;
4135 bp->ntp_fltr_count = 0;
4139 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4141 #ifdef CONFIG_RFS_ACCEL
4144 if (!(bp->flags & BNXT_FLAG_RFS))
4147 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4148 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4150 bp->ntp_fltr_count = 0;
4151 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
4155 if (!bp->ntp_fltr_bmap)
4164 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4166 bnxt_free_vnic_attributes(bp);
4167 bnxt_free_tx_rings(bp);
4168 bnxt_free_rx_rings(bp);
4169 bnxt_free_cp_rings(bp);
4170 bnxt_free_ntp_fltrs(bp, irq_re_init);
4172 bnxt_free_ring_stats(bp);
4173 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
4174 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
4175 bnxt_free_port_stats(bp);
4176 bnxt_free_ring_grps(bp);
4177 bnxt_free_vnics(bp);
4178 kfree(bp->tx_ring_map);
4179 bp->tx_ring_map = NULL;
4187 bnxt_clear_ring_indices(bp);
4191 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4193 int i, j, rc, size, arr_size;
4197 /* Allocate bnapi mem pointer array and mem block for all rings */
4200 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4202 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4203 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4209 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4210 bp->bnapi[i] = bnapi;
4211 bp->bnapi[i]->index = i;
4212 bp->bnapi[i]->bp = bp;
4213 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4214 struct bnxt_cp_ring_info *cpr =
4215 &bp->bnapi[i]->cp_ring;
4217 cpr->cp_ring_struct.ring_mem.flags =
4218 BNXT_RMEM_RING_PTE_FLAG;
4222 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4223 sizeof(struct bnxt_rx_ring_info),
4228 for (i = 0; i < bp->rx_nr_rings; i++) {
4229 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4231 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4232 rxr->rx_ring_struct.ring_mem.flags =
4233 BNXT_RMEM_RING_PTE_FLAG;
4234 rxr->rx_agg_ring_struct.ring_mem.flags =
4235 BNXT_RMEM_RING_PTE_FLAG;
4237 rxr->bnapi = bp->bnapi[i];
4238 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4241 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4242 sizeof(struct bnxt_tx_ring_info),
4247 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4250 if (!bp->tx_ring_map)
4253 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4256 j = bp->rx_nr_rings;
4258 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4259 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4261 if (bp->flags & BNXT_FLAG_CHIP_P5)
4262 txr->tx_ring_struct.ring_mem.flags =
4263 BNXT_RMEM_RING_PTE_FLAG;
4264 txr->bnapi = bp->bnapi[j];
4265 bp->bnapi[j]->tx_ring = txr;
4266 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4267 if (i >= bp->tx_nr_rings_xdp) {
4268 txr->txq_index = i - bp->tx_nr_rings_xdp;
4269 bp->bnapi[j]->tx_int = bnxt_tx_int;
4271 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4272 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4276 rc = bnxt_alloc_stats(bp);
4279 bnxt_init_stats(bp);
4281 rc = bnxt_alloc_ntp_fltrs(bp);
4285 rc = bnxt_alloc_vnics(bp);
4290 bnxt_init_ring_struct(bp);
4292 rc = bnxt_alloc_rx_rings(bp);
4296 rc = bnxt_alloc_tx_rings(bp);
4300 rc = bnxt_alloc_cp_rings(bp);
4304 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4305 BNXT_VNIC_UCAST_FLAG;
4306 rc = bnxt_alloc_vnic_attributes(bp);
4312 bnxt_free_mem(bp, true);
4316 static void bnxt_disable_int(struct bnxt *bp)
4323 for (i = 0; i < bp->cp_nr_rings; i++) {
4324 struct bnxt_napi *bnapi = bp->bnapi[i];
4325 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4326 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4328 if (ring->fw_ring_id != INVALID_HW_RING_ID)
4329 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4333 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4335 struct bnxt_napi *bnapi = bp->bnapi[n];
4336 struct bnxt_cp_ring_info *cpr;
4338 cpr = &bnapi->cp_ring;
4339 return cpr->cp_ring_struct.map_idx;
4342 static void bnxt_disable_int_sync(struct bnxt *bp)
4349 atomic_inc(&bp->intr_sem);
4351 bnxt_disable_int(bp);
4352 for (i = 0; i < bp->cp_nr_rings; i++) {
4353 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4355 synchronize_irq(bp->irq_tbl[map_idx].vector);
4359 static void bnxt_enable_int(struct bnxt *bp)
4363 atomic_set(&bp->intr_sem, 0);
4364 for (i = 0; i < bp->cp_nr_rings; i++) {
4365 struct bnxt_napi *bnapi = bp->bnapi[i];
4366 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4368 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
4372 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
4373 u16 cmpl_ring, u16 target_id)
4375 struct input *req = request;
4377 req->req_type = cpu_to_le16(req_type);
4378 req->cmpl_ring = cpu_to_le16(cmpl_ring);
4379 req->target_id = cpu_to_le16(target_id);
4380 if (bnxt_kong_hwrm_message(bp, req))
4381 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
4383 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
4386 static int bnxt_hwrm_to_stderr(u32 hwrm_err)
4389 case HWRM_ERR_CODE_SUCCESS:
4391 case HWRM_ERR_CODE_RESOURCE_LOCKED:
4393 case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
4395 case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
4397 case HWRM_ERR_CODE_INVALID_PARAMS:
4398 case HWRM_ERR_CODE_INVALID_FLAGS:
4399 case HWRM_ERR_CODE_INVALID_ENABLES:
4400 case HWRM_ERR_CODE_UNSUPPORTED_TLV:
4401 case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
4403 case HWRM_ERR_CODE_NO_BUFFER:
4405 case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
4406 case HWRM_ERR_CODE_BUSY:
4408 case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
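/* Low-level HWRM request path: copy the request (using the short command
 * format when required) into the communication channel, ring the
 * doorbell, then poll for the response length and valid bit, honoring
 * the short/standard timeout schedule and the FW fatal state.
 */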
4415 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
4416 int timeout, bool silent)
4418 int i, intr_process, rc, tmo_count;
4419 struct input *req = msg;
4422 u16 cp_ring_id, len = 0;
4423 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
4424 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
4425 struct hwrm_short_input short_input = {0};
4426 u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
4427 u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
4428 u16 dst = BNXT_HWRM_CHNL_CHIMP;
4430 if (BNXT_NO_FW_ACCESS(bp) &&
4431 le16_to_cpu(req->req_type) != HWRM_FUNC_RESET)
4434 if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4435 if (msg_len > bp->hwrm_max_ext_req_len ||
4436 !bp->hwrm_short_cmd_req_addr)
4440 if (bnxt_hwrm_kong_chnl(bp, req)) {
4441 dst = BNXT_HWRM_CHNL_KONG;
4442 bar_offset = BNXT_GRCPF_REG_KONG_COMM;
4443 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
4444 resp = bp->hwrm_cmd_kong_resp_addr;
4447 memset(resp, 0, PAGE_SIZE);
4448 cp_ring_id = le16_to_cpu(req->cmpl_ring);
4449 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
4451 req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
4452 /* currently supports only one outstanding message */
4454 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
4456 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
4457 msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4458 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
4461 /* Set boundary for maximum extended request length for short
4462 * cmd format. If passed up from device use the max supported
4463 * internal req length.
4465 max_msg_len = bp->hwrm_max_ext_req_len;
4467 memcpy(short_cmd_req, req, msg_len);
4468 if (msg_len < max_msg_len)
4469 memset(short_cmd_req + msg_len, 0,
4470 max_msg_len - msg_len);
4472 short_input.req_type = req->req_type;
4473 short_input.signature =
4474 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
4475 short_input.size = cpu_to_le16(msg_len);
4476 short_input.req_addr =
4477 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
4479 data = (u32 *)&short_input;
4480 msg_len = sizeof(short_input);
4482 /* Sync memory write before updating doorbell */
4485 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
4488 /* Write request msg to hwrm channel */
4489 __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
4491 for (i = msg_len; i < max_req_len; i += 4)
4492 writel(0, bp->bar0 + bar_offset + i);
4494 /* Ring channel doorbell */
4495 writel(1, bp->bar0 + doorbell_offset);
4497 if (!pci_is_enabled(bp->pdev))
4501 timeout = DFLT_HWRM_CMD_TIMEOUT;
4502 /* Cap the timeout at the maximum supported value */
4503 timeout = min(timeout, HWRM_CMD_MAX_TIMEOUT);
4504 /* convert timeout to usec */
4508 /* Short timeout for the first few iterations:
4509 * number of loops = number of loops for short timeout +
4510 * number of loops for standard timeout.
4512 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
4513 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
4514 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
4517 u16 seq_id = bp->hwrm_intr_seq_id;
4519 /* Wait until hwrm response cmpl interrupt is processed */
4520 while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
4522 /* Abort the wait for completion if the FW health check has failed. */
4525 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4527 /* on first few passes, just barely sleep */
4528 if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
4529 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4530 HWRM_SHORT_MAX_TIMEOUT);
4532 if (HWRM_WAIT_MUST_ABORT(bp, req))
4534 usleep_range(HWRM_MIN_TIMEOUT,
4539 if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
4541 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
4542 le16_to_cpu(req->req_type));
4545 len = le16_to_cpu(resp->resp_len);
4546 valid = ((u8 *)resp) + len - 1;
4550 /* Check if response len is updated */
4551 for (i = 0; i < tmo_count; i++) {
4552 /* Abort the wait for completion if the FW health check has failed. */
4555 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4557 len = le16_to_cpu(resp->resp_len);
4560 /* on first few passes, just barely sleep */
4561 if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
4562 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4563 HWRM_SHORT_MAX_TIMEOUT);
4565 if (HWRM_WAIT_MUST_ABORT(bp, req))
4567 usleep_range(HWRM_MIN_TIMEOUT,
4572 if (i >= tmo_count) {
4575 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
4576 HWRM_TOTAL_TIMEOUT(i),
4577 le16_to_cpu(req->req_type),
4578 le16_to_cpu(req->seq_id), len);
4582 /* Last byte of resp contains valid bit */
4583 valid = ((u8 *)resp) + len - 1;
4584 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
4585 /* make sure we read from updated DMA memory */
4592 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
4594 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
4595 HWRM_TOTAL_TIMEOUT(i),
4596 le16_to_cpu(req->req_type),
4597 le16_to_cpu(req->seq_id), len,
4603 /* Zero valid bit for compatibility. Valid bit in an older spec
4604 * may become a new field in a newer spec. We must make sure that
4605 * a new field not implemented by old spec will read zero.
4608 rc = le16_to_cpu(resp->error_code);
4610 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
4611 le16_to_cpu(resp->req_type),
4612 le16_to_cpu(resp->seq_id), rc);
4613 return bnxt_hwrm_to_stderr(rc);
4616 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4618 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
4621 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4624 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4627 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4631 mutex_lock(&bp->hwrm_cmd_lock);
4632 rc = _hwrm_send_message(bp, msg, msg_len, timeout);
4633 mutex_unlock(&bp->hwrm_cmd_lock);
4637 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4642 mutex_lock(&bp->hwrm_cmd_lock);
4643 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4644 mutex_unlock(&bp->hwrm_cmd_lock);
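/* Register the driver with the firmware: advertise the driver version
 * and capabilities (hot reset, error recovery), the async events we
 * want forwarded and, on the PF, the VF commands to be forwarded to us.
 */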
4648 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4651 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
4652 struct hwrm_func_drv_rgtr_input req = {0};
4653 DECLARE_BITMAP(async_events_bmap, 256);
4654 u32 *events = (u32 *)async_events_bmap;
4658 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4661 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4662 FUNC_DRV_RGTR_REQ_ENABLES_VER |
4663 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4665 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4666 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4667 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4668 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
4669 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4670 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4671 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
4672 req.flags = cpu_to_le32(flags);
4673 req.ver_maj_8b = DRV_VER_MAJ;
4674 req.ver_min_8b = DRV_VER_MIN;
4675 req.ver_upd_8b = DRV_VER_UPD;
4676 req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
4677 req.ver_min = cpu_to_le16(DRV_VER_MIN);
4678 req.ver_upd = cpu_to_le16(DRV_VER_UPD);
4684 memset(data, 0, sizeof(data));
4685 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4686 u16 cmd = bnxt_vf_req_snif[i];
4687 unsigned int bit, idx;
4691 data[idx] |= 1 << bit;
4694 for (i = 0; i < 8; i++)
4695 req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4698 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4701 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4702 req.flags |= cpu_to_le32(
4703 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4705 memset(async_events_bmap, 0, sizeof(async_events_bmap));
4706 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4707 u16 event_id = bnxt_async_events_arr[i];
4709 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4710 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4712 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4714 if (bmap && bmap_size) {
4715 for (i = 0; i < bmap_size; i++) {
4716 if (test_bit(i, bmap))
4717 __set_bit(i, async_events_bmap);
4720 for (i = 0; i < 8; i++)
4721 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
4725 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4727 mutex_lock(&bp->hwrm_cmd_lock);
4728 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4730 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4732 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4733 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4735 mutex_unlock(&bp->hwrm_cmd_lock);
4739 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4741 struct hwrm_func_drv_unrgtr_input req = {0};
4743 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4746 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4747 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
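/* The two helpers below program the UDP tunnel destination ports (VXLAN and
 * GENEVE) in the firmware.  _alloc hands the port to the firmware and caches
 * the returned firmware port id; _free releases that id and resets the
 * cached value to INVALID_HW_RING_ID.
 */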
4750 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4753 struct hwrm_tunnel_dst_port_free_input req = {0};
4755 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4756 req.tunnel_type = tunnel_type;
4758 switch (tunnel_type) {
4759 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4760 req.tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
4761 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
4763 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4764 req.tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
4765 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
4771 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4773 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4778 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4782 struct hwrm_tunnel_dst_port_alloc_input req = {0};
4783 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4785 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4787 req.tunnel_type = tunnel_type;
4788 req.tunnel_dst_port_val = port;
4790 mutex_lock(&bp->hwrm_cmd_lock);
4791 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4793 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4798 switch (tunnel_type) {
4799 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4800 bp->vxlan_fw_dst_port_id =
4801 le16_to_cpu(resp->tunnel_dst_port_id);
4803 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4804 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
4811 mutex_unlock(&bp->hwrm_cmd_lock);
4815 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4817 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4818 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4820 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
4821 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4823 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4824 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4825 req.mask = cpu_to_le32(vnic->rx_mask);
4826 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4829 #ifdef CONFIG_RFS_ACCEL
4830 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4831 struct bnxt_ntuple_filter *fltr)
4833 struct hwrm_cfa_ntuple_filter_free_input req = {0};
4835 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4836 req.ntuple_filter_id = fltr->filter_id;
4837 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4840 #define BNXT_NTP_FLTR_FLAGS \
4841 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
4842 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
4843 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
4844 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
4845 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
4846 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
4847 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
4848 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
4849 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
4850 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
4851 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
4852 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
4853 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
4854 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4856 #define BNXT_NTP_TUNNEL_FLTR_FLAG \
4857 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
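/* BNXT_NTP_FLTR_FLAGS enables every match field used below when allocating
 * an ntuple (aRFS) filter: L2 filter id, ethertype, IP protocol, source and
 * destination addresses/ports with full masks, and the destination id that
 * steers matching packets to the flow's RX queue (or directly to the ring
 * index on newer firmware with CFA_RFS_RING_TBL_IDX_V2).
 */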
4859 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4860 struct bnxt_ntuple_filter *fltr)
4862 struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
4863 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4864 struct flow_keys *keys = &fltr->fkeys;
4865 struct bnxt_vnic_info *vnic;
4869 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
4870 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4872 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4873 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
4874 req.dst_id = cpu_to_le16(fltr->rxq);
4876 vnic = &bp->vnic_info[fltr->rxq + 1];
4877 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
4879 req.flags = cpu_to_le32(flags);
4880 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4882 req.ethertype = htons(ETH_P_IP);
4883 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4884 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4885 req.ip_protocol = keys->basic.ip_proto;
4887 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4890 req.ethertype = htons(ETH_P_IPV6);
4892 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4893 *(struct in6_addr *)&req.src_ipaddr[0] =
4894 keys->addrs.v6addrs.src;
4895 *(struct in6_addr *)&req.dst_ipaddr[0] =
4896 keys->addrs.v6addrs.dst;
4897 for (i = 0; i < 4; i++) {
4898 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4899 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4902 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
4903 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4904 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4905 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4907 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4908 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4910 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4913 req.src_port = keys->ports.src;
4914 req.src_port_mask = cpu_to_be16(0xffff);
4915 req.dst_port = keys->ports.dst;
4916 req.dst_port_mask = cpu_to_be16(0xffff);
4918 mutex_lock(&bp->hwrm_cmd_lock);
4919 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4921 resp = bnxt_get_hwrm_resp_addr(bp, &req);
4922 fltr->filter_id = resp->ntuple_filter_id;
4924 mutex_unlock(&bp->hwrm_cmd_lock);
4929 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4933 struct hwrm_cfa_l2_filter_alloc_input req = {0};
4934 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4936 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
4937 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4938 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4940 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
4941 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4943 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
4944 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
4945 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4946 memcpy(req.l2_addr, mac_addr, ETH_ALEN);
4947 req.l2_addr_mask[0] = 0xff;
4948 req.l2_addr_mask[1] = 0xff;
4949 req.l2_addr_mask[2] = 0xff;
4950 req.l2_addr_mask[3] = 0xff;
4951 req.l2_addr_mask[4] = 0xff;
4952 req.l2_addr_mask[5] = 0xff;
4954 mutex_lock(&bp->hwrm_cmd_lock);
4955 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4957 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4959 mutex_unlock(&bp->hwrm_cmd_lock);
4963 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4965 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4968 /* Any associated ntuple filters will also be cleared by firmware. */
4969 mutex_lock(&bp->hwrm_cmd_lock);
4970 for (i = 0; i < num_of_vnics; i++) {
4971 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4973 for (j = 0; j < vnic->uc_filter_count; j++) {
4974 struct hwrm_cfa_l2_filter_free_input req = {0};
4976 bnxt_hwrm_cmd_hdr_init(bp, &req,
4977 HWRM_CFA_L2_FILTER_FREE, -1, -1);
4979 req.l2_filter_id = vnic->fw_l2_filter_id[j];
4981 rc = _hwrm_send_message(bp, &req, sizeof(req),
4984 vnic->uc_filter_count = 0;
4986 mutex_unlock(&bp->hwrm_cmd_lock);
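/* bnxt_hwrm_vnic_set_tpa() configures hardware TPA (LRO/GRO aggregation) for
 * one VNIC.  The max_agg_segs / max_aggs / min_agg_len limits are derived
 * from the device MTU and BNXT_RX_PAGE_SIZE; P5 chips use the fixed
 * MAX_TPA_SEGS_P5 value with the per-device bp->max_tpa limit instead.
 */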
4991 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4993 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4994 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
4995 struct hwrm_vnic_tpa_cfg_input req = {0};
4997 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
5000 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
5003 u16 mss = bp->dev->mtu - 40;
5004 u32 nsegs, n, segs = 0, flags;
5006 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
5007 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
5008 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
5009 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
5010 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
5011 if (tpa_flags & BNXT_FLAG_GRO)
5012 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
5014 req.flags = cpu_to_le32(flags);
5017 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
5018 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
5019 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
5021 /* Number of segs is in log2 units, and the first packet is not
5022 * included as part of these units.
 */
5024 if (mss <= BNXT_RX_PAGE_SIZE) {
5025 n = BNXT_RX_PAGE_SIZE / mss;
5026 nsegs = (MAX_SKB_FRAGS - 1) * n;
5028 n = mss / BNXT_RX_PAGE_SIZE;
5029 if (mss & (BNXT_RX_PAGE_SIZE - 1))
5031 nsegs = (MAX_SKB_FRAGS - n) / n;
5034 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5035 segs = MAX_TPA_SEGS_P5;
5036 max_aggs = bp->max_tpa;
5038 segs = ilog2(nsegs);
5040 req.max_agg_segs = cpu_to_le16(segs);
5041 req.max_aggs = cpu_to_le16(max_aggs);
5043 req.min_agg_len = cpu_to_le32(512);
5045 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5047 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
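/* Completion ring lookup helpers.  On P5 chips every RX and TX ring has its
 * own completion ring (cp_ring_arr[BNXT_RX_HDL] / cp_ring_arr[BNXT_TX_HDL])
 * hanging off the NQ; on older chips the completion ring comes from the
 * ring group.
 */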
5050 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
5052 struct bnxt_ring_grp_info *grp_info;
5054 grp_info = &bp->grp_info[ring->grp_idx];
5055 return grp_info->cp_fw_ring_id;
5058 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
5060 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5061 struct bnxt_napi *bnapi = rxr->bnapi;
5062 struct bnxt_cp_ring_info *cpr;
5064 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
5065 return cpr->cp_ring_struct.fw_ring_id;
5067 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
5071 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
5073 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5074 struct bnxt_napi *bnapi = txr->bnapi;
5075 struct bnxt_cp_ring_info *cpr;
5077 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
5078 return cpr->cp_ring_struct.fw_ring_id;
5080 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
5084 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
5088 if (bp->flags & BNXT_FLAG_CHIP_P5)
5089 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
5091 entries = HW_HASH_INDEX_SIZE;
5093 bp->rss_indir_tbl_entries = entries;
5094 bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
5096 if (!bp->rss_indir_tbl)
5101 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5103 u16 max_rings, max_entries, pad, i;
5105 if (!bp->rx_nr_rings)
5108 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5109 max_rings = bp->rx_nr_rings - 1;
5111 max_rings = bp->rx_nr_rings;
5113 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5115 for (i = 0; i < max_entries; i++)
5116 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5118 pad = bp->rss_indir_tbl_entries - max_entries;
5120 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5123 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5125 u16 i, tbl_size, max_ring = 0;
5127 if (!bp->rss_indir_tbl)
5130 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5131 for (i = 0; i < tbl_size; i++)
5132 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5136 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5138 if (bp->flags & BNXT_FLAG_CHIP_P5)
5139 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5140 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
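/* RSS indirection table helpers.  The legacy format is a flat table of ring
 * group ids (HW_HASH_INDEX_SIZE entries); the P5 format is a table of
 * (rx ring id, completion ring id) pairs split across one or more RSS
 * contexts of BNXT_RSS_TABLE_ENTRIES_P5 entries each.
 */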
5145 static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5147 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5150 /* Fill the RSS indirection table with ring group ids */
5151 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5153 j = bp->rss_indir_tbl[i];
5154 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5158 static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5159 struct bnxt_vnic_info *vnic)
5161 __le16 *ring_tbl = vnic->rss_table;
5162 struct bnxt_rx_ring_info *rxr;
5165 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5167 for (i = 0; i < tbl_size; i++) {
5170 j = bp->rss_indir_tbl[i];
5171 rxr = &bp->rx_ring[j];
5173 ring_id = rxr->rx_ring_struct.fw_ring_id;
5174 *ring_tbl++ = cpu_to_le16(ring_id);
5175 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5176 *ring_tbl++ = cpu_to_le16(ring_id);
5180 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5182 if (bp->flags & BNXT_FLAG_CHIP_P5)
5183 __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5185 __bnxt_fill_hw_rss_tbl(bp, vnic);
5188 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5190 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5191 struct hwrm_vnic_rss_cfg_input req = {0};
5193 if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5194 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
5197 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5199 bnxt_fill_hw_rss_tbl(bp, vnic);
5200 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
5201 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5202 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5203 req.hash_key_tbl_addr =
5204 cpu_to_le64(vnic->rss_hash_key_dma_addr);
5206 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5207 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5210 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5212 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5213 struct hwrm_vnic_rss_cfg_input req = {0};
5214 dma_addr_t ring_tbl_map;
5217 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5218 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5220 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5223 bnxt_fill_hw_rss_tbl(bp, vnic);
5224 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
5225 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5226 req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
5227 ring_tbl_map = vnic->rss_table_dma_addr;
5228 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
5229 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
5232 req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
5233 req.ring_table_pair_index = i;
5234 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
5235 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5242 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5244 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5245 struct hwrm_vnic_plcmodes_cfg_input req = {0};
5247 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
5248 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
5249 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5250 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5252 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
5253 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
5254 /* thresholds not implemented in firmware yet */
5255 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5256 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5257 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5258 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
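/* Each VNIC can own up to BNXT_MAX_CTX_PER_VNIC RSS/COS/LB contexts in
 * firmware.  The helpers below allocate and free those contexts and track
 * the firmware ids in vnic->fw_rss_cos_lb_ctx[], using INVALID_HW_RING_ID
 * for unused slots.
 */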
5261 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5264 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
5266 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
5267 req.rss_cos_lb_ctx_id =
5268 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
5270 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5271 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
5274 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5278 for (i = 0; i < bp->nr_vnics; i++) {
5279 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5281 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5282 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5283 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5286 bp->rsscos_nr_ctxs = 0;
5289 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
5292 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
5293 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
5294 bp->hwrm_cmd_resp_addr;
5296 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
5299 mutex_lock(&bp->hwrm_cmd_lock);
5300 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5302 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
5303 le16_to_cpu(resp->rss_cos_lb_ctx_id);
5304 mutex_unlock(&bp->hwrm_cmd_lock);
5309 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5311 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5312 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5313 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
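/* bnxt_hwrm_vnic_cfg() binds a VNIC to its default ring (group), RSS/COS
 * rules and MRU.  On P5 chips the default RX and completion ring ids are
 * passed explicitly; on older chips the default ring group is used.  VLAN
 * stripping and the RoCE dual/mirroring VNIC mode are also set here.
 */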
5316 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
5318 unsigned int ring = 0, grp_idx;
5319 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5320 struct hwrm_vnic_cfg_input req = {0};
5323 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
5325 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5326 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5328 req.default_rx_ring_id =
5329 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5330 req.default_cmpl_ring_id =
5331 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5333 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5334 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5337 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
5338 /* Only RSS is supported for now; COS & LB are TBD */
5339 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5340 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5341 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5342 VNIC_CFG_REQ_ENABLES_MRU);
5343 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5345 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5346 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5347 VNIC_CFG_REQ_ENABLES_MRU);
5348 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
5350 req.rss_rule = cpu_to_le16(0xffff);
5353 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5354 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5355 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5356 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5358 req.cos_rule = cpu_to_le16(0xffff);
5361 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5363 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5365 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5366 ring = bp->rx_nr_rings - 1;
5368 grp_idx = bp->rx_ring[ring].bnapi->index;
5369 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5370 req.lb_rule = cpu_to_le16(0xffff);
5372 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
5374 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5375 #ifdef CONFIG_BNXT_SRIOV
5377 def_vlan = bp->vf.vlan;
5379 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5380 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
5381 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
5382 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5384 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5387 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5389 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5390 struct hwrm_vnic_free_input req = {0};
5392 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
5394 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5396 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5397 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5401 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5405 for (i = 0; i < bp->nr_vnics; i++)
5406 bnxt_hwrm_vnic_free_one(bp, i);
5409 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5410 unsigned int start_rx_ring_idx,
5411 unsigned int nr_rings)
5414 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
5415 struct hwrm_vnic_alloc_input req = {0};
5416 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5417 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5419 if (bp->flags & BNXT_FLAG_CHIP_P5)
5420 goto vnic_no_ring_grps;
5422 /* map ring groups to this vnic */
5423 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5424 grp_idx = bp->rx_ring[i].bnapi->index;
5425 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5426 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5430 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5434 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5435 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5437 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5439 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
5441 mutex_lock(&bp->hwrm_cmd_lock);
5442 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5444 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
5445 mutex_unlock(&bp->hwrm_cmd_lock);
5449 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5451 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5452 struct hwrm_vnic_qcaps_input req = {0};
5455 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5456 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5457 if (bp->hwrm_spec_code < 0x10600)
5460 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
5461 mutex_lock(&bp->hwrm_cmd_lock);
5462 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5464 u32 flags = le32_to_cpu(resp->flags);
5466 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5467 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
5468 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5470 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5471 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5473 /* Older P5 fw before EXT_HW_STATS support did not set
5474 * VLAN_STRIP_CAP properly.
 */
5476 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
5477 (BNXT_CHIP_P5_THOR(bp) &&
5478 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5479 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
5480 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5481 if (bp->max_tpa_v2) {
5482 if (BNXT_CHIP_P5_THOR(bp))
5483 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5485 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5488 mutex_unlock(&bp->hwrm_cmd_lock);
5492 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5497 if (bp->flags & BNXT_FLAG_CHIP_P5)
5500 mutex_lock(&bp->hwrm_cmd_lock);
5501 for (i = 0; i < bp->rx_nr_rings; i++) {
5502 struct hwrm_ring_grp_alloc_input req = {0};
5503 struct hwrm_ring_grp_alloc_output *resp =
5504 bp->hwrm_cmd_resp_addr;
5505 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5507 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
5509 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5510 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5511 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5512 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5514 rc = _hwrm_send_message(bp, &req, sizeof(req),
5519 bp->grp_info[grp_idx].fw_grp_id =
5520 le32_to_cpu(resp->ring_group_id);
5522 mutex_unlock(&bp->hwrm_cmd_lock);
5526 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5529 struct hwrm_ring_grp_free_input req = {0};
5531 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5534 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
5536 mutex_lock(&bp->hwrm_cmd_lock);
5537 for (i = 0; i < bp->cp_nr_rings; i++) {
5538 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5541 cpu_to_le32(bp->grp_info[i].fw_grp_id);
5543 _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5544 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5546 mutex_unlock(&bp->hwrm_cmd_lock);
5549 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5550 struct bnxt_ring_struct *ring,
5551 u32 ring_type, u32 map_index)
5553 int rc = 0, err = 0;
5554 struct hwrm_ring_alloc_input req = {0};
5555 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5556 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5557 struct bnxt_ring_grp_info *grp_info;
5560 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
5563 if (rmem->nr_pages > 1) {
5564 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
5565 /* Page size is in log2 units */
5566 req.page_size = BNXT_PAGE_SHIFT;
5567 req.page_tbl_depth = 1;
5569 req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
5572 /* Association of ring index with doorbell index and MSIX number */
5573 req.logical_id = cpu_to_le16(map_index);
5575 switch (ring_type) {
5576 case HWRM_RING_ALLOC_TX: {
5577 struct bnxt_tx_ring_info *txr;
5579 txr = container_of(ring, struct bnxt_tx_ring_info,
5581 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5582 /* Association of transmit ring with completion ring */
5583 grp_info = &bp->grp_info[ring->grp_idx];
5584 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5585 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
5586 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5587 req.queue_id = cpu_to_le16(ring->queue_id);
5590 case HWRM_RING_ALLOC_RX:
5591 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5592 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
5593 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5596 /* Association of rx ring with stats context */
5597 grp_info = &bp->grp_info[ring->grp_idx];
5598 req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5599 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5600 req.enables |= cpu_to_le32(
5601 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5602 if (NET_IP_ALIGN == 2)
5603 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5604 req.flags = cpu_to_le16(flags);
5607 case HWRM_RING_ALLOC_AGG:
5608 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5609 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5610 /* Association of agg ring with rx ring */
5611 grp_info = &bp->grp_info[ring->grp_idx];
5612 req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5613 req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5614 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5615 req.enables |= cpu_to_le32(
5616 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5617 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5619 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5621 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5623 case HWRM_RING_ALLOC_CMPL:
5624 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5625 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5626 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5627 /* Association of cp ring with nq */
5628 grp_info = &bp->grp_info[map_index];
5629 req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5630 req.cq_handle = cpu_to_le64(ring->handle);
5631 req.enables |= cpu_to_le32(
5632 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5633 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5634 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5637 case HWRM_RING_ALLOC_NQ:
5638 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5639 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5640 if (bp->flags & BNXT_FLAG_USING_MSIX)
5641 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5644 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5649 mutex_lock(&bp->hwrm_cmd_lock);
5650 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5651 err = le16_to_cpu(resp->error_code);
5652 ring_id = le16_to_cpu(resp->ring_id);
5653 mutex_unlock(&bp->hwrm_cmd_lock);
5656 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5657 ring_type, rc, err);
5660 ring->fw_ring_id = ring_id;
5664 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5669 struct hwrm_func_cfg_input req = {0};
5671 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
5672 req.fid = cpu_to_le16(0xffff);
5673 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5674 req.async_event_cr = cpu_to_le16(idx);
5675 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5677 struct hwrm_func_vf_cfg_input req = {0};
5679 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
5681 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5682 req.async_event_cr = cpu_to_le16(idx);
5683 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
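/* bnxt_set_db() records the doorbell address and key for a newly allocated
 * ring.  P5 chips use a single 64-bit doorbell page (DB_PF_OFFSET_P5 or
 * DB_VF_OFFSET_P5) with the ring xid and a DBR_TYPE_* key encoded in
 * db_key64; legacy chips use a separate 32-bit doorbell per ring at
 * map_idx * 0x80 in BAR1.
 */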
5688 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5689 u32 map_idx, u32 xid)
5691 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5693 db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
5695 db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
5696 switch (ring_type) {
5697 case HWRM_RING_ALLOC_TX:
5698 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5700 case HWRM_RING_ALLOC_RX:
5701 case HWRM_RING_ALLOC_AGG:
5702 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5704 case HWRM_RING_ALLOC_CMPL:
5705 db->db_key64 = DBR_PATH_L2;
5707 case HWRM_RING_ALLOC_NQ:
5708 db->db_key64 = DBR_PATH_L2;
5711 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5713 db->doorbell = bp->bar1 + map_idx * 0x80;
5714 switch (ring_type) {
5715 case HWRM_RING_ALLOC_TX:
5716 db->db_key32 = DB_KEY_TX;
5718 case HWRM_RING_ALLOC_RX:
5719 case HWRM_RING_ALLOC_AGG:
5720 db->db_key32 = DB_KEY_RX;
5722 case HWRM_RING_ALLOC_CMPL:
5723 db->db_key32 = DB_KEY_CP;
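/* bnxt_hwrm_ring_alloc() allocates all firmware rings for the device in
 * dependency order: NQs (or completion rings on legacy chips) first, then TX
 * rings with their per-ring completion rings on P5, then RX rings, and
 * finally the aggregation rings when aggregation is enabled.
 */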
5729 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5731 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5735 if (bp->flags & BNXT_FLAG_CHIP_P5)
5736 type = HWRM_RING_ALLOC_NQ;
5738 type = HWRM_RING_ALLOC_CMPL;
5739 for (i = 0; i < bp->cp_nr_rings; i++) {
5740 struct bnxt_napi *bnapi = bp->bnapi[i];
5741 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5742 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5743 u32 map_idx = ring->map_idx;
5744 unsigned int vector;
5746 vector = bp->irq_tbl[map_idx].vector;
5747 disable_irq_nosync(vector);
5748 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5753 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5754 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5756 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5759 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5761 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5765 type = HWRM_RING_ALLOC_TX;
5766 for (i = 0; i < bp->tx_nr_rings; i++) {
5767 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5768 struct bnxt_ring_struct *ring;
5771 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5772 struct bnxt_napi *bnapi = txr->bnapi;
5773 struct bnxt_cp_ring_info *cpr, *cpr2;
5774 u32 type2 = HWRM_RING_ALLOC_CMPL;
5776 cpr = &bnapi->cp_ring;
5777 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5778 ring = &cpr2->cp_ring_struct;
5779 ring->handle = BNXT_TX_HDL;
5780 map_idx = bnapi->index;
5781 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5784 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5786 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5788 ring = &txr->tx_ring_struct;
5790 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5793 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5796 type = HWRM_RING_ALLOC_RX;
5797 for (i = 0; i < bp->rx_nr_rings; i++) {
5798 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5799 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5800 struct bnxt_napi *bnapi = rxr->bnapi;
5801 u32 map_idx = bnapi->index;
5803 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5806 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5807 /* If we have agg rings, post agg buffers first. */
5809 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5810 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5811 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5812 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5813 u32 type2 = HWRM_RING_ALLOC_CMPL;
5814 struct bnxt_cp_ring_info *cpr2;
5816 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5817 ring = &cpr2->cp_ring_struct;
5818 ring->handle = BNXT_RX_HDL;
5819 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5822 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5824 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5829 type = HWRM_RING_ALLOC_AGG;
5830 for (i = 0; i < bp->rx_nr_rings; i++) {
5831 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5832 struct bnxt_ring_struct *ring =
5833 &rxr->rx_agg_ring_struct;
5834 u32 grp_idx = ring->grp_idx;
5835 u32 map_idx = grp_idx + bp->rx_nr_rings;
5837 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5841 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5843 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5844 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5845 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5852 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5853 struct bnxt_ring_struct *ring,
5854 u32 ring_type, int cmpl_ring_id)
5857 struct hwrm_ring_free_input req = {0};
5858 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
5861 if (BNXT_NO_FW_ACCESS(bp))
5864 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
5865 req.ring_type = ring_type;
5866 req.ring_id = cpu_to_le16(ring->fw_ring_id);
5868 mutex_lock(&bp->hwrm_cmd_lock);
5869 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5870 error_code = le16_to_cpu(resp->error_code);
5871 mutex_unlock(&bp->hwrm_cmd_lock);
5873 if (rc || error_code) {
5874 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5875 ring_type, rc, error_code);
5881 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5889 for (i = 0; i < bp->tx_nr_rings; i++) {
5890 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5891 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5893 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5894 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5896 hwrm_ring_free_send_msg(bp, ring,
5897 RING_FREE_REQ_RING_TYPE_TX,
5898 close_path ? cmpl_ring_id :
5899 INVALID_HW_RING_ID);
5900 ring->fw_ring_id = INVALID_HW_RING_ID;
5904 for (i = 0; i < bp->rx_nr_rings; i++) {
5905 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5906 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5907 u32 grp_idx = rxr->bnapi->index;
5909 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5910 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5912 hwrm_ring_free_send_msg(bp, ring,
5913 RING_FREE_REQ_RING_TYPE_RX,
5914 close_path ? cmpl_ring_id :
5915 INVALID_HW_RING_ID);
5916 ring->fw_ring_id = INVALID_HW_RING_ID;
5917 bp->grp_info[grp_idx].rx_fw_ring_id =
5922 if (bp->flags & BNXT_FLAG_CHIP_P5)
5923 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5925 type = RING_FREE_REQ_RING_TYPE_RX;
5926 for (i = 0; i < bp->rx_nr_rings; i++) {
5927 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5928 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
5929 u32 grp_idx = rxr->bnapi->index;
5931 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5932 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5934 hwrm_ring_free_send_msg(bp, ring, type,
5935 close_path ? cmpl_ring_id :
5936 INVALID_HW_RING_ID);
5937 ring->fw_ring_id = INVALID_HW_RING_ID;
5938 bp->grp_info[grp_idx].agg_fw_ring_id =
5943 /* The completion rings are about to be freed. After that the
5944 * IRQ doorbell will not work anymore.  So we need to disable
 * the IRQ here.
 */
5947 bnxt_disable_int_sync(bp);
5949 if (bp->flags & BNXT_FLAG_CHIP_P5)
5950 type = RING_FREE_REQ_RING_TYPE_NQ;
5952 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
5953 for (i = 0; i < bp->cp_nr_rings; i++) {
5954 struct bnxt_napi *bnapi = bp->bnapi[i];
5955 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5956 struct bnxt_ring_struct *ring;
5959 for (j = 0; j < 2; j++) {
5960 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5963 ring = &cpr2->cp_ring_struct;
5964 if (ring->fw_ring_id == INVALID_HW_RING_ID)
5966 hwrm_ring_free_send_msg(bp, ring,
5967 RING_FREE_REQ_RING_TYPE_L2_CMPL,
5968 INVALID_HW_RING_ID);
5969 ring->fw_ring_id = INVALID_HW_RING_ID;
5972 ring = &cpr->cp_ring_struct;
5973 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5974 hwrm_ring_free_send_msg(bp, ring, type,
5975 INVALID_HW_RING_ID);
5976 ring->fw_ring_id = INVALID_HW_RING_ID;
5977 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
5982 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5985 static int bnxt_hwrm_get_rings(struct bnxt *bp)
5987 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5988 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5989 struct hwrm_func_qcfg_input req = {0};
5992 if (bp->hwrm_spec_code < 0x10601)
5995 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5996 req.fid = cpu_to_le16(0xffff);
5997 mutex_lock(&bp->hwrm_cmd_lock);
5998 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6000 mutex_unlock(&bp->hwrm_cmd_lock);
6004 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6005 if (BNXT_NEW_RM(bp)) {
6008 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
6009 hw_resc->resv_hw_ring_grps =
6010 le32_to_cpu(resp->alloc_hw_ring_grps);
6011 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
6012 cp = le16_to_cpu(resp->alloc_cmpl_rings);
6013 stats = le16_to_cpu(resp->alloc_stat_ctx);
6014 hw_resc->resv_irqs = cp;
6015 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6016 int rx = hw_resc->resv_rx_rings;
6017 int tx = hw_resc->resv_tx_rings;
6019 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6021 if (cp < (rx + tx)) {
6022 bnxt_trim_rings(bp, &rx, &tx, cp, false);
6023 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6025 hw_resc->resv_rx_rings = rx;
6026 hw_resc->resv_tx_rings = tx;
6028 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
6029 hw_resc->resv_hw_ring_grps = rx;
6031 hw_resc->resv_cp_rings = cp;
6032 hw_resc->resv_stat_ctxs = stats;
6034 mutex_unlock(&bp->hwrm_cmd_lock);
6038 /* Caller must hold bp->hwrm_cmd_lock */
6039 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
6041 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6042 struct hwrm_func_qcfg_input req = {0};
6045 if (bp->hwrm_spec_code < 0x10601)
6048 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6049 req.fid = cpu_to_le16(fid);
6050 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6052 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6057 static bool bnxt_rfs_supported(struct bnxt *bp);
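/* The __bnxt_hwrm_reserve_{pf,vf}_rings() helpers only build the FUNC_CFG /
 * FUNC_VF_CFG request from the requested resource counts.  On P5 chips the
 * completion ring count is tx_rings + ring_grps (one completion ring per TX
 * and per RX ring) and one RSS context covers up to 64 ring groups; legacy
 * chips reserve cp_rings, ring groups and a single RSS context directly.
 */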
6060 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
6061 int tx_rings, int rx_rings, int ring_grps,
6062 int cp_rings, int stats, int vnics)
6066 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
6067 req->fid = cpu_to_le16(0xffff);
6068 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6069 req->num_tx_rings = cpu_to_le16(tx_rings);
6070 if (BNXT_NEW_RM(bp)) {
6071 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
6072 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6073 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6074 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
6075 enables |= tx_rings + ring_grps ?
6076 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6077 enables |= rx_rings ?
6078 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6080 enables |= cp_rings ?
6081 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6082 enables |= ring_grps ?
6083 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
6084 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6086 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
6088 req->num_rx_rings = cpu_to_le16(rx_rings);
6089 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6090 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6091 req->num_msix = cpu_to_le16(cp_rings);
6092 req->num_rsscos_ctxs =
6093 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6095 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6096 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6097 req->num_rsscos_ctxs = cpu_to_le16(1);
6098 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
6099 bnxt_rfs_supported(bp))
6100 req->num_rsscos_ctxs =
6101 cpu_to_le16(ring_grps + 1);
6103 req->num_stat_ctxs = cpu_to_le16(stats);
6104 req->num_vnics = cpu_to_le16(vnics);
6106 req->enables = cpu_to_le32(enables);
6110 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
6111 struct hwrm_func_vf_cfg_input *req, int tx_rings,
6112 int rx_rings, int ring_grps, int cp_rings,
6113 int stats, int vnics)
6117 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
6118 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6119 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
6120 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6121 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6122 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6123 enables |= tx_rings + ring_grps ?
6124 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6126 enables |= cp_rings ?
6127 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6128 enables |= ring_grps ?
6129 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
6131 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
6132 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
6134 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
6135 req->num_tx_rings = cpu_to_le16(tx_rings);
6136 req->num_rx_rings = cpu_to_le16(rx_rings);
6137 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6138 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6139 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6141 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6142 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6143 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
6145 req->num_stat_ctxs = cpu_to_le16(stats);
6146 req->num_vnics = cpu_to_le16(vnics);
6148 req->enables = cpu_to_le32(enables);
6152 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6153 int ring_grps, int cp_rings, int stats, int vnics)
6155 struct hwrm_func_cfg_input req = {0};
6158 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6159 cp_rings, stats, vnics);
6163 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6167 if (bp->hwrm_spec_code < 0x10601)
6168 bp->hw_resc.resv_tx_rings = tx_rings;
6170 return bnxt_hwrm_get_rings(bp);
6174 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6175 int ring_grps, int cp_rings, int stats, int vnics)
6177 struct hwrm_func_vf_cfg_input req = {0};
6180 if (!BNXT_NEW_RM(bp)) {
6181 bp->hw_resc.resv_tx_rings = tx_rings;
6185 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6186 cp_rings, stats, vnics);
6187 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6191 return bnxt_hwrm_get_rings(bp);
6194 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
6195 int cp, int stat, int vnic)
6198 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6201 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6205 int bnxt_nq_rings_in_use(struct bnxt *bp)
6207 int cp = bp->cp_nr_rings;
6208 int ulp_msix, ulp_base;
6210 ulp_msix = bnxt_get_ulp_msix_num(bp);
6212 ulp_base = bnxt_get_ulp_msix_base(bp);
6214 if ((ulp_base + ulp_msix) > cp)
6215 cp = ulp_base + ulp_msix;
6220 static int bnxt_cp_rings_in_use(struct bnxt *bp)
6224 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6225 return bnxt_nq_rings_in_use(bp);
6227 cp = bp->tx_nr_rings + bp->rx_nr_rings;
6231 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6233 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6234 int cp = bp->cp_nr_rings;
6239 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6240 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6242 return cp + ulp_stat;
6245 /* Check if a default RSS map needs to be set up.  This function is only
6246 * used on older firmware that does not require reserving RX rings.
 */
6248 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6250 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6252 /* The RSS map is valid for RX rings set to resv_rx_rings */
6253 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6254 hw_resc->resv_rx_rings = bp->rx_nr_rings;
6255 if (!netif_is_rxfh_configured(bp->dev))
6256 bnxt_set_dflt_rss_indir_tbl(bp);
6260 static bool bnxt_need_reserve_rings(struct bnxt *bp)
6262 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6263 int cp = bnxt_cp_rings_in_use(bp);
6264 int nq = bnxt_nq_rings_in_use(bp);
6265 int rx = bp->rx_nr_rings, stat;
6266 int vnic = 1, grp = rx;
6268 if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6269 bp->hwrm_spec_code >= 0x10601)
6272 /* Old firmware does not need RX ring reservations but we still
6273 * need to set up a default RSS map when needed.  With new firmware
6274 * we go through RX ring reservations first and then set up the
6275 * RSS map for the successfully reserved RX rings when needed.
 */
6277 if (!BNXT_NEW_RM(bp)) {
6278 bnxt_check_rss_tbl_no_rmgr(bp);
6281 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6283 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6285 stat = bnxt_get_func_stat_ctxs(bp);
6286 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
6287 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6288 (hw_resc->resv_hw_ring_grps != grp &&
6289 !(bp->flags & BNXT_FLAG_CHIP_P5)))
6291 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6292 hw_resc->resv_irqs != nq)
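/* __bnxt_reserve_rings() asks the firmware for the ring counts implied by
 * the current configuration and then trims bp->tx_nr_rings / bp->rx_nr_rings
 * / bp->cp_nr_rings down to what was actually granted, dropping aggregation
 * rings (and LRO) and resetting the RSS indirection table only when
 * unavoidable.
 */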
6297 static int __bnxt_reserve_rings(struct bnxt *bp)
6299 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6300 int cp = bnxt_nq_rings_in_use(bp);
6301 int tx = bp->tx_nr_rings;
6302 int rx = bp->rx_nr_rings;
6303 int grp, rx_rings, rc;
6307 if (!bnxt_need_reserve_rings(bp))
6310 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6312 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6314 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6316 grp = bp->rx_nr_rings;
6317 stat = bnxt_get_func_stat_ctxs(bp);
6319 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
6323 tx = hw_resc->resv_tx_rings;
6324 if (BNXT_NEW_RM(bp)) {
6325 rx = hw_resc->resv_rx_rings;
6326 cp = hw_resc->resv_irqs;
6327 grp = hw_resc->resv_hw_ring_grps;
6328 vnic = hw_resc->resv_vnics;
6329 stat = hw_resc->resv_stat_ctxs;
6333 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6337 if (netif_running(bp->dev))
6340 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6341 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6342 bp->dev->hw_features &= ~NETIF_F_LRO;
6343 bp->dev->features &= ~NETIF_F_LRO;
6344 bnxt_set_ring_params(bp);
6347 rx_rings = min_t(int, rx_rings, grp);
6348 cp = min_t(int, cp, bp->cp_nr_rings);
6349 if (stat > bnxt_get_ulp_stat_ctxs(bp))
6350 stat -= bnxt_get_ulp_stat_ctxs(bp);
6351 cp = min_t(int, cp, stat);
6352 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6353 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6355 cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6356 bp->tx_nr_rings = tx;
6358 /* If we cannot reserve all the RX rings, reset the RSS map only
6359 * if absolutely necessary.
 */
6361 if (rx_rings != bp->rx_nr_rings) {
6362 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6363 rx_rings, bp->rx_nr_rings);
6364 if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
6365 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6366 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6367 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6368 netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6369 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6372 bp->rx_nr_rings = rx_rings;
6373 bp->cp_nr_rings = cp;
6375 if (!tx || !rx || !cp || !grp || !vnic || !stat)
6378 if (!netif_is_rxfh_configured(bp->dev))
6379 bnxt_set_dflt_rss_indir_tbl(bp);
6384 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6385 int ring_grps, int cp_rings, int stats,
6388 struct hwrm_func_vf_cfg_input req = {0};
6391 if (!BNXT_NEW_RM(bp))
6394 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6395 cp_rings, stats, vnics);
6396 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6397 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6398 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6399 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6400 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6401 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6402 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6403 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6405 req.flags = cpu_to_le32(flags);
6406 return hwrm_send_message_silent(bp, &req, sizeof(req),
6410 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6411 int ring_grps, int cp_rings, int stats,
6414 struct hwrm_func_cfg_input req = {0};
6417 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6418 cp_rings, stats, vnics);
6419 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
6420 if (BNXT_NEW_RM(bp)) {
6421 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6422 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6423 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6424 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
6425 if (bp->flags & BNXT_FLAG_CHIP_P5)
6426 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6427 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
6429 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6432 req.flags = cpu_to_le32(flags);
6433 return hwrm_send_message_silent(bp, &req, sizeof(req),
6437 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6438 int ring_grps, int cp_rings, int stats,
6441 if (bp->hwrm_spec_code < 0x10801)
6445 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6446 ring_grps, cp_rings, stats,
6449 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6450 cp_rings, stats, vnics);
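/* Interrupt coalescing capabilities.  Firmware older than HWRM spec 0x10902
 * does not implement RING_AGGINT_QCAPS, so the legacy defaults set below
 * (63/65535 limits, timer_units of 80) are assumed; otherwise the limits
 * reported by the firmware are used when clamping the user's coalescing
 * settings.
 */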
6453 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6455 struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6456 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6457 struct hwrm_ring_aggint_qcaps_input req = {0};
6460 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6461 coal_cap->num_cmpl_dma_aggr_max = 63;
6462 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6463 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6464 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6465 coal_cap->int_lat_tmr_min_max = 65535;
6466 coal_cap->int_lat_tmr_max_max = 65535;
6467 coal_cap->num_cmpl_aggr_int_max = 65535;
6468 coal_cap->timer_units = 80;
6470 if (bp->hwrm_spec_code < 0x10902)
6473 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
6474 mutex_lock(&bp->hwrm_cmd_lock);
6475 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6477 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
6478 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
6479 coal_cap->num_cmpl_dma_aggr_max =
6480 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6481 coal_cap->num_cmpl_dma_aggr_during_int_max =
6482 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6483 coal_cap->cmpl_aggr_dma_tmr_max =
6484 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6485 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6486 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6487 coal_cap->int_lat_tmr_min_max =
6488 le16_to_cpu(resp->int_lat_tmr_min_max);
6489 coal_cap->int_lat_tmr_max_max =
6490 le16_to_cpu(resp->int_lat_tmr_max_max);
6491 coal_cap->num_cmpl_aggr_int_max =
6492 le16_to_cpu(resp->num_cmpl_aggr_int_max);
6493 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6495 mutex_unlock(&bp->hwrm_cmd_lock);
6498 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6500 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6502 return usec * 1000 / coal_cap->timer_units;
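/* Example: with the legacy default timer_units of 80, a 25 usec coalescing
 * tick value converts to 25 * 1000 / 80 = 312 hardware timer units
 * (integer division).
 */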
6505 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6506 struct bnxt_coal *hw_coal,
6507 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6509 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6510 u32 cmpl_params = coal_cap->cmpl_params;
6511 u16 val, tmr, max, flags = 0;
6513 max = hw_coal->bufs_per_record * 128;
6514 if (hw_coal->budget)
6515 max = hw_coal->bufs_per_record * hw_coal->budget;
6516 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6518 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6519 req->num_cmpl_aggr_int = cpu_to_le16(val);
6521 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6522 req->num_cmpl_dma_aggr = cpu_to_le16(val);
6524 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6525 coal_cap->num_cmpl_dma_aggr_during_int_max);
6526 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6528 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6529 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6530 req->int_lat_tmr_max = cpu_to_le16(tmr);
6532 /* min timer set to 1/2 of interrupt timer */
6533 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6535 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6536 req->int_lat_tmr_min = cpu_to_le16(val);
6537 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6540 /* buf timer set to 1/4 of interrupt timer */
6541 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6542 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6545 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6546 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6547 val = clamp_t(u16, tmr, 1,
6548 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6549 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6551 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6554 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6555 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6556 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6557 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6558 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6559 req->flags = cpu_to_le16(flags);
6560 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6563 /* Caller holds bp->hwrm_cmd_lock */
6564 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6565 struct bnxt_coal *hw_coal)
6567 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
6568 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6569 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6570 u32 nq_params = coal_cap->nq_params;
6573 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6576 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
6578 req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6580 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6582 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6583 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6584 req.int_lat_tmr_min = cpu_to_le16(tmr);
6585 req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6586 return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6589 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6591 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
6592 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6593 struct bnxt_coal coal;
6595 /* Tick values are in microseconds.
6596 * 1 coal_buf x bufs_per_record = 1 completion record.
 */
6598 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6600 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6601 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6603 if (!bnapi->rx_ring)
6606 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6607 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6609 bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
6611 req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6613 return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
6617 int bnxt_hwrm_set_coal(struct bnxt *bp)
6620 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
6623 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6624 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6625 bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
6626 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6628 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
6629 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
6631 mutex_lock(&bp->hwrm_cmd_lock);
6632 for (i = 0; i < bp->cp_nr_rings; i++) {
6633 struct bnxt_napi *bnapi = bp->bnapi[i];
6634 struct bnxt_coal *hw_coal;
6638 if (!bnapi->rx_ring) {
6639 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6642 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6644 req->ring_id = cpu_to_le16(ring_id);
6646 rc = _hwrm_send_message(bp, req, sizeof(*req),
6651 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6654 if (bnapi->rx_ring && bnapi->tx_ring) {
6656 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6657 req->ring_id = cpu_to_le16(ring_id);
6658 rc = _hwrm_send_message(bp, req, sizeof(*req),
6664 hw_coal = &bp->rx_coal;
6666 hw_coal = &bp->tx_coal;
6667 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6669 mutex_unlock(&bp->hwrm_cmd_lock);
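/* bnxt_hwrm_stat_ctx_free() below releases the per-completion-ring
 * statistics contexts.  Nothing is done on Nitro A0.  For firmware
 * with major version 20 or older the counters are first cleared with
 * HWRM_STAT_CTX_CLR_STATS before HWRM_STAT_CTX_FREE is sent; the
 * context ID is then marked invalid so it is not freed twice.
 */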
6673 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6675 struct hwrm_stat_ctx_clr_stats_input req0 = {0};
6676 struct hwrm_stat_ctx_free_input req = {0};
6682 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6685 bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1);
6686 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
6688 mutex_lock(&bp->hwrm_cmd_lock);
6689 for (i = 0; i < bp->cp_nr_rings; i++) {
6690 struct bnxt_napi *bnapi = bp->bnapi[i];
6691 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6693 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6694 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6695 if (BNXT_FW_MAJ(bp) <= 20) {
6696 req0.stat_ctx_id = req.stat_ctx_id;
6697 _hwrm_send_message(bp, &req0, sizeof(req0),
6700 _hwrm_send_message(bp, &req, sizeof(req),
6703 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6706 mutex_unlock(&bp->hwrm_cmd_lock);
6709 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6712 struct hwrm_stat_ctx_alloc_input req = {0};
6713 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6715 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6718 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
6720 req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6721 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
6723 mutex_lock(&bp->hwrm_cmd_lock);
6724 for (i = 0; i < bp->cp_nr_rings; i++) {
6725 struct bnxt_napi *bnapi = bp->bnapi[i];
6726 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6728 req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
6730 rc = _hwrm_send_message(bp, &req, sizeof(req),
6735 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6737 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6739 mutex_unlock(&bp->hwrm_cmd_lock);
6743 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6745 struct hwrm_func_qcfg_input req = {0};
6746 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6747 u32 min_db_offset = 0;
6751 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6752 req.fid = cpu_to_le16(0xffff);
6753 mutex_lock(&bp->hwrm_cmd_lock);
6754 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6756 goto func_qcfg_exit;
6758 #ifdef CONFIG_BNXT_SRIOV
6760 struct bnxt_vf_info *vf = &bp->vf;
6762 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
6764 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
6767 flags = le16_to_cpu(resp->flags);
6768 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6769 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
6770 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
6771 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
6772 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
6774 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6775 bp->flags |= BNXT_FLAG_MULTI_HOST;
6776 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
6777 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
6779 switch (resp->port_partition_type) {
6780 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6781 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6782 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6783 bp->port_partition_type = resp->port_partition_type;
6786 if (bp->hwrm_spec_code < 0x10707 ||
6787 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6788 bp->br_mode = BRIDGE_MODE_VEB;
6789 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6790 bp->br_mode = BRIDGE_MODE_VEPA;
6792 bp->br_mode = BRIDGE_MODE_UNDEF;
6794 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6796 bp->max_mtu = BNXT_MAX_MTU;
6799 goto func_qcfg_exit;
6801 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6803 min_db_offset = DB_PF_OFFSET_P5;
6805 min_db_offset = DB_VF_OFFSET_P5;
6807 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
6809 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
6810 bp->db_size <= min_db_offset)
6811 bp->db_size = pci_resource_len(bp->pdev, 2);
6814 mutex_unlock(&bp->hwrm_cmd_lock);
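/* bnxt_init_ctx_initializer() below records, for each backing-store
 * context type, whether firmware wants the memory pre-initialized and
 * where: the per-type offsets in the qcaps response are expressed in
 * 4-byte units (hence the "* 4"), while types whose bit is clear in
 * ctx_init_mask keep an invalid offset and an init value of 0.  For
 * example, if the QP bit is set and qp_init_offset is 3, each QP entry
 * is initialized with ctx_kind_initializer at byte offset 12.
 */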
6818 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
6819 struct hwrm_func_backing_store_qcaps_output *resp)
6821 struct bnxt_mem_init *mem_init;
6827 init_val = resp->ctx_kind_initializer;
6828 init_mask = le16_to_cpu(resp->ctx_init_mask);
6829 offset = &resp->qp_init_offset;
6830 mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
6831 for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
6832 mem_init->init_val = init_val;
6833 mem_init->offset = BNXT_MEM_INVALID_OFFSET;
6836 if (i == BNXT_CTX_MEM_INIT_STAT)
6837 offset = &resp->stat_init_offset;
6838 if (init_mask & (1 << i))
6839 mem_init->offset = *offset * 4;
6841 mem_init->init_val = 0;
6843 ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
6844 ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
6845 ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
6846 ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
6847 ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
6848 ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
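/* bnxt_hwrm_func_backing_store_qcaps() below asks firmware how much
 * host backing-store memory it needs.  It runs only once (bp->ctx not
 * yet allocated), only on the PF, and only on HWRM 1.9.2 or newer.  On
 * success the per-type entry sizes, entry limits and TQM parameters
 * are cached in a newly allocated bnxt_ctx_mem_info, and one
 * bnxt_ctx_pg_info is allocated per TQM ring: the fast-path rings
 * reported by firmware (capped at BNXT_MAX_TQM_FP_RINGS) plus the
 * slow-path ring(s).
 */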
6851 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6853 struct hwrm_func_backing_store_qcaps_input req = {0};
6854 struct hwrm_func_backing_store_qcaps_output *resp =
6855 bp->hwrm_cmd_resp_addr;
6858 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6861 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
6862 mutex_lock(&bp->hwrm_cmd_lock);
6863 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6865 struct bnxt_ctx_pg_info *ctx_pg;
6866 struct bnxt_ctx_mem_info *ctx;
6869 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6874 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6875 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6876 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6877 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6878 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6879 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6880 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6881 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6882 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6883 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6884 ctx->vnic_max_vnic_entries =
6885 le16_to_cpu(resp->vnic_max_vnic_entries);
6886 ctx->vnic_max_ring_table_entries =
6887 le16_to_cpu(resp->vnic_max_ring_table_entries);
6888 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6889 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6890 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6891 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6892 ctx->tqm_min_entries_per_ring =
6893 le32_to_cpu(resp->tqm_min_entries_per_ring);
6894 ctx->tqm_max_entries_per_ring =
6895 le32_to_cpu(resp->tqm_max_entries_per_ring);
6896 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6897 if (!ctx->tqm_entries_multiple)
6898 ctx->tqm_entries_multiple = 1;
6899 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6900 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
6901 ctx->mrav_num_entries_units =
6902 le16_to_cpu(resp->mrav_num_entries_units);
6903 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6904 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
6906 bnxt_init_ctx_initializer(ctx, resp);
6908 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
6909 if (!ctx->tqm_fp_rings_count)
6910 ctx->tqm_fp_rings_count = bp->max_q;
6911 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
6912 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
6914 tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
6915 ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
6921 for (i = 0; i < tqm_rings; i++, ctx_pg++)
6922 ctx->tqm_mem[i] = ctx_pg;
6928 mutex_unlock(&bp->hwrm_cmd_lock);
6932 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6937 if (!rmem->nr_pages)
6940 if (BNXT_PAGE_SHIFT == 13)
6942 else if (BNXT_PAGE_SIZE == 16)
6946 if (rmem->depth >= 1) {
6947 if (rmem->depth == 2)
6951 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6953 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6957 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
6958 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
6959 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
6960 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
6961 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
6962 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
6964 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6966 struct hwrm_func_backing_store_cfg_input req = {0};
6967 struct bnxt_ctx_mem_info *ctx = bp->ctx;
6968 struct bnxt_ctx_pg_info *ctx_pg;
6969 u32 req_len = sizeof(req);
6970 __le32 *num_entries;
6980 if (req_len > bp->hwrm_max_ext_req_len)
6981 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
6982 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
6983 req.enables = cpu_to_le32(enables);
6985 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
6986 ctx_pg = &ctx->qp_mem;
6987 req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
6988 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
6989 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
6990 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
6991 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6992 &req.qpc_pg_size_qpc_lvl,
6995 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
6996 ctx_pg = &ctx->srq_mem;
6997 req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
6998 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
6999 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
7000 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7001 &req.srq_pg_size_srq_lvl,
7004 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
7005 ctx_pg = &ctx->cq_mem;
7006 req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
7007 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
7008 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
7009 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
7012 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
7013 ctx_pg = &ctx->vnic_mem;
7014 req.vnic_num_vnic_entries =
7015 cpu_to_le16(ctx->vnic_max_vnic_entries);
7016 req.vnic_num_ring_table_entries =
7017 cpu_to_le16(ctx->vnic_max_ring_table_entries);
7018 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
7019 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7020 &req.vnic_pg_size_vnic_lvl,
7021 &req.vnic_page_dir);
7023 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
7024 ctx_pg = &ctx->stat_mem;
7025 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
7026 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
7027 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7028 &req.stat_pg_size_stat_lvl,
7029 &req.stat_page_dir);
7031 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
7032 ctx_pg = &ctx->mrav_mem;
7033 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
7034 if (ctx->mrav_num_entries_units)
7036 FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
7037 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
7038 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7039 &req.mrav_pg_size_mrav_lvl,
7040 &req.mrav_page_dir);
7042 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
7043 ctx_pg = &ctx->tim_mem;
7044 req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
7045 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
7046 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7047 &req.tim_pg_size_tim_lvl,
7050 for (i = 0, num_entries = &req.tqm_sp_num_entries,
7051 pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
7052 pg_dir = &req.tqm_sp_page_dir,
7053 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
7054 i < BNXT_MAX_TQM_RINGS;
7055 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
7056 if (!(enables & ena))
7059 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
7060 ctx_pg = ctx->tqm_mem[i];
7061 *num_entries = cpu_to_le32(ctx_pg->entries);
7062 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
7064 req.flags = cpu_to_le32(flags);
7065 return hwrm_send_message(bp, &req, req_len, HWRM_CMD_TIMEOUT);
7068 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
7069 struct bnxt_ctx_pg_info *ctx_pg)
7071 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7073 rmem->page_size = BNXT_PAGE_SIZE;
7074 rmem->pg_arr = ctx_pg->ctx_pg_arr;
7075 rmem->dma_arr = ctx_pg->ctx_dma_arr;
7076 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
7077 if (rmem->depth >= 1)
7078 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
7079 return bnxt_alloc_ring(bp, rmem);
7082 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
7083 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
7084 u8 depth, struct bnxt_mem_init *mem_init)
7086 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7092 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7093 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
7094 ctx_pg->nr_pages = 0;
7097 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
7101 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
7103 if (!ctx_pg->ctx_pg_tbl)
7105 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
7106 rmem->nr_pages = nr_tbls;
7107 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7110 for (i = 0; i < nr_tbls; i++) {
7111 struct bnxt_ctx_pg_info *pg_tbl;
7113 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
7116 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
7117 rmem = &pg_tbl->ring_mem;
7118 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
7119 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
7121 rmem->nr_pages = MAX_CTX_PAGES;
7122 rmem->mem_init = mem_init;
7123 if (i == (nr_tbls - 1)) {
7124 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
7127 rmem->nr_pages = rem;
7129 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
7134 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7135 if (rmem->nr_pages > 1 || depth)
7137 rmem->mem_init = mem_init;
7138 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
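/* A worked example of the paging scheme above, assuming for
 * illustration 4 KB pages and MAX_CTX_PAGES = 512: a 9 MB context
 * region needs 2304 pages, which exceeds a single page table, so a
 * directory of DIV_ROUND_UP(2304, 512) = 5 page tables is built, the
 * first four mapping 512 pages each and the last mapping the
 * remaining 256.  Regions that fit in one page table (and were not
 * requested at depth 2) use a single level instead.
 */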
7143 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
7144 struct bnxt_ctx_pg_info *ctx_pg)
7146 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7148 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
7149 ctx_pg->ctx_pg_tbl) {
7150 int i, nr_tbls = rmem->nr_pages;
7152 for (i = 0; i < nr_tbls; i++) {
7153 struct bnxt_ctx_pg_info *pg_tbl;
7154 struct bnxt_ring_mem_info *rmem2;
7156 pg_tbl = ctx_pg->ctx_pg_tbl[i];
7159 rmem2 = &pg_tbl->ring_mem;
7160 bnxt_free_ring(bp, rmem2);
7161 ctx_pg->ctx_pg_arr[i] = NULL;
7163 ctx_pg->ctx_pg_tbl[i] = NULL;
7165 kfree(ctx_pg->ctx_pg_tbl);
7166 ctx_pg->ctx_pg_tbl = NULL;
7168 bnxt_free_ring(bp, rmem);
7169 ctx_pg->nr_pages = 0;
7172 static void bnxt_free_ctx_mem(struct bnxt *bp)
7174 struct bnxt_ctx_mem_info *ctx = bp->ctx;
7180 if (ctx->tqm_mem[0]) {
7181 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
7182 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
7183 kfree(ctx->tqm_mem[0]);
7184 ctx->tqm_mem[0] = NULL;
7187 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
7188 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
7189 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
7190 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
7191 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
7192 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
7193 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
7194 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
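/* bnxt_alloc_ctx_mem() below sizes and allocates all of the context
 * memory reported by the qcaps query: QP, SRQ and CQ rings get extra
 * entries (and deeper page tables) when RoCE is supported and this is
 * not a kdump kernel; MR/AV and TIM memory is only set up for
 * RoCE-capable devices; TQM ring 0 carries the slow-path entry count
 * while the remaining fast-path rings share a common count, both
 * rounded to tqm_entries_multiple and clamped to the firmware limits.
 * The enable bits for everything allocated are finally passed to
 * bnxt_hwrm_func_backing_store_cfg().
 */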
7197 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
7199 struct bnxt_ctx_pg_info *ctx_pg;
7200 struct bnxt_ctx_mem_info *ctx;
7201 struct bnxt_mem_init *init;
7202 u32 mem_size, ena, entries;
7203 u32 entries_sp, min;
7210 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7212 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7217 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
7220 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
7226 ctx_pg = &ctx->qp_mem;
7227 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
7229 if (ctx->qp_entry_size) {
7230 mem_size = ctx->qp_entry_size * ctx_pg->entries;
7231 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7232 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7237 ctx_pg = &ctx->srq_mem;
7238 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
7239 if (ctx->srq_entry_size) {
7240 mem_size = ctx->srq_entry_size * ctx_pg->entries;
7241 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
7242 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7247 ctx_pg = &ctx->cq_mem;
7248 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
7249 if (ctx->cq_entry_size) {
7250 mem_size = ctx->cq_entry_size * ctx_pg->entries;
7251 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
7252 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7257 ctx_pg = &ctx->vnic_mem;
7258 ctx_pg->entries = ctx->vnic_max_vnic_entries +
7259 ctx->vnic_max_ring_table_entries;
7260 if (ctx->vnic_entry_size) {
7261 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
7262 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
7263 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7268 ctx_pg = &ctx->stat_mem;
7269 ctx_pg->entries = ctx->stat_max_entries;
7270 if (ctx->stat_entry_size) {
7271 mem_size = ctx->stat_entry_size * ctx_pg->entries;
7272 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
7273 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7279 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7282 ctx_pg = &ctx->mrav_mem;
7283 /* 128K extra is needed to accommodate static AH context
7284 * allocation by f/w.
7286 num_mr = 1024 * 256;
7287 num_ah = 1024 * 128;
7288 ctx_pg->entries = num_mr + num_ah;
7289 if (ctx->mrav_entry_size) {
7290 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
7291 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
7292 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
7296 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
7297 if (ctx->mrav_num_entries_units)
7299 ((num_mr / ctx->mrav_num_entries_units) << 16) |
7300 (num_ah / ctx->mrav_num_entries_units);
7302 ctx_pg = &ctx->tim_mem;
7303 ctx_pg->entries = ctx->qp_mem.entries;
7304 if (ctx->tim_entry_size) {
7305 mem_size = ctx->tim_entry_size * ctx_pg->entries;
7306 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
7310 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
7313 min = ctx->tqm_min_entries_per_ring;
7314 entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
7315 2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
7316 entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
7317 entries = ctx->qp_max_l2_entries + extra_qps + ctx->qp_min_qp1_entries;
7318 entries = roundup(entries, ctx->tqm_entries_multiple);
7319 entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
7320 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
7321 ctx_pg = ctx->tqm_mem[i];
7322 ctx_pg->entries = i ? entries : entries_sp;
7323 if (ctx->tqm_entry_size) {
7324 mem_size = ctx->tqm_entry_size * ctx_pg->entries;
7325 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
7330 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
7332 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
7333 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
7335 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
7339 ctx->flags |= BNXT_CTX_FLAG_INITED;
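/* bnxt_hwrm_func_resc_qcaps() below reads the min/max count of every
 * resource type (completion/TX/RX rings, ring groups, L2 contexts,
 * VNICs, statistics and RSS contexts) into bp->hw_resc; when @all is
 * false only max_tx_sch_inputs is refreshed.  On P5 chips the MSI-X
 * maximum also caps the NQs and ring groups simply track the RX ring
 * count.  For the PF the VF reservation strategy reported by firmware
 * is stored, falling back to maximal for unknown values.
 */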
7343 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
7345 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7346 struct hwrm_func_resource_qcaps_input req = {0};
7347 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7350 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
7351 req.fid = cpu_to_le16(0xffff);
7353 mutex_lock(&bp->hwrm_cmd_lock);
7354 rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
7357 goto hwrm_func_resc_qcaps_exit;
7359 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
7361 goto hwrm_func_resc_qcaps_exit;
7363 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
7364 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7365 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
7366 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7367 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
7368 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7369 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
7370 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7371 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
7372 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
7373 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
7374 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7375 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
7376 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7377 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
7378 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7380 if (bp->flags & BNXT_FLAG_CHIP_P5) {
7381 u16 max_msix = le16_to_cpu(resp->max_msix);
7383 hw_resc->max_nqs = max_msix;
7384 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
7388 struct bnxt_pf_info *pf = &bp->pf;
7390 pf->vf_resv_strategy =
7391 le16_to_cpu(resp->vf_reservation_strategy);
7392 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
7393 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
7395 hwrm_func_resc_qcaps_exit:
7396 mutex_unlock(&bp->hwrm_cmd_lock);
7400 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
7403 struct hwrm_func_qcaps_input req = {0};
7404 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7405 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7406 u32 flags, flags_ext;
7408 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
7409 req.fid = cpu_to_le16(0xffff);
7411 mutex_lock(&bp->hwrm_cmd_lock);
7412 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7414 goto hwrm_func_qcaps_exit;
7416 flags = le32_to_cpu(resp->flags);
7417 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
7418 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
7419 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
7420 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
7421 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
7422 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
7423 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
7424 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
7425 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
7426 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
7427 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
7428 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
7429 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
7430 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
7431 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
7432 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
7434 flags_ext = le32_to_cpu(resp->flags_ext);
7435 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
7436 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
7438 bp->tx_push_thresh = 0;
7439 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
7440 BNXT_FW_MAJ(bp) > 217)
7441 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7443 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7444 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7445 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7446 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7447 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
7448 if (!hw_resc->max_hw_ring_grps)
7449 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
7450 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7451 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7452 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7455 struct bnxt_pf_info *pf = &bp->pf;
7457 pf->fw_fid = le16_to_cpu(resp->fid);
7458 pf->port_id = le16_to_cpu(resp->port_id);
7459 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
7460 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7461 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7462 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7463 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7464 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7465 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7466 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7467 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
7468 bp->flags &= ~BNXT_FLAG_WOL_CAP;
7469 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
7470 bp->flags |= BNXT_FLAG_WOL_CAP;
7472 #ifdef CONFIG_BNXT_SRIOV
7473 struct bnxt_vf_info *vf = &bp->vf;
7475 vf->fw_fid = le16_to_cpu(resp->fid);
7476 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
7480 hwrm_func_qcaps_exit:
7481 mutex_unlock(&bp->hwrm_cmd_lock);
7485 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
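/* bnxt_hwrm_func_qcaps() below wraps the capability discovery
 * sequence: query the function capabilities, then the CoS queue
 * configuration, and on HWRM 1.8.3 or newer set up the backing-store
 * context memory and query the finer-grained resource limits; if that
 * last query succeeds the driver switches to the new resource manager
 * (BNXT_FW_CAP_NEW_RM).
 */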
7487 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7491 rc = __bnxt_hwrm_func_qcaps(bp);
7494 rc = bnxt_hwrm_queue_qportcfg(bp);
7496 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7499 if (bp->hwrm_spec_code >= 0x10803) {
7500 rc = bnxt_alloc_ctx_mem(bp);
7503 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7505 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
7510 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7512 struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
7513 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7517 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7520 resp = bp->hwrm_cmd_resp_addr;
7521 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);
7523 mutex_lock(&bp->hwrm_cmd_lock);
7524 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7526 goto hwrm_cfa_adv_qcaps_exit;
7528 flags = le32_to_cpu(resp->flags);
7530 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7531 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
7533 hwrm_cfa_adv_qcaps_exit:
7534 mutex_unlock(&bp->hwrm_cmd_lock);
7538 static int __bnxt_alloc_fw_health(struct bnxt *bp)
7543 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
7550 static int bnxt_alloc_fw_health(struct bnxt *bp)
7554 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
7555 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7558 rc = __bnxt_alloc_fw_health(bp);
7560 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
7561 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7568 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
7570 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
7571 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7572 BNXT_FW_HEALTH_WIN_MAP_OFF);
7575 bool bnxt_is_fw_healthy(struct bnxt *bp)
7577 if (bp->fw_health && bp->fw_health->status_reliable) {
7580 fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
7581 if (fw_status && !BNXT_FW_IS_HEALTHY(fw_status))
7588 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
7590 struct bnxt_fw_health *fw_health = bp->fw_health;
7593 if (!fw_health || !fw_health->status_reliable)
7596 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
7597 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7598 fw_health->status_reliable = false;
7601 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
7609 bp->fw_health->status_reliable = false;
7611 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
7612 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
7614 sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
7615 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
7616 if (!bp->chip_num) {
7617 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
7618 bp->chip_num = readl(bp->bar0 +
7619 BNXT_FW_HEALTH_WIN_BASE +
7620 BNXT_GRC_REG_CHIP_NUM);
7622 if (!BNXT_CHIP_P5(bp))
7625 status_loc = BNXT_GRC_REG_STATUS_P5 |
7626 BNXT_FW_HEALTH_REG_TYPE_BAR0;
7628 status_loc = readl(hs + offsetof(struct hcomm_status,
7632 if (__bnxt_alloc_fw_health(bp)) {
7633 netdev_warn(bp->dev, "no memory for firmware status checks\n");
7637 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
7638 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
7639 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
7640 __bnxt_map_fw_health_reg(bp, status_loc);
7641 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
7642 BNXT_FW_HEALTH_WIN_OFF(status_loc);
7645 bp->fw_health->status_reliable = true;
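/* bnxt_map_fw_health_regs() below pre-maps the GRC-type health and
 * recovery registers so they can be polled without reprogramming a
 * window each time: all GRC registers in fw_health->regs[] must share
 * the same BNXT_GRC_BASE_MASK-aligned base, which is programmed into
 * the dedicated mapping window once, and the per-register window
 * offsets are cached in mapped_regs[].  Registers of BAR0 type need
 * no mapping.
 */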
7648 static int bnxt_map_fw_health_regs(struct bnxt *bp)
7650 struct bnxt_fw_health *fw_health = bp->fw_health;
7651 u32 reg_base = 0xffffffff;
7654 bp->fw_health->status_reliable = false;
7655 /* Only pre-map the monitoring GRC registers using window 3 */
7656 for (i = 0; i < 4; i++) {
7657 u32 reg = fw_health->regs[i];
7659 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7661 if (reg_base == 0xffffffff)
7662 reg_base = reg & BNXT_GRC_BASE_MASK;
7663 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7665 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
7667 bp->fw_health->status_reliable = true;
7668 if (reg_base == 0xffffffff)
7671 __bnxt_map_fw_health_reg(bp, reg_base);
7675 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7677 struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7678 struct bnxt_fw_health *fw_health = bp->fw_health;
7679 struct hwrm_error_recovery_qcfg_input req = {0};
7682 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7685 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
7686 mutex_lock(&bp->hwrm_cmd_lock);
7687 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7689 goto err_recovery_out;
7690 fw_health->flags = le32_to_cpu(resp->flags);
7691 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
7692 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
7694 goto err_recovery_out;
7696 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
7697 fw_health->master_func_wait_dsecs =
7698 le32_to_cpu(resp->master_func_wait_period);
7699 fw_health->normal_func_wait_dsecs =
7700 le32_to_cpu(resp->normal_func_wait_period);
7701 fw_health->post_reset_wait_dsecs =
7702 le32_to_cpu(resp->master_func_wait_period_after_reset);
7703 fw_health->post_reset_max_wait_dsecs =
7704 le32_to_cpu(resp->max_bailout_time_after_reset);
7705 fw_health->regs[BNXT_FW_HEALTH_REG] =
7706 le32_to_cpu(resp->fw_health_status_reg);
7707 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
7708 le32_to_cpu(resp->fw_heartbeat_reg);
7709 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
7710 le32_to_cpu(resp->fw_reset_cnt_reg);
7711 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
7712 le32_to_cpu(resp->reset_inprogress_reg);
7713 fw_health->fw_reset_inprog_reg_mask =
7714 le32_to_cpu(resp->reset_inprogress_reg_mask);
7715 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
7716 if (fw_health->fw_reset_seq_cnt >= 16) {
7718 goto err_recovery_out;
7720 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
7721 fw_health->fw_reset_seq_regs[i] =
7722 le32_to_cpu(resp->reset_reg[i]);
7723 fw_health->fw_reset_seq_vals[i] =
7724 le32_to_cpu(resp->reset_reg_val[i]);
7725 fw_health->fw_reset_seq_delay_msec[i] =
7726 resp->delay_after_reset[i];
7729 mutex_unlock(&bp->hwrm_cmd_lock);
7731 rc = bnxt_map_fw_health_regs(bp);
7733 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7737 static int bnxt_hwrm_func_reset(struct bnxt *bp)
7739 struct hwrm_func_reset_input req = {0};
7741 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
7744 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
7747 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
7749 struct hwrm_nvm_get_dev_info_output nvm_info;
7751 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
7752 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
7753 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
7754 nvm_info.nvm_cfg_ver_upd);
7757 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7760 struct hwrm_queue_qportcfg_input req = {0};
7761 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
7765 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
7767 mutex_lock(&bp->hwrm_cmd_lock);
7768 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7772 if (!resp->max_configurable_queues) {
7776 bp->max_tc = resp->max_configurable_queues;
7777 bp->max_lltc = resp->max_configurable_lossless_queues;
7778 if (bp->max_tc > BNXT_MAX_QUEUE)
7779 bp->max_tc = BNXT_MAX_QUEUE;
7781 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
7782 qptr = &resp->queue_id0;
7783 for (i = 0, j = 0; i < bp->max_tc; i++) {
7784 bp->q_info[j].queue_id = *qptr;
7785 bp->q_ids[i] = *qptr++;
7786 bp->q_info[j].queue_profile = *qptr++;
7787 bp->tc_to_qidx[j] = j;
7788 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
7789 (no_rdma && BNXT_PF(bp)))
7792 bp->max_q = bp->max_tc;
7793 bp->max_tc = max_t(u8, j, 1);
7795 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
7798 if (bp->max_lltc > bp->max_tc)
7799 bp->max_lltc = bp->max_tc;
7802 mutex_unlock(&bp->hwrm_cmd_lock);
7806 static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
7808 struct hwrm_ver_get_input req = {0};
7811 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
7812 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
7813 req.hwrm_intf_min = HWRM_VERSION_MINOR;
7814 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
7816 rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT,
7821 static int bnxt_hwrm_ver_get(struct bnxt *bp)
7823 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
7824 u16 fw_maj, fw_min, fw_bld, fw_rsv;
7825 u32 dev_caps_cfg, hwrm_ver;
7828 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
7829 mutex_lock(&bp->hwrm_cmd_lock);
7830 rc = __bnxt_hwrm_ver_get(bp, false);
7832 goto hwrm_ver_get_exit;
7834 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
7836 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
7837 resp->hwrm_intf_min_8b << 8 |
7838 resp->hwrm_intf_upd_8b;
7839 if (resp->hwrm_intf_maj_8b < 1) {
7840 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
7841 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7842 resp->hwrm_intf_upd_8b);
7843 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
7846 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
7847 HWRM_VERSION_UPDATE;
7849 if (bp->hwrm_spec_code > hwrm_ver)
7850 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7851 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
7852 HWRM_VERSION_UPDATE);
7854 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
7855 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7856 resp->hwrm_intf_upd_8b);
7858 fw_maj = le16_to_cpu(resp->hwrm_fw_major);
7859 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
7860 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
7861 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
7862 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
7863 len = FW_VER_STR_LEN;
7865 fw_maj = resp->hwrm_fw_maj_8b;
7866 fw_min = resp->hwrm_fw_min_8b;
7867 fw_bld = resp->hwrm_fw_bld_8b;
7868 fw_rsv = resp->hwrm_fw_rsvd_8b;
7869 len = BC_HWRM_STR_LEN;
7871 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
7872 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
7875 if (strlen(resp->active_pkg_name)) {
7876 int fw_ver_len = strlen(bp->fw_ver_str);
7878 snprintf(bp->fw_ver_str + fw_ver_len,
7879 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
7880 resp->active_pkg_name);
7881 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
7884 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
7885 if (!bp->hwrm_cmd_timeout)
7886 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
7888 if (resp->hwrm_intf_maj_8b >= 1) {
7889 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
7890 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
7892 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
7893 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
7895 bp->chip_num = le16_to_cpu(resp->chip_num);
7896 bp->chip_rev = resp->chip_rev;
7897 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
7899 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
7901 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
7902 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
7903 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
7904 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
7906 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
7907 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
7910 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
7911 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
7914 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
7915 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
7918 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
7919 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
7922 mutex_unlock(&bp->hwrm_cmd_lock);
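/* The HWRM spec version is packed as (major << 16) | (minor << 8) |
 * update, so for example spec 1.8.3 is stored as 0x10803 and 1.9.2 as
 * 0x10902; this is the encoding behind the hwrm_spec_code comparisons
 * used throughout this file.
 */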
7926 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
7928 struct hwrm_fw_set_time_input req = {0};
7930 time64_t now = ktime_get_real_seconds();
7932 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
7933 bp->hwrm_spec_code < 0x10400)
7936 time64_to_tm(now, 0, &tm);
7937 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
7938 req.year = cpu_to_le16(1900 + tm.tm_year);
7939 req.month = 1 + tm.tm_mon;
7940 req.day = tm.tm_mday;
7941 req.hour = tm.tm_hour;
7942 req.minute = tm.tm_min;
7943 req.second = tm.tm_sec;
7944 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7947 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
7952 sw_tmp = (*sw & ~mask) | hw;
7953 if (hw < (*sw & mask))
7955 WRITE_ONCE(*sw, sw_tmp);
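/* bnxt_add_one_ctr() folds a hardware counter that may be narrower
 * than 64 bits into a monotonically increasing 64-bit software
 * counter: the software value keeps the bits above the hardware mask,
 * the new hardware reading supplies the low bits, and if the hardware
 * value went backwards a wrap is assumed and one full counter period
 * (mask + 1) is added before the result is published with
 * WRITE_ONCE().  Illustration with an 8-bit counter (mask 0xff):
 * sw = 0x1fe and a new hw reading of 0x03 give (0x1fe & ~0xff) | 0x03
 * = 0x103, and since 0x03 < 0xfe a wrap is detected, yielding 0x203.
 */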
7958 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
7959 int count, bool ignore_zero)
7963 for (i = 0; i < count; i++) {
7964 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
7966 if (ignore_zero && !hw)
7969 if (masks[i] == -1ULL)
7972 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
7976 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
7978 if (!stats->hw_stats)
7981 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
7982 stats->hw_masks, stats->len / 8, false);
7985 static void bnxt_accumulate_all_stats(struct bnxt *bp)
7987 struct bnxt_stats_mem *ring0_stats;
7988 bool ignore_zero = false;
7991 /* Chip bug. Counter intermittently becomes 0. */
7992 if (bp->flags & BNXT_FLAG_CHIP_P5)
7995 for (i = 0; i < bp->cp_nr_rings; i++) {
7996 struct bnxt_napi *bnapi = bp->bnapi[i];
7997 struct bnxt_cp_ring_info *cpr;
7998 struct bnxt_stats_mem *stats;
8000 cpr = &bnapi->cp_ring;
8001 stats = &cpr->stats;
8003 ring0_stats = stats;
8004 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8005 ring0_stats->hw_masks,
8006 ring0_stats->len / 8, ignore_zero);
8008 if (bp->flags & BNXT_FLAG_PORT_STATS) {
8009 struct bnxt_stats_mem *stats = &bp->port_stats;
8010 __le64 *hw_stats = stats->hw_stats;
8011 u64 *sw_stats = stats->sw_stats;
8012 u64 *masks = stats->hw_masks;
8015 cnt = sizeof(struct rx_port_stats) / 8;
8016 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8018 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8019 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8020 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8021 cnt = sizeof(struct tx_port_stats) / 8;
8022 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8024 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
8025 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
8026 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
8030 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
8032 struct bnxt_pf_info *pf = &bp->pf;
8033 struct hwrm_port_qstats_input req = {0};
8035 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
8038 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8042 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
8043 req.port_id = cpu_to_le16(pf->port_id);
8044 req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
8045 BNXT_TX_PORT_STATS_BYTE_OFFSET);
8046 req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
8047 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8050 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
8052 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
8053 struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
8054 struct hwrm_port_qstats_ext_input req = {0};
8055 struct bnxt_pf_info *pf = &bp->pf;
8059 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
8062 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8065 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
8067 req.port_id = cpu_to_le16(pf->port_id);
8068 req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
8069 req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
8070 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
8071 sizeof(struct tx_port_stats_ext) : 0;
8072 req.tx_stat_size = cpu_to_le16(tx_stat_size);
8073 req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
8074 mutex_lock(&bp->hwrm_cmd_lock);
8075 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8077 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
8078 bp->fw_tx_stats_ext_size = tx_stat_size ?
8079 le16_to_cpu(resp->tx_stat_size) / 8 : 0;
8081 bp->fw_rx_stats_ext_size = 0;
8082 bp->fw_tx_stats_ext_size = 0;
8087 if (bp->fw_tx_stats_ext_size <=
8088 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
8089 mutex_unlock(&bp->hwrm_cmd_lock);
8090 bp->pri2cos_valid = 0;
8094 bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
8095 req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
8097 rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
8099 struct hwrm_queue_pri2cos_qcfg_output *resp2;
8103 resp2 = bp->hwrm_cmd_resp_addr;
8104 pri2cos = &resp2->pri0_cos_queue_id;
8105 for (i = 0; i < 8; i++) {
8106 u8 queue_id = pri2cos[i];
8109 /* Per-port queue IDs start from 0, 10, 20, etc. */
8110 queue_idx = queue_id % 10;
8111 if (queue_idx > BNXT_MAX_QUEUE) {
8112 bp->pri2cos_valid = false;
8115 for (j = 0; j < bp->max_q; j++) {
8116 if (bp->q_ids[j] == queue_id)
8117 bp->pri2cos_idx[i] = queue_idx;
8120 bp->pri2cos_valid = 1;
8123 mutex_unlock(&bp->hwrm_cmd_lock);
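/* The PRI2COS query above reports one CoS queue ID per priority 0-7 in
 * the per-port numbering noted earlier (IDs handed out in banks of 10
 * per port), so queue_id % 10 recovers the per-port index; for example
 * queue ID 23 yields index 3.  Each priority's index is recorded in
 * bp->pri2cos_idx[] when its queue ID matches an entry in bp->q_ids[],
 * and an out-of-range index marks the whole mapping invalid.
 */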
8127 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
8129 if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID)
8130 bnxt_hwrm_tunnel_dst_port_free(
8131 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
8132 if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID)
8133 bnxt_hwrm_tunnel_dst_port_free(
8134 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
8137 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
8143 tpa_flags = bp->flags & BNXT_FLAG_TPA;
8144 else if (BNXT_NO_FW_ACCESS(bp))
8146 for (i = 0; i < bp->nr_vnics; i++) {
8147 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
8149 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
8157 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
8161 for (i = 0; i < bp->nr_vnics; i++)
8162 bnxt_hwrm_vnic_set_rss(bp, i, false);
8165 static void bnxt_clear_vnic(struct bnxt *bp)
8170 bnxt_hwrm_clear_vnic_filter(bp);
8171 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
8172 /* clear all RSS settings before freeing the vnic ctx */
8173 bnxt_hwrm_clear_vnic_rss(bp);
8174 bnxt_hwrm_vnic_ctx_free(bp);
8176 /* before freeing the vnic, undo the vnic tpa settings */
8177 if (bp->flags & BNXT_FLAG_TPA)
8178 bnxt_set_tpa(bp, false);
8179 bnxt_hwrm_vnic_free(bp);
8180 if (bp->flags & BNXT_FLAG_CHIP_P5)
8181 bnxt_hwrm_vnic_ctx_free(bp);
8184 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
8187 bnxt_clear_vnic(bp);
8188 bnxt_hwrm_ring_free(bp, close_path);
8189 bnxt_hwrm_ring_grp_free(bp);
8191 bnxt_hwrm_stat_ctx_free(bp);
8192 bnxt_hwrm_free_tunnel_ports(bp);
8196 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
8198 struct hwrm_func_cfg_input req = {0};
8200 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
8201 req.fid = cpu_to_le16(0xffff);
8202 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
8203 if (br_mode == BRIDGE_MODE_VEB)
8204 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
8205 else if (br_mode == BRIDGE_MODE_VEPA)
8206 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
8209 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8212 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
8214 struct hwrm_func_cfg_input req = {0};
8216 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
8219 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
8220 req.fid = cpu_to_le16(0xffff);
8221 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
8222 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
8224 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
8226 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8229 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8231 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
8234 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
8237 /* allocate context for vnic */
8238 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
8240 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8242 goto vnic_setup_err;
8244 bp->rsscos_nr_ctxs++;
8246 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8247 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
8249 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8251 goto vnic_setup_err;
8253 bp->rsscos_nr_ctxs++;
8257 /* configure default vnic, ring grp */
8258 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8260 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8262 goto vnic_setup_err;
8265 /* Enable RSS hashing on vnic */
8266 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
8268 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8270 goto vnic_setup_err;
8273 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8274 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8276 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8285 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
8289 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
8290 for (i = 0; i < nr_ctxs; i++) {
8291 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
8293 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8297 bp->rsscos_nr_ctxs++;
8302 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
8304 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8308 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8310 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8314 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8315 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8317 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8324 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8326 if (bp->flags & BNXT_FLAG_CHIP_P5)
8327 return __bnxt_setup_vnic_p5(bp, vnic_id);
8329 return __bnxt_setup_vnic(bp, vnic_id);
8332 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8334 #ifdef CONFIG_RFS_ACCEL
8337 if (bp->flags & BNXT_FLAG_CHIP_P5)
8340 for (i = 0; i < bp->rx_nr_rings; i++) {
8341 struct bnxt_vnic_info *vnic;
8342 u16 vnic_id = i + 1;
8345 if (vnic_id >= bp->nr_vnics)
8348 vnic = &bp->vnic_info[vnic_id];
8349 vnic->flags |= BNXT_VNIC_RFS_FLAG;
8350 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8351 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
8352 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
8354 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8358 rc = bnxt_setup_vnic(bp, vnic_id);
8368 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
8369 static bool bnxt_promisc_ok(struct bnxt *bp)
8371 #ifdef CONFIG_BNXT_SRIOV
8372 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
8378 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8380 unsigned int rc = 0;
8382 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8384 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8389 rc = bnxt_hwrm_vnic_cfg(bp, 1);
8391 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8398 static int bnxt_cfg_rx_mode(struct bnxt *);
8399 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
8401 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8403 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8405 unsigned int rx_nr_rings = bp->rx_nr_rings;
8408 rc = bnxt_hwrm_stat_ctx_alloc(bp);
8410 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8416 rc = bnxt_hwrm_ring_alloc(bp);
8418 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8422 rc = bnxt_hwrm_ring_grp_alloc(bp);
8424 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8428 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8431 /* default vnic 0 */
8432 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
8434 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8438 rc = bnxt_setup_vnic(bp, 0);
8442 if (bp->flags & BNXT_FLAG_RFS) {
8443 rc = bnxt_alloc_rfs_vnics(bp);
8448 if (bp->flags & BNXT_FLAG_TPA) {
8449 rc = bnxt_set_tpa(bp, true);
8455 bnxt_update_vf_mac(bp);
8457 /* Filter for default vnic 0 */
8458 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8460 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8463 vnic->uc_filter_count = 1;
8466 if (bp->dev->flags & IFF_BROADCAST)
8467 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
8469 if (bp->dev->flags & IFF_PROMISC)
8470 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8472 if (bp->dev->flags & IFF_ALLMULTI) {
8473 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8474 vnic->mc_list_count = 0;
8478 bnxt_mc_list_updated(bp, &mask);
8479 vnic->rx_mask |= mask;
8482 rc = bnxt_cfg_rx_mode(bp);
8486 rc = bnxt_hwrm_set_coal(bp);
8488 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
8491 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8492 rc = bnxt_setup_nitroa0_vnic(bp);
8494 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8499 bnxt_hwrm_func_qcfg(bp);
8500 netdev_update_features(bp->dev);
8506 bnxt_hwrm_resource_free(bp, 0, true);
8511 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8513 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8517 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8519 bnxt_init_cp_rings(bp);
8520 bnxt_init_rx_rings(bp);
8521 bnxt_init_tx_rings(bp);
8522 bnxt_init_ring_grps(bp, irq_re_init);
8523 bnxt_init_vnics(bp);
8525 return bnxt_init_chip(bp, irq_re_init);
8528 static int bnxt_set_real_num_queues(struct bnxt *bp)
8531 struct net_device *dev = bp->dev;
8533 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8534 bp->tx_nr_rings_xdp);
8538 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8542 #ifdef CONFIG_RFS_ACCEL
8543 if (bp->flags & BNXT_FLAG_RFS)
8544 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
8550 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
8553 int _rx = *rx, _tx = *tx;
8556 *rx = min_t(int, _rx, max);
8557 *tx = min_t(int, _tx, max);
8562 while (_rx + _tx > max) {
8563 if (_rx > _tx && _rx > 1)
8574 static void bnxt_setup_msix(struct bnxt *bp)
8576 const int len = sizeof(bp->irq_tbl[0].name);
8577 struct net_device *dev = bp->dev;
8580 tcs = netdev_get_num_tc(dev);
8584 for (i = 0; i < tcs; i++) {
8585 count = bp->tx_nr_rings_per_tc;
8587 netdev_set_tc_queue(dev, i, count, off);
8591 for (i = 0; i < bp->cp_nr_rings; i++) {
8592 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8595 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8597 else if (i < bp->rx_nr_rings)
8602 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
8604 bp->irq_tbl[map_idx].handler = bnxt_msix;
8608 static void bnxt_setup_inta(struct bnxt *bp)
8610 const int len = sizeof(bp->irq_tbl[0].name);
8612 if (netdev_get_num_tc(bp->dev))
8613 netdev_reset_tc(bp->dev);
8615 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
8617 bp->irq_tbl[0].handler = bnxt_inta;
8620 static int bnxt_init_int_mode(struct bnxt *bp);
8622 static int bnxt_setup_int_mode(struct bnxt *bp)
8627 rc = bnxt_init_int_mode(bp);
8628 if (rc || !bp->irq_tbl)
8629 return rc ?: -ENODEV;
8632 if (bp->flags & BNXT_FLAG_USING_MSIX)
8633 bnxt_setup_msix(bp);
8635 bnxt_setup_inta(bp);
8637 rc = bnxt_set_real_num_queues(bp);
8641 #ifdef CONFIG_RFS_ACCEL
8642 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
8644 return bp->hw_resc.max_rsscos_ctxs;
8647 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
8649 return bp->hw_resc.max_vnics;
8653 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
8655 return bp->hw_resc.max_stat_ctxs;
8658 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
8660 return bp->hw_resc.max_cp_rings;
8663 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
8665 unsigned int cp = bp->hw_resc.max_cp_rings;
8667 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8668 cp -= bnxt_get_ulp_msix_num(bp);
8673 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
8675 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8677 if (bp->flags & BNXT_FLAG_CHIP_P5)
8678 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
8680 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
8683 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
8685 bp->hw_resc.max_irqs = max_irqs;
8688 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
8692 cp = bnxt_get_max_func_cp_rings_for_en(bp);
8693 if (bp->flags & BNXT_FLAG_CHIP_P5)
8694 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
8696 return cp - bp->cp_nr_rings;
8699 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
8701 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
8704 int bnxt_get_avail_msix(struct bnxt *bp, int num)
8706 int max_cp = bnxt_get_max_func_cp_rings(bp);
8707 int max_irq = bnxt_get_max_func_irqs(bp);
8708 int total_req = bp->cp_nr_rings + num;
8709 int max_idx, avail_msix;
8711 max_idx = bp->total_irqs;
8712 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8713 max_idx = min_t(int, bp->total_irqs, max_cp);
8714 avail_msix = max_idx - bp->cp_nr_rings;
8715 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
8718 if (max_irq < total_req) {
8719 num = max_irq - bp->cp_nr_rings;
8726 static int bnxt_get_num_msix(struct bnxt *bp)
8728 if (!BNXT_NEW_RM(bp))
8729 return bnxt_get_max_func_irqs(bp);
8731 return bnxt_nq_rings_in_use(bp);
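/* bnxt_init_msix() below requests the vector count returned by
 * bnxt_get_num_msix(), capped at the function's IRQ limit.
 * pci_enable_msix_range() may grant fewer vectors than asked for, so
 * the RX/TX ring counts are then trimmed to what was actually granted
 * minus the vectors reserved for the ULP (e.g. RDMA) driver, and
 * cp_nr_rings is recomputed for shared versus non-shared ring mode.
 */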
8734 static int bnxt_init_msix(struct bnxt *bp)
8736 int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
8737 struct msix_entry *msix_ent;
8739 total_vecs = bnxt_get_num_msix(bp);
8740 max = bnxt_get_max_func_irqs(bp);
8741 if (total_vecs > max)
8747 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8751 for (i = 0; i < total_vecs; i++) {
8752 msix_ent[i].entry = i;
8753 msix_ent[i].vector = 0;
8756 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8759 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
8760 ulp_msix = bnxt_get_ulp_msix_num(bp);
8761 if (total_vecs < 0 || total_vecs < ulp_msix) {
8763 goto msix_setup_exit;
8766 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8768 for (i = 0; i < total_vecs; i++)
8769 bp->irq_tbl[i].vector = msix_ent[i].vector;
8771 bp->total_irqs = total_vecs;
8772 /* Trim rings based on the number of vectors allocated */
8773 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
8774 total_vecs - ulp_msix, min == 1);
8776 goto msix_setup_exit;
8778 bp->cp_nr_rings = (min == 1) ?
8779 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
8780 bp->tx_nr_rings + bp->rx_nr_rings;
8784 goto msix_setup_exit;
8786 bp->flags |= BNXT_FLAG_USING_MSIX;
8791 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
8794 pci_disable_msix(bp->pdev);
8799 static int bnxt_init_inta(struct bnxt *bp)
8801 bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
8806 bp->rx_nr_rings = 1;
8807 bp->tx_nr_rings = 1;
8808 bp->cp_nr_rings = 1;
8809 bp->flags |= BNXT_FLAG_SHARED_RINGS;
8810 bp->irq_tbl[0].vector = bp->pdev->irq;
8814 static int bnxt_init_int_mode(struct bnxt *bp)
8818 if (bp->flags & BNXT_FLAG_MSIX_CAP)
8819 rc = bnxt_init_msix(bp);
8821 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
8822 /* fallback to INTA */
8823 rc = bnxt_init_inta(bp);
8828 static void bnxt_clear_int_mode(struct bnxt *bp)
8830 if (bp->flags & BNXT_FLAG_USING_MSIX)
8831 pci_disable_msix(bp->pdev);
8835 bp->flags &= ~BNXT_FLAG_USING_MSIX;
8838 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
8840 int tcs = netdev_get_num_tc(bp->dev);
8841 bool irq_cleared = false;
8844 if (!bnxt_need_reserve_rings(bp))
8847 if (irq_re_init && BNXT_NEW_RM(bp) &&
8848 bnxt_get_num_msix(bp) != bp->total_irqs) {
8849 bnxt_ulp_irq_stop(bp);
8850 bnxt_clear_int_mode(bp);
8853 rc = __bnxt_reserve_rings(bp);
8856 rc = bnxt_init_int_mode(bp);
8857 bnxt_ulp_irq_restart(bp, rc);
8860 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
8863 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
8864 netdev_err(bp->dev, "tx ring reservation failure\n");
8865 netdev_reset_tc(bp->dev);
8866 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8872 static void bnxt_free_irq(struct bnxt *bp)
8874 struct bnxt_irq *irq;
8877 #ifdef CONFIG_RFS_ACCEL
8878 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
8879 bp->dev->rx_cpu_rmap = NULL;
8881 if (!bp->irq_tbl || !bp->bnapi)
8884 for (i = 0; i < bp->cp_nr_rings; i++) {
8885 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8887 irq = &bp->irq_tbl[map_idx];
8888 if (irq->requested) {
8889 if (irq->have_cpumask) {
8890 irq_set_affinity_hint(irq->vector, NULL);
8891 free_cpumask_var(irq->cpu_mask);
8892 irq->have_cpumask = 0;
8894 free_irq(irq->vector, bp->bnapi[i]);
8901 static int bnxt_request_irq(struct bnxt *bp)
8904 unsigned long flags = 0;
8905 #ifdef CONFIG_RFS_ACCEL
8906 struct cpu_rmap *rmap;
8909 rc = bnxt_setup_int_mode(bp);
8911 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
8915 #ifdef CONFIG_RFS_ACCEL
8916 rmap = bp->dev->rx_cpu_rmap;
8918 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
8919 flags = IRQF_SHARED;
8921 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
8922 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8923 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
8925 #ifdef CONFIG_RFS_ACCEL
8926 if (rmap && bp->bnapi[i]->rx_ring) {
8927 rc = irq_cpu_rmap_add(rmap, irq->vector);
8929 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
8934 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
8941 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
8942 int numa_node = dev_to_node(&bp->pdev->dev);
8944 irq->have_cpumask = 1;
8945 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
8947 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
8949 netdev_warn(bp->dev,
8950 "Set affinity failed, IRQ = %d\n",
8959 static void bnxt_del_napi(struct bnxt *bp)
8966 for (i = 0; i < bp->cp_nr_rings; i++) {
8967 struct bnxt_napi *bnapi = bp->bnapi[i];
8969 __netif_napi_del(&bnapi->napi);
8971 /* We called __netif_napi_del(), so we need
8972 * to respect an RCU grace period before freeing napi structures.
8977 static void bnxt_init_napi(struct bnxt *bp)
8980 unsigned int cp_nr_rings = bp->cp_nr_rings;
8981 struct bnxt_napi *bnapi;
8983 if (bp->flags & BNXT_FLAG_USING_MSIX) {
8984 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
8986 if (bp->flags & BNXT_FLAG_CHIP_P5)
8987 poll_fn = bnxt_poll_p5;
8988 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8990 for (i = 0; i < cp_nr_rings; i++) {
8991 bnapi = bp->bnapi[i];
8992 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
8994 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8995 bnapi = bp->bnapi[cp_nr_rings];
8996 netif_napi_add(bp->dev, &bnapi->napi,
8997 bnxt_poll_nitroa0, 64);
9000 bnapi = bp->bnapi[0];
9001 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
9005 static void bnxt_disable_napi(struct bnxt *bp)
9010 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
9013 for (i = 0; i < bp->cp_nr_rings; i++) {
9014 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
9016 if (bp->bnapi[i]->rx_ring)
9017 cancel_work_sync(&cpr->dim.work);
9019 napi_disable(&bp->bnapi[i]->napi);
9023 static void bnxt_enable_napi(struct bnxt *bp)
9027 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
9028 for (i = 0; i < bp->cp_nr_rings; i++) {
9029 struct bnxt_napi *bnapi = bp->bnapi[i];
9030 struct bnxt_cp_ring_info *cpr;
9032 cpr = &bnapi->cp_ring;
9033 if (bnapi->in_reset)
9034 cpr->sw_stats.rx.rx_resets++;
9035 bnapi->in_reset = false;
9037 if (bnapi->rx_ring) {
9038 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
9039 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
9041 napi_enable(&bnapi->napi);
9045 void bnxt_tx_disable(struct bnxt *bp)
9048 struct bnxt_tx_ring_info *txr;
9051 for (i = 0; i < bp->tx_nr_rings; i++) {
9052 txr = &bp->tx_ring[i];
9053 txr->dev_state = BNXT_DEV_STATE_CLOSING;
9056 /* Drop carrier first to prevent TX timeout */
9057 netif_carrier_off(bp->dev);
9058 /* Stop all TX queues */
9059 netif_tx_disable(bp->dev);
9062 void bnxt_tx_enable(struct bnxt *bp)
9065 struct bnxt_tx_ring_info *txr;
9067 for (i = 0; i < bp->tx_nr_rings; i++) {
9068 txr = &bp->tx_ring[i];
9071 netif_tx_wake_all_queues(bp->dev);
9072 if (bp->link_info.link_up)
9073 netif_carrier_on(bp->dev);
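/* Translate the FEC encoding currently active on the link into a
 * human-readable string for the link-up log message.
 */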
9076 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
9078 u8 active_fec = link_info->active_fec_sig_mode &
9079 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
9081 switch (active_fec) {
9083 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
9085 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
9086 return "Clause 74 BaseR";
9087 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
9088 return "Clause 91 RS(528,514)";
9089 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
9090 return "Clause 91 RS544_1XN";
9091 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
9092 return "Clause 91 RS(544,514)";
9093 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
9094 return "Clause 91 RS272_1XN";
9095 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
9096 return "Clause 91 RS(272,257)";
9100 static void bnxt_report_link(struct bnxt *bp)
9102 if (bp->link_info.link_up) {
9103 const char *signal = "";
9104 const char *flow_ctrl;
9109 netif_carrier_on(bp->dev);
9110 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
9111 if (speed == SPEED_UNKNOWN) {
9112 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9115 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
9119 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
9120 flow_ctrl = "ON - receive & transmit";
9121 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
9122 flow_ctrl = "ON - transmit";
9123 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
9124 flow_ctrl = "ON - receive";
9127 if (bp->link_info.phy_qcfg_resp.option_flags &
9128 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
9129 u8 sig_mode = bp->link_info.active_fec_sig_mode &
9130 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
9132 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
9135 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
9142 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
9143 speed, signal, duplex, flow_ctrl);
9144 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
9145 netdev_info(bp->dev, "EEE is %s\n",
9146 bp->eee.eee_active ? "active" :
9148 fec = bp->link_info.fec_cfg;
9149 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
9150 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
9151 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
9152 bnxt_report_fec(&bp->link_info));
9154 netif_carrier_off(bp->dev);
9155 netdev_err(bp->dev, "NIC Link is Down\n");
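/* Returns true if firmware reports no supported speeds in any auto or forced
 * mode (NRZ or PAM4); the caller treats this as the Ethernet link being
 * disabled.
 */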
9159 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
9161 if (!resp->supported_speeds_auto_mode &&
9162 !resp->supported_speeds_force_mode &&
9163 !resp->supported_pam4_speeds_auto_mode &&
9164 !resp->supported_pam4_speeds_force_mode)
9169 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
9172 struct hwrm_port_phy_qcaps_input req = {0};
9173 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
9174 struct bnxt_link_info *link_info = &bp->link_info;
9176 if (bp->hwrm_spec_code < 0x10201)
9179 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
9181 mutex_lock(&bp->hwrm_cmd_lock);
9182 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9184 goto hwrm_phy_qcaps_exit;
9186 bp->phy_flags = resp->flags;
9187 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
9188 struct ethtool_eee *eee = &bp->eee;
9189 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
9191 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9192 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
9193 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
9194 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
9195 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
9198 if (bp->hwrm_spec_code >= 0x10a01) {
9199 if (bnxt_phy_qcaps_no_speed(resp)) {
9200 link_info->phy_state = BNXT_PHY_STATE_DISABLED;
9201 netdev_warn(bp->dev, "Ethernet link disabled\n");
9202 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
9203 link_info->phy_state = BNXT_PHY_STATE_ENABLED;
9204 netdev_info(bp->dev, "Ethernet link enabled\n");
9205 /* Phy re-enabled, reprobe the speeds */
9206 link_info->support_auto_speeds = 0;
9207 link_info->support_pam4_auto_speeds = 0;
9210 if (resp->supported_speeds_auto_mode)
9211 link_info->support_auto_speeds =
9212 le16_to_cpu(resp->supported_speeds_auto_mode);
9213 if (resp->supported_pam4_speeds_auto_mode)
9214 link_info->support_pam4_auto_speeds =
9215 le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
9217 bp->port_count = resp->port_cnt;
9219 hwrm_phy_qcaps_exit:
9220 mutex_unlock(&bp->hwrm_cmd_lock);
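/* Returns true if "advertising" contains any speed bit that is no longer
 * present in "supported". Example: advertising = 0x0030, supported = 0x0010
 * gives diff = 0x0020, so (supported | diff) != supported and the stale
 * advertised speed must be trimmed by the caller.
 */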
9224 static bool bnxt_support_dropped(u16 advertising, u16 supported)
9226 u16 diff = advertising ^ supported;
9228 return ((supported | diff) != supported);
9231 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
9234 struct bnxt_link_info *link_info = &bp->link_info;
9235 struct hwrm_port_phy_qcfg_input req = {0};
9236 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9237 u8 link_up = link_info->link_up;
9238 bool support_changed = false;
9240 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
9242 mutex_lock(&bp->hwrm_cmd_lock);
9243 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9245 mutex_unlock(&bp->hwrm_cmd_lock);
9249 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
9250 link_info->phy_link_status = resp->link;
9251 link_info->duplex = resp->duplex_cfg;
9252 if (bp->hwrm_spec_code >= 0x10800)
9253 link_info->duplex = resp->duplex_state;
9254 link_info->pause = resp->pause;
9255 link_info->auto_mode = resp->auto_mode;
9256 link_info->auto_pause_setting = resp->auto_pause;
9257 link_info->lp_pause = resp->link_partner_adv_pause;
9258 link_info->force_pause_setting = resp->force_pause;
9259 link_info->duplex_setting = resp->duplex_cfg;
9260 if (link_info->phy_link_status == BNXT_LINK_LINK)
9261 link_info->link_speed = le16_to_cpu(resp->link_speed);
9263 link_info->link_speed = 0;
9264 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
9265 link_info->force_pam4_link_speed =
9266 le16_to_cpu(resp->force_pam4_link_speed);
9267 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
9268 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
9269 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
9270 link_info->auto_pam4_link_speeds =
9271 le16_to_cpu(resp->auto_pam4_link_speed_mask);
9272 link_info->lp_auto_link_speeds =
9273 le16_to_cpu(resp->link_partner_adv_speeds);
9274 link_info->lp_auto_pam4_link_speeds =
9275 resp->link_partner_pam4_adv_speeds;
9276 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
9277 link_info->phy_ver[0] = resp->phy_maj;
9278 link_info->phy_ver[1] = resp->phy_min;
9279 link_info->phy_ver[2] = resp->phy_bld;
9280 link_info->media_type = resp->media_type;
9281 link_info->phy_type = resp->phy_type;
9282 link_info->transceiver = resp->xcvr_pkg_type;
9283 link_info->phy_addr = resp->eee_config_phy_addr &
9284 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
9285 link_info->module_status = resp->module_status;
9287 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
9288 struct ethtool_eee *eee = &bp->eee;
9291 eee->eee_active = 0;
9292 if (resp->eee_config_phy_addr &
9293 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
9294 eee->eee_active = 1;
9295 fw_speeds = le16_to_cpu(
9296 resp->link_partner_adv_eee_link_speed_mask);
9297 eee->lp_advertised =
9298 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9301 /* Pull initial EEE config */
9302 if (!chng_link_state) {
9303 if (resp->eee_config_phy_addr &
9304 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
9305 eee->eee_enabled = 1;
9307 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
9309 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9311 if (resp->eee_config_phy_addr &
9312 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
9315 eee->tx_lpi_enabled = 1;
9316 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
9317 eee->tx_lpi_timer = le32_to_cpu(tmr) &
9318 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
9323 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
9324 if (bp->hwrm_spec_code >= 0x10504) {
9325 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
9326 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
9328 /* TODO: need to add more logic to report VF link */
9329 if (chng_link_state) {
9330 if (link_info->phy_link_status == BNXT_LINK_LINK)
9331 link_info->link_up = 1;
9333 link_info->link_up = 0;
9334 if (link_up != link_info->link_up)
9335 bnxt_report_link(bp);
9337 /* always link down if not required to update link state */
9338 link_info->link_up = 0;
9340 mutex_unlock(&bp->hwrm_cmd_lock);
9342 if (!BNXT_PHY_CFG_ABLE(bp))
9345 /* Check if any advertised speeds are no longer supported. The caller
9346 * holds the link_lock mutex, so we can modify link_info settings.
9348 if (bnxt_support_dropped(link_info->advertising,
9349 link_info->support_auto_speeds)) {
9350 link_info->advertising = link_info->support_auto_speeds;
9351 support_changed = true;
9353 if (bnxt_support_dropped(link_info->advertising_pam4,
9354 link_info->support_pam4_auto_speeds)) {
9355 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
9356 support_changed = true;
9358 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
9359 bnxt_hwrm_set_link_setting(bp, true, false);
9363 static void bnxt_get_port_module_status(struct bnxt *bp)
9365 struct bnxt_link_info *link_info = &bp->link_info;
9366 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
9369 if (bnxt_update_link(bp, true))
9372 module_status = link_info->module_status;
9373 switch (module_status) {
9374 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
9375 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
9376 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
9377 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9379 if (bp->hwrm_spec_code >= 0x10201) {
9380 netdev_warn(bp->dev, "Module part number %s\n",
9381 resp->phy_vendor_partnumber);
9383 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
9384 netdev_warn(bp->dev, "TX is disabled\n");
9385 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
9386 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
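/* Fill in the pause portion of a PORT_PHY_CFG request. When flow control is
 * autonegotiated the auto_pause fields are used; otherwise the force_pause
 * fields are used, and firmware spec 1.2.1 and later also expects auto_pause
 * to mirror the forced setting.
 */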
9391 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9393 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
9394 if (bp->hwrm_spec_code >= 0x10201)
9396 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
9397 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9398 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
9399 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9400 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
9402 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9404 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9405 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
9406 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9407 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
9409 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
9410 if (bp->hwrm_spec_code >= 0x10201) {
9411 req->auto_pause = req->force_pause;
9412 req->enables |= cpu_to_le32(
9413 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9418 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9420 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9421 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
9422 if (bp->link_info.advertising) {
9423 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
9424 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9426 if (bp->link_info.advertising_pam4) {
9428 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
9429 req->auto_link_pam4_speed_mask =
9430 cpu_to_le16(bp->link_info.advertising_pam4);
9432 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
9433 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
9435 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
9436 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9437 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9438 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
9440 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9444 /* tell the firmware (ChiMP) that the setting takes effect immediately */
9445 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
9448 int bnxt_hwrm_set_pause(struct bnxt *bp)
9450 struct hwrm_port_phy_cfg_input req = {0};
9453 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9454 bnxt_hwrm_set_pause_common(bp, &req);
9456 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9457 bp->link_info.force_link_chng)
9458 bnxt_hwrm_set_link_common(bp, &req);
9460 mutex_lock(&bp->hwrm_cmd_lock);
9461 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9462 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9463 /* since changing the pause setting doesn't trigger any link
9464 * change event, the driver needs to update the current pause
9465 * result upon successful return of the phy_cfg command
9467 bp->link_info.pause =
9468 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9469 bp->link_info.auto_pause_setting = 0;
9470 if (!bp->link_info.force_link_chng)
9471 bnxt_report_link(bp);
9473 bp->link_info.force_link_chng = false;
9474 mutex_unlock(&bp->hwrm_cmd_lock);
9478 static void bnxt_hwrm_set_eee(struct bnxt *bp,
9479 struct hwrm_port_phy_cfg_input *req)
9481 struct ethtool_eee *eee = &bp->eee;
9483 if (eee->eee_enabled) {
9485 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9487 if (eee->tx_lpi_enabled)
9488 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9490 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9492 req->flags |= cpu_to_le32(flags);
9493 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9494 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9495 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9497 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9501 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
9503 struct hwrm_port_phy_cfg_input req = {0};
9505 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9507 bnxt_hwrm_set_pause_common(bp, &req);
9509 bnxt_hwrm_set_link_common(bp, &req);
9512 bnxt_hwrm_set_eee(bp, &req);
9513 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9516 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9518 struct hwrm_port_phy_cfg_input req = {0};
9520 if (!BNXT_SINGLE_PF(bp))
9523 if (pci_num_vf(bp->pdev) &&
9524 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
9527 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9528 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
9529 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9532 static int bnxt_fw_init_one(struct bnxt *bp);
9534 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
9536 #ifdef CONFIG_TEE_BNXT_FW
9537 int rc = tee_bnxt_fw_load();
9540 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
9544 netdev_err(bp->dev, "OP-TEE not supported\n");
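/* Poll the firmware health status register and retry the VER_GET command
 * while firmware reports that it is still booting or recovering. If firmware
 * never becomes healthy and the "crashed, no master" bit is set, attempt a
 * recovery reset through OP-TEE.
 */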
9549 static int bnxt_try_recover_fw(struct bnxt *bp)
9551 if (bp->fw_health && bp->fw_health->status_reliable) {
9555 mutex_lock(&bp->hwrm_cmd_lock);
9557 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
9558 rc = __bnxt_hwrm_ver_get(bp, true);
9559 if (!BNXT_FW_IS_BOOTING(sts) &&
9560 !BNXT_FW_IS_RECOVERING(sts))
9563 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
9564 mutex_unlock(&bp->hwrm_cmd_lock);
9566 if (!BNXT_FW_IS_HEALTHY(sts)) {
9568 "Firmware not responding, status: 0x%x\n",
9572 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
9573 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
9574 return bnxt_fw_reset_via_optee(bp);
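/* Tell firmware that the driver interface is going up or down. The response
 * flags indicate whether resources changed or a hot firmware reset completed
 * while the interface was down; in either case firmware state, context
 * memory and interrupt mode are re-initialized and the driver's resource
 * reservations are cleared so they can be re-negotiated.
 */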
9582 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
9584 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
9585 struct hwrm_func_drv_if_change_input req = {0};
9586 bool fw_reset = !bp->irq_tbl;
9587 bool resc_reinit = false;
9591 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
9594 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
9596 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
9597 mutex_lock(&bp->hwrm_cmd_lock);
9598 while (retry < BNXT_FW_IF_RETRY) {
9599 rc = _hwrm_send_message(bp, &req, sizeof(req),
9608 flags = le32_to_cpu(resp->flags);
9609 mutex_unlock(&bp->hwrm_cmd_lock);
9614 rc = bnxt_try_recover_fw(bp);
9621 bnxt_inv_fw_health_reg(bp);
9625 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
9627 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
9629 else if (bp->fw_health && !bp->fw_health->status_reliable)
9630 bnxt_try_map_fw_health_reg(bp);
9632 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
9633 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
9634 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9637 if (resc_reinit || fw_reset) {
9639 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9640 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9642 bnxt_free_ctx_mem(bp);
9646 rc = bnxt_fw_init_one(bp);
9648 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9649 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9652 bnxt_clear_int_mode(bp);
9653 rc = bnxt_init_int_mode(bp);
9655 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9656 netdev_err(bp->dev, "init int mode failed\n");
9660 if (BNXT_NEW_RM(bp)) {
9661 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9663 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9665 netdev_err(bp->dev, "resc_qcaps failed\n");
9667 hw_resc->resv_cp_rings = 0;
9668 hw_resc->resv_stat_ctxs = 0;
9669 hw_resc->resv_irqs = 0;
9670 hw_resc->resv_tx_rings = 0;
9671 hw_resc->resv_rx_rings = 0;
9672 hw_resc->resv_hw_ring_grps = 0;
9673 hw_resc->resv_vnics = 0;
9675 bp->tx_nr_rings = 0;
9676 bp->rx_nr_rings = 0;
9683 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
9685 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
9686 struct hwrm_port_led_qcaps_input req = {0};
9687 struct bnxt_pf_info *pf = &bp->pf;
9691 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
9694 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
9695 req.port_id = cpu_to_le16(pf->port_id);
9696 mutex_lock(&bp->hwrm_cmd_lock);
9697 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9699 mutex_unlock(&bp->hwrm_cmd_lock);
9702 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
9705 bp->num_leds = resp->num_leds;
9706 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
9708 for (i = 0; i < bp->num_leds; i++) {
9709 struct bnxt_led_info *led = &bp->leds[i];
9710 __le16 caps = led->led_state_caps;
9712 if (!led->led_group_id ||
9713 !BNXT_LED_ALT_BLINK_CAP(caps)) {
9719 mutex_unlock(&bp->hwrm_cmd_lock);
9723 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
9725 struct hwrm_wol_filter_alloc_input req = {0};
9726 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
9729 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
9730 req.port_id = cpu_to_le16(bp->pf.port_id);
9731 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
9732 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
9733 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
9734 mutex_lock(&bp->hwrm_cmd_lock);
9735 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9737 bp->wol_filter_id = resp->wol_filter_id;
9738 mutex_unlock(&bp->hwrm_cmd_lock);
9742 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
9744 struct hwrm_wol_filter_free_input req = {0};
9746 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
9747 req.port_id = cpu_to_le16(bp->pf.port_id);
9748 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
9749 req.wol_filter_id = bp->wol_filter_id;
9750 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9753 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
9755 struct hwrm_wol_filter_qcfg_input req = {0};
9756 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9757 u16 next_handle = 0;
9760 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
9761 req.port_id = cpu_to_le16(bp->pf.port_id);
9762 req.handle = cpu_to_le16(handle);
9763 mutex_lock(&bp->hwrm_cmd_lock);
9764 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9766 next_handle = le16_to_cpu(resp->next_handle);
9767 if (next_handle != 0) {
9768 if (resp->wol_type ==
9769 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
9771 bp->wol_filter_id = resp->wol_filter_id;
9775 mutex_unlock(&bp->hwrm_cmd_lock);
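/* Walk the firmware's Wake-on-LAN filter list one handle at a time,
 * remembering the ID of the magic-packet filter if one is configured.
 */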
9779 static void bnxt_get_wol_settings(struct bnxt *bp)
9784 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
9788 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
9789 } while (handle && handle != 0xffff);
9792 #ifdef CONFIG_BNXT_HWMON
9793 static ssize_t bnxt_show_temp(struct device *dev,
9794 struct device_attribute *devattr, char *buf)
9796 struct hwrm_temp_monitor_query_input req = {0};
9797 struct hwrm_temp_monitor_query_output *resp;
9798 struct bnxt *bp = dev_get_drvdata(dev);
9802 resp = bp->hwrm_cmd_resp_addr;
9803 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
9804 mutex_lock(&bp->hwrm_cmd_lock);
9805 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9807 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
9808 mutex_unlock(&bp->hwrm_cmd_lock);
9813 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
9815 static struct attribute *bnxt_attrs[] = {
9816 &sensor_dev_attr_temp1_input.dev_attr.attr,
9819 ATTRIBUTE_GROUPS(bnxt);
9821 static void bnxt_hwmon_close(struct bnxt *bp)
9823 if (bp->hwmon_dev) {
9824 hwmon_device_unregister(bp->hwmon_dev);
9825 bp->hwmon_dev = NULL;
9829 static void bnxt_hwmon_open(struct bnxt *bp)
9831 struct hwrm_temp_monitor_query_input req = {0};
9832 struct pci_dev *pdev = bp->pdev;
9835 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
9836 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9837 if (rc == -EACCES || rc == -EOPNOTSUPP) {
9838 bnxt_hwmon_close(bp);
9845 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
9846 DRV_MODULE_NAME, bp,
9848 if (IS_ERR(bp->hwmon_dev)) {
9849 bp->hwmon_dev = NULL;
9850 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
9854 static void bnxt_hwmon_close(struct bnxt *bp)
9858 static void bnxt_hwmon_open(struct bnxt *bp)
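/* EEE can only be advertised when autoneg is enabled; otherwise disable it.
 * Also trim the advertised EEE speeds to the speeds currently being
 * advertised for the link.
 */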
9863 static bool bnxt_eee_config_ok(struct bnxt *bp)
9865 struct ethtool_eee *eee = &bp->eee;
9866 struct bnxt_link_info *link_info = &bp->link_info;
9868 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
9871 if (eee->eee_enabled) {
9873 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
9875 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
9876 eee->eee_enabled = 0;
9879 if (eee->advertised & ~advertising) {
9880 eee->advertised = advertising & eee->supported;
9887 static int bnxt_update_phy_setting(struct bnxt *bp)
9890 bool update_link = false;
9891 bool update_pause = false;
9892 bool update_eee = false;
9893 struct bnxt_link_info *link_info = &bp->link_info;
9895 rc = bnxt_update_link(bp, true);
9897 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
9901 if (!BNXT_SINGLE_PF(bp))
9904 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
9905 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
9906 link_info->req_flow_ctrl)
9907 update_pause = true;
9908 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
9909 link_info->force_pause_setting != link_info->req_flow_ctrl)
9910 update_pause = true;
9911 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
9912 if (BNXT_AUTO_MODE(link_info->auto_mode))
9914 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
9915 link_info->req_link_speed != link_info->force_link_speed)
9917 else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
9918 link_info->req_link_speed != link_info->force_pam4_link_speed)
9920 if (link_info->req_duplex != link_info->duplex_setting)
9923 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
9925 if (link_info->advertising != link_info->auto_link_speeds ||
9926 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
9930 /* The last close may have shutdown the link, so need to call
9931 * PHY_CFG to bring it back up.
9933 if (!bp->link_info.link_up)
9936 if (!bnxt_eee_config_ok(bp))
9940 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
9941 else if (update_pause)
9942 rc = bnxt_hwrm_set_pause(bp);
9944 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
9952 /* Common routine to pre-map certain register blocks to a different GRC window.
9953 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
9954 * in the PF and 3 windows in the VF can be customized to map different register blocks.
9957 static void bnxt_preset_reg_win(struct bnxt *bp)
9960 /* CAG registers map to GRC window #4 */
9961 writel(BNXT_CAG_REG_BASE,
9962 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
9966 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
9968 static int bnxt_reinit_after_abort(struct bnxt *bp)
9972 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9975 if (bp->dev->reg_state == NETREG_UNREGISTERED)
9978 rc = bnxt_fw_init_one(bp);
9980 bnxt_clear_int_mode(bp);
9981 rc = bnxt_init_int_mode(bp);
9983 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9984 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9990 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9994 bnxt_preset_reg_win(bp);
9995 netif_carrier_off(bp->dev);
9997 /* Reserve rings now if none were reserved at driver probe. */
9998 rc = bnxt_init_dflt_ring_mode(bp);
10000 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
10004 rc = bnxt_reserve_rings(bp, irq_re_init);
10007 if ((bp->flags & BNXT_FLAG_RFS) &&
10008 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
10009 /* disable RFS if falling back to INTA */
10010 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
10011 bp->flags &= ~BNXT_FLAG_RFS;
10014 rc = bnxt_alloc_mem(bp, irq_re_init);
10016 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10017 goto open_err_free_mem;
10021 bnxt_init_napi(bp);
10022 rc = bnxt_request_irq(bp);
10024 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
10029 rc = bnxt_init_nic(bp, irq_re_init);
10031 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10035 bnxt_enable_napi(bp);
10036 bnxt_debug_dev_init(bp);
10038 if (link_re_init) {
10039 mutex_lock(&bp->link_lock);
10040 rc = bnxt_update_phy_setting(bp);
10041 mutex_unlock(&bp->link_lock);
10043 netdev_warn(bp->dev, "failed to update phy settings\n");
10044 if (BNXT_SINGLE_PF(bp)) {
10045 bp->link_info.phy_retry = true;
10046 bp->link_info.phy_retry_expires =
10053 udp_tunnel_nic_reset_ntf(bp->dev);
10055 set_bit(BNXT_STATE_OPEN, &bp->state);
10056 bnxt_enable_int(bp);
10057 /* Enable TX queues */
10058 bnxt_tx_enable(bp);
10059 mod_timer(&bp->timer, jiffies + bp->current_interval);
10060 /* Poll link status and check for SFP+ module status */
10061 bnxt_get_port_module_status(bp);
10063 /* VF-reps may need to be re-opened after the PF is re-opened */
10065 bnxt_vf_reps_open(bp);
10072 bnxt_free_skbs(bp);
10074 bnxt_free_mem(bp, true);
10078 /* rtnl_lock held */
10079 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10083 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
10086 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
10088 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
10089 dev_close(bp->dev);
10094 /* rtnl_lock held, open the NIC half way by allocating all resources, but
10095 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline self tests.
10098 int bnxt_half_open_nic(struct bnxt *bp)
10102 rc = bnxt_alloc_mem(bp, false);
10104 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10105 goto half_open_err;
10107 rc = bnxt_init_nic(bp, false);
10109 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10110 goto half_open_err;
10115 bnxt_free_skbs(bp);
10116 bnxt_free_mem(bp, false);
10117 dev_close(bp->dev);
10121 /* rtnl_lock held, this call can only be made after a previous successful
10122 * call to bnxt_half_open_nic().
10124 void bnxt_half_close_nic(struct bnxt *bp)
10126 bnxt_hwrm_resource_free(bp, false, false);
10127 bnxt_free_skbs(bp);
10128 bnxt_free_mem(bp, false);
10131 static void bnxt_reenable_sriov(struct bnxt *bp)
10134 struct bnxt_pf_info *pf = &bp->pf;
10135 int n = pf->active_vfs;
10138 bnxt_cfg_hw_sriov(bp, &n, true);
10142 static int bnxt_open(struct net_device *dev)
10144 struct bnxt *bp = netdev_priv(dev);
10147 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10148 rc = bnxt_reinit_after_abort(bp);
10151 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
10153 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
10158 rc = bnxt_hwrm_if_change(bp, true);
10161 rc = __bnxt_open_nic(bp, true, true);
10163 bnxt_hwrm_if_change(bp, false);
10165 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
10166 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10167 bnxt_ulp_start(bp, 0);
10168 bnxt_reenable_sriov(bp);
10171 bnxt_hwmon_open(bp);
10177 static bool bnxt_drv_busy(struct bnxt *bp)
10179 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
10180 test_bit(BNXT_STATE_READ_STATS, &bp->state));
10183 static void bnxt_get_ring_stats(struct bnxt *bp,
10184 struct rtnl_link_stats64 *stats);
10186 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
10189 /* Close the VF-reps before closing PF */
10191 bnxt_vf_reps_close(bp);
10193 /* Change device state to avoid TX queue wake-ups */
10194 bnxt_tx_disable(bp);
10196 clear_bit(BNXT_STATE_OPEN, &bp->state);
10197 smp_mb__after_atomic();
10198 while (bnxt_drv_busy(bp))
10201 /* Flush rings and disable interrupts */
10202 bnxt_shutdown_nic(bp, irq_re_init);
10204 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
10206 bnxt_debug_dev_exit(bp);
10207 bnxt_disable_napi(bp);
10208 del_timer_sync(&bp->timer);
10209 bnxt_free_skbs(bp);
10211 /* Save ring stats before shutdown */
10212 if (bp->bnapi && irq_re_init)
10213 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
10218 bnxt_free_mem(bp, irq_re_init);
10221 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10225 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10226 /* If we get here, it means firmware reset is in progress
10227 * while we are trying to close. We can safely proceed with
10228 * the close because we are holding rtnl_lock(). Some firmware
10229 * messages may fail as we proceed to close. We set the
10230 * ABORT_ERR flag here so that the FW reset thread will later
10231 * abort when it gets the rtnl_lock() and sees the flag.
10233 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
10234 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10237 #ifdef CONFIG_BNXT_SRIOV
10238 if (bp->sriov_cfg) {
10239 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
10241 BNXT_SRIOV_CFG_WAIT_TMO);
10243 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
10246 __bnxt_close_nic(bp, irq_re_init, link_re_init);
10250 static int bnxt_close(struct net_device *dev)
10252 struct bnxt *bp = netdev_priv(dev);
10254 bnxt_hwmon_close(bp);
10255 bnxt_close_nic(bp, true, true);
10256 bnxt_hwrm_shutdown_link(bp);
10257 bnxt_hwrm_if_change(bp, false);
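/* MDIO register read via firmware. Clause 22 accesses use only the lower
 * 5 bits of the register address; Clause 45 PHY IDs are split into port and
 * device addresses and use the full 16-bit register address.
 */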
10261 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
10264 struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
10265 struct hwrm_port_phy_mdio_read_input req = {0};
10268 if (bp->hwrm_spec_code < 0x10a00)
10269 return -EOPNOTSUPP;
10271 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
10272 req.port_id = cpu_to_le16(bp->pf.port_id);
10273 req.phy_addr = phy_addr;
10274 req.reg_addr = cpu_to_le16(reg & 0x1f);
10275 if (mdio_phy_id_is_c45(phy_addr)) {
10277 req.phy_addr = mdio_phy_id_prtad(phy_addr);
10278 req.dev_addr = mdio_phy_id_devad(phy_addr);
10279 req.reg_addr = cpu_to_le16(reg);
10282 mutex_lock(&bp->hwrm_cmd_lock);
10283 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10285 *val = le16_to_cpu(resp->reg_data);
10286 mutex_unlock(&bp->hwrm_cmd_lock);
10290 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
10293 struct hwrm_port_phy_mdio_write_input req = {0};
10295 if (bp->hwrm_spec_code < 0x10a00)
10296 return -EOPNOTSUPP;
10298 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
10299 req.port_id = cpu_to_le16(bp->pf.port_id);
10300 req.phy_addr = phy_addr;
10301 req.reg_addr = cpu_to_le16(reg & 0x1f);
10302 if (mdio_phy_id_is_c45(phy_addr)) {
10304 req.phy_addr = mdio_phy_id_prtad(phy_addr);
10305 req.dev_addr = mdio_phy_id_devad(phy_addr);
10306 req.reg_addr = cpu_to_le16(reg);
10308 req.reg_data = cpu_to_le16(val);
10310 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10313 /* rtnl_lock held */
10314 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10316 struct mii_ioctl_data *mdio = if_mii(ifr);
10317 struct bnxt *bp = netdev_priv(dev);
10322 mdio->phy_id = bp->link_info.phy_addr;
10325 case SIOCGMIIREG: {
10326 u16 mii_regval = 0;
10328 if (!netif_running(dev))
10331 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10333 mdio->val_out = mii_regval;
10338 if (!netif_running(dev))
10341 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10348 return -EOPNOTSUPP;
10351 static void bnxt_get_ring_stats(struct bnxt *bp,
10352 struct rtnl_link_stats64 *stats)
10356 for (i = 0; i < bp->cp_nr_rings; i++) {
10357 struct bnxt_napi *bnapi = bp->bnapi[i];
10358 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
10359 u64 *sw = cpr->stats.sw_stats;
10361 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
10362 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10363 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
10365 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
10366 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
10367 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
10369 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
10370 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
10371 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
10373 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
10374 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
10375 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
10377 stats->rx_missed_errors +=
10378 BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
10380 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10382 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
10386 static void bnxt_add_prev_stats(struct bnxt *bp,
10387 struct rtnl_link_stats64 *stats)
10389 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10391 stats->rx_packets += prev_stats->rx_packets;
10392 stats->tx_packets += prev_stats->tx_packets;
10393 stats->rx_bytes += prev_stats->rx_bytes;
10394 stats->tx_bytes += prev_stats->tx_bytes;
10395 stats->rx_missed_errors += prev_stats->rx_missed_errors;
10396 stats->multicast += prev_stats->multicast;
10397 stats->tx_dropped += prev_stats->tx_dropped;
10401 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
10403 struct bnxt *bp = netdev_priv(dev);
10405 set_bit(BNXT_STATE_READ_STATS, &bp->state);
10406 /* Make sure bnxt_close_nic() sees that we are reading stats before
10407 * we check the BNXT_STATE_OPEN flag.
10409 smp_mb__after_atomic();
10410 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10411 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10412 *stats = bp->net_stats_prev;
10416 bnxt_get_ring_stats(bp, stats);
10417 bnxt_add_prev_stats(bp, stats);
10419 if (bp->flags & BNXT_FLAG_PORT_STATS) {
10420 u64 *rx = bp->port_stats.sw_stats;
10421 u64 *tx = bp->port_stats.sw_stats +
10422 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10424 stats->rx_crc_errors =
10425 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
10426 stats->rx_frame_errors =
10427 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
10428 stats->rx_length_errors =
10429 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
10430 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
10431 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
10433 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
10434 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
10435 stats->collisions =
10436 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
10437 stats->tx_fifo_errors =
10438 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
10439 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
10441 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
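/* Copy the netdev multicast list into the default VNIC's mc_list and report
 * whether it changed. If the list exceeds BNXT_MAX_MC_ADDRS, fall back to
 * the ALL_MCAST rx mask instead of programming individual addresses.
 */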
10444 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10446 struct net_device *dev = bp->dev;
10447 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10448 struct netdev_hw_addr *ha;
10451 bool update = false;
10454 netdev_for_each_mc_addr(ha, dev) {
10455 if (mc_count >= BNXT_MAX_MC_ADDRS) {
10456 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10457 vnic->mc_list_count = 0;
10461 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
10462 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
10469 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
10471 if (mc_count != vnic->mc_list_count) {
10472 vnic->mc_list_count = mc_count;
10478 static bool bnxt_uc_list_updated(struct bnxt *bp)
10480 struct net_device *dev = bp->dev;
10481 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10482 struct netdev_hw_addr *ha;
10485 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
10488 netdev_for_each_uc_addr(ha, dev) {
10489 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
10497 static void bnxt_set_rx_mode(struct net_device *dev)
10499 struct bnxt *bp = netdev_priv(dev);
10500 struct bnxt_vnic_info *vnic;
10501 bool mc_update = false;
10505 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
10508 vnic = &bp->vnic_info[0];
10509 mask = vnic->rx_mask;
10510 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
10511 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
10512 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
10513 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
10515 if (dev->flags & IFF_PROMISC)
10516 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10518 uc_update = bnxt_uc_list_updated(bp);
10520 if (dev->flags & IFF_BROADCAST)
10521 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
10522 if (dev->flags & IFF_ALLMULTI) {
10523 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10524 vnic->mc_list_count = 0;
10526 mc_update = bnxt_mc_list_updated(bp, &mask);
10529 if (mask != vnic->rx_mask || uc_update || mc_update) {
10530 vnic->rx_mask = mask;
10532 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
10533 bnxt_queue_sp_work(bp);
10537 static int bnxt_cfg_rx_mode(struct bnxt *bp)
10539 struct net_device *dev = bp->dev;
10540 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10541 struct netdev_hw_addr *ha;
10542 int i, off = 0, rc;
10545 netif_addr_lock_bh(dev);
10546 uc_update = bnxt_uc_list_updated(bp);
10547 netif_addr_unlock_bh(dev);
10552 mutex_lock(&bp->hwrm_cmd_lock);
10553 for (i = 1; i < vnic->uc_filter_count; i++) {
10554 struct hwrm_cfa_l2_filter_free_input req = {0};
10556 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
10559 req.l2_filter_id = vnic->fw_l2_filter_id[i];
10561 rc = _hwrm_send_message(bp, &req, sizeof(req),
10564 mutex_unlock(&bp->hwrm_cmd_lock);
10566 vnic->uc_filter_count = 1;
10568 netif_addr_lock_bh(dev);
10569 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
10570 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10572 netdev_for_each_uc_addr(ha, dev) {
10573 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
10575 vnic->uc_filter_count++;
10578 netif_addr_unlock_bh(dev);
10580 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
10581 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
10583 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
10585 vnic->uc_filter_count = i;
10591 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
10592 !bnxt_promisc_ok(bp))
10593 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10594 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10595 if (rc && vnic->mc_list_count) {
10596 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
10598 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10599 vnic->mc_list_count = 0;
10600 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10603 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
10609 static bool bnxt_can_reserve_rings(struct bnxt *bp)
10611 #ifdef CONFIG_BNXT_SRIOV
10612 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
10613 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10615 /* No minimum rings were provisioned by the PF. Don't
10616 * reserve rings by default when device is down.
10618 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
10621 if (!netif_running(bp->dev))
10628 /* If the chip and firmware support RFS */
10629 static bool bnxt_rfs_supported(struct bnxt *bp)
10631 if (bp->flags & BNXT_FLAG_CHIP_P5) {
10632 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
10636 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
10638 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10643 /* If runtime conditions support RFS */
10644 static bool bnxt_rfs_capable(struct bnxt *bp)
10646 #ifdef CONFIG_RFS_ACCEL
10647 int vnics, max_vnics, max_rss_ctxs;
10649 if (bp->flags & BNXT_FLAG_CHIP_P5)
10650 return bnxt_rfs_supported(bp);
10651 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
10654 vnics = 1 + bp->rx_nr_rings;
10655 max_vnics = bnxt_get_max_func_vnics(bp);
10656 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
10658 /* RSS contexts not a limiting factor */
10659 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10660 max_rss_ctxs = max_vnics;
10661 if (vnics > max_vnics || vnics > max_rss_ctxs) {
10662 if (bp->rx_nr_rings > 1)
10663 netdev_warn(bp->dev,
10664 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
10665 min(max_rss_ctxs - 1, max_vnics - 1));
10669 if (!BNXT_NEW_RM(bp))
10672 if (vnics == bp->hw_resc.resv_vnics)
10675 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
10676 if (vnics <= bp->hw_resc.resv_vnics)
10679 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
10680 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
10687 static netdev_features_t bnxt_fix_features(struct net_device *dev,
10688 netdev_features_t features)
10690 struct bnxt *bp = netdev_priv(dev);
10691 netdev_features_t vlan_features;
10693 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
10694 features &= ~NETIF_F_NTUPLE;
10696 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10697 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10699 if (!(features & NETIF_F_GRO))
10700 features &= ~NETIF_F_GRO_HW;
10702 if (features & NETIF_F_GRO_HW)
10703 features &= ~NETIF_F_LRO;
10705 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
10706 * turned on or off together.
10708 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
10709 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
10710 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10711 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10712 else if (vlan_features)
10713 features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
10715 #ifdef CONFIG_BNXT_SRIOV
10716 if (BNXT_VF(bp) && bp->vf.vlan)
10717 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10722 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
10724 struct bnxt *bp = netdev_priv(dev);
10725 u32 flags = bp->flags;
10728 bool re_init = false;
10729 bool update_tpa = false;
10731 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
10732 if (features & NETIF_F_GRO_HW)
10733 flags |= BNXT_FLAG_GRO;
10734 else if (features & NETIF_F_LRO)
10735 flags |= BNXT_FLAG_LRO;
10737 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10738 flags &= ~BNXT_FLAG_TPA;
10740 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10741 flags |= BNXT_FLAG_STRIP_VLAN;
10743 if (features & NETIF_F_NTUPLE)
10744 flags |= BNXT_FLAG_RFS;
10746 changes = flags ^ bp->flags;
10747 if (changes & BNXT_FLAG_TPA) {
10749 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
10750 (flags & BNXT_FLAG_TPA) == 0 ||
10751 (bp->flags & BNXT_FLAG_CHIP_P5))
10755 if (changes & ~BNXT_FLAG_TPA)
10758 if (flags != bp->flags) {
10759 u32 old_flags = bp->flags;
10761 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10764 bnxt_set_ring_params(bp);
10769 bnxt_close_nic(bp, false, false);
10772 bnxt_set_ring_params(bp);
10774 return bnxt_open_nic(bp, false, false);
10778 rc = bnxt_set_tpa(bp,
10779 (flags & BNXT_FLAG_TPA) ?
10782 bp->flags = old_flags;
10788 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
10789 struct net_device *dev,
10790 netdev_features_t features)
10796 features = vlan_features_check(skb, features);
10797 if (!skb->encapsulation)
10800 switch (vlan_get_protocol(skb)) {
10801 case htons(ETH_P_IP):
10802 l4_proto = ip_hdr(skb)->protocol;
10804 case htons(ETH_P_IPV6):
10805 l4_proto = ipv6_hdr(skb)->nexthdr;
10811 if (l4_proto != IPPROTO_UDP)
10814 bp = netdev_priv(dev);
10815 /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
10816 udp_port = udp_hdr(skb)->dest;
10817 if (udp_port == bp->vxlan_port || udp_port == bp->nge_port)
10819 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
10822 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
10825 struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr;
10826 struct hwrm_dbg_read_direct_input req = {0};
10827 __le32 *dbg_reg_buf;
10828 dma_addr_t mapping;
10831 dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4,
10832 &mapping, GFP_KERNEL);
10835 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1);
10836 req.host_dest_addr = cpu_to_le64(mapping);
10837 req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
10838 req.read_len32 = cpu_to_le32(num_words);
10839 mutex_lock(&bp->hwrm_cmd_lock);
10840 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10841 if (rc || resp->error_code) {
10843 goto dbg_rd_reg_exit;
10845 for (i = 0; i < num_words; i++)
10846 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
10849 mutex_unlock(&bp->hwrm_cmd_lock);
10850 dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping);
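/* Query firmware for the current producer and consumer index of the given
 * ring (debug helper).
 */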
10854 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
10855 u32 ring_id, u32 *prod, u32 *cons)
10857 struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
10858 struct hwrm_dbg_ring_info_get_input req = {0};
10861 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
10862 req.ring_type = ring_type;
10863 req.fw_ring_id = cpu_to_le32(ring_id);
10864 mutex_lock(&bp->hwrm_cmd_lock);
10865 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10867 *prod = le32_to_cpu(resp->producer_index);
10868 *cons = le32_to_cpu(resp->consumer_index);
10870 mutex_unlock(&bp->hwrm_cmd_lock);
10874 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
10876 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
10877 int i = bnapi->index;
10882 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
10883 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
10887 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
10889 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
10890 int i = bnapi->index;
10895 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
10896 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
10897 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
10898 rxr->rx_sw_agg_prod);
10901 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
10903 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
10904 int i = bnapi->index;
10906 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
10907 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
10910 static void bnxt_dbg_dump_states(struct bnxt *bp)
10913 struct bnxt_napi *bnapi;
10915 for (i = 0; i < bp->cp_nr_rings; i++) {
10916 bnapi = bp->bnapi[i];
10917 if (netif_msg_drv(bp)) {
10918 bnxt_dump_tx_sw_state(bnapi);
10919 bnxt_dump_rx_sw_state(bnapi);
10920 bnxt_dump_cp_sw_state(bnapi);
10925 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
10927 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
10928 struct hwrm_ring_reset_input req = {0};
10929 struct bnxt_napi *bnapi = rxr->bnapi;
10930 struct bnxt_cp_ring_info *cpr;
10933 cpr = &bnapi->cp_ring;
10934 cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
10935 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_RESET, cp_ring_id, -1);
10936 req.ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
10937 req.ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
10938 return hwrm_send_message_silent(bp, &req, sizeof(req),
10942 static void bnxt_reset_task(struct bnxt *bp, bool silent)
10945 bnxt_dbg_dump_states(bp);
10946 if (netif_running(bp->dev)) {
10950 bnxt_close_nic(bp, false, false);
10951 bnxt_open_nic(bp, false, false);
10954 bnxt_close_nic(bp, true, false);
10955 rc = bnxt_open_nic(bp, true, false);
10956 bnxt_ulp_start(bp, rc);
10961 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
10963 struct bnxt *bp = netdev_priv(dev);
10965 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
10966 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
10967 bnxt_queue_sp_work(bp);
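/* Periodic firmware health check driven by bnxt_timer(): if the heartbeat
 * register stops advancing or the reset counter changes unexpectedly,
 * schedule the firmware-exception handler via the slow-path task.
 */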
10970 static void bnxt_fw_health_check(struct bnxt *bp)
10972 struct bnxt_fw_health *fw_health = bp->fw_health;
10975 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10978 if (fw_health->tmr_counter) {
10979 fw_health->tmr_counter--;
10983 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
10984 if (val == fw_health->last_fw_heartbeat)
10987 fw_health->last_fw_heartbeat = val;
10989 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
10990 if (val != fw_health->last_fw_reset_cnt)
10993 fw_health->tmr_counter = fw_health->tmr_multiplier;
10997 set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
10998 bnxt_queue_sp_work(bp);
11001 static void bnxt_timer(struct timer_list *t)
11003 struct bnxt *bp = from_timer(bp, t, timer);
11004 struct net_device *dev = bp->dev;
11006 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
11009 if (atomic_read(&bp->intr_sem) != 0)
11010 goto bnxt_restart_timer;
11012 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
11013 bnxt_fw_health_check(bp);
11015 if (bp->link_info.link_up && bp->stats_coal_ticks) {
11016 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
11017 bnxt_queue_sp_work(bp);
11020 if (bnxt_tc_flower_enabled(bp)) {
11021 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
11022 bnxt_queue_sp_work(bp);
11025 #ifdef CONFIG_RFS_ACCEL
11026 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
11027 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11028 bnxt_queue_sp_work(bp);
11030 #endif /*CONFIG_RFS_ACCEL*/
11032 if (bp->link_info.phy_retry) {
11033 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
11034 bp->link_info.phy_retry = false;
11035 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
11037 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
11038 bnxt_queue_sp_work(bp);
11042 if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
11043 netif_carrier_ok(dev)) {
11044 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
11045 bnxt_queue_sp_work(bp);
11047 bnxt_restart_timer:
11048 mod_timer(&bp->timer, jiffies + bp->current_interval);
11051 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
11053 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
11054 * set. If the device is being closed, bnxt_close() may be holding
11055 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
11056 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
11058 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11062 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
11064 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11068 /* Only called from bnxt_sp_task() */
11069 static void bnxt_reset(struct bnxt *bp, bool silent)
11071 bnxt_rtnl_lock_sp(bp);
11072 if (test_bit(BNXT_STATE_OPEN, &bp->state))
11073 bnxt_reset_task(bp, silent);
11074 bnxt_rtnl_unlock_sp(bp);
11077 /* Only called from bnxt_sp_task() */
11078 static void bnxt_rx_ring_reset(struct bnxt *bp)
11082 bnxt_rtnl_lock_sp(bp);
11083 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11084 bnxt_rtnl_unlock_sp(bp);
11087 /* Disable and flush TPA before resetting the RX ring */
11088 if (bp->flags & BNXT_FLAG_TPA)
11089 bnxt_set_tpa(bp, false);
11090 for (i = 0; i < bp->rx_nr_rings; i++) {
11091 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
11092 struct bnxt_cp_ring_info *cpr;
11095 if (!rxr->bnapi->in_reset)
11098 rc = bnxt_hwrm_rx_ring_reset(bp, i);
11100 if (rc == -EINVAL || rc == -EOPNOTSUPP)
11101 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
11103 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
11105 bnxt_reset_task(bp, true);
11108 bnxt_free_one_rx_ring_skbs(bp, i);
11110 rxr->rx_agg_prod = 0;
11111 rxr->rx_sw_agg_prod = 0;
11112 rxr->rx_next_cons = 0;
11113 rxr->bnapi->in_reset = false;
11114 bnxt_alloc_one_rx_ring(bp, i);
11115 cpr = &rxr->bnapi->cp_ring;
11116 cpr->sw_stats.rx.rx_resets++;
11117 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11118 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
11119 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
11121 if (bp->flags & BNXT_FLAG_TPA)
11122 bnxt_set_tpa(bp, true);
11123 bnxt_rtnl_unlock_sp(bp);
11126 static void bnxt_fw_reset_close(struct bnxt *bp)
11129 /* When firmware is in fatal state, quiesce device and disable
11130 * bus master to prevent any potential bad DMAs before freeing
11133 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
11136 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
11138 bp->fw_reset_min_dsecs = 0;
11139 bnxt_tx_disable(bp);
11140 bnxt_disable_napi(bp);
11141 bnxt_disable_int_sync(bp);
11143 bnxt_clear_int_mode(bp);
11144 pci_disable_device(bp->pdev);
11146 __bnxt_close_nic(bp, true, false);
11147 bnxt_vf_reps_free(bp);
11148 bnxt_clear_int_mode(bp);
11149 bnxt_hwrm_func_drv_unrgtr(bp);
11150 if (pci_is_enabled(bp->pdev))
11151 pci_disable_device(bp->pdev);
11152 bnxt_free_ctx_mem(bp);
11157 static bool is_bnxt_fw_ok(struct bnxt *bp)
11159 struct bnxt_fw_health *fw_health = bp->fw_health;
11160 bool no_heartbeat = false, has_reset = false;
11163 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11164 if (val == fw_health->last_fw_heartbeat)
11165 no_heartbeat = true;
11167 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11168 if (val != fw_health->last_fw_reset_cnt)
11171 if (!no_heartbeat && has_reset)
11177 /* rtnl_lock is acquired before calling this function */
11178 static void bnxt_force_fw_reset(struct bnxt *bp)
11180 struct bnxt_fw_health *fw_health = bp->fw_health;
11183 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
11184 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11187 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11188 bnxt_fw_reset_close(bp);
11189 wait_dsecs = fw_health->master_func_wait_dsecs;
11190 if (fw_health->master) {
11191 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
11193 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11195 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
11196 wait_dsecs = fw_health->normal_func_wait_dsecs;
11197 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11200 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
11201 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
11202 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11205 void bnxt_fw_exception(struct bnxt *bp)
11207 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
11208 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11209 bnxt_rtnl_lock_sp(bp);
11210 bnxt_force_fw_reset(bp);
11211 bnxt_rtnl_unlock_sp(bp);
11214 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
11217 static int bnxt_get_registered_vfs(struct bnxt *bp)
11219 #ifdef CONFIG_BNXT_SRIOV
11225 rc = bnxt_hwrm_func_qcfg(bp);
11227 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
11230 if (bp->pf.registered_vfs)
11231 return bp->pf.registered_vfs;
11238 void bnxt_fw_reset(struct bnxt *bp)
11240 bnxt_rtnl_lock_sp(bp);
11241 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
11242 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11245 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11246 if (bp->pf.active_vfs &&
11247 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
11248 n = bnxt_get_registered_vfs(bp);
11250 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
11252 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11253 dev_close(bp->dev);
11254 goto fw_reset_exit;
11255 } else if (n > 0) {
11256 u16 vf_tmo_dsecs = n * 10;
11258 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
11259 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
11260 bp->fw_reset_state =
11261 BNXT_FW_RESET_STATE_POLL_VF;
11262 bnxt_queue_fw_reset_work(bp, HZ / 10);
11263 goto fw_reset_exit;
11265 bnxt_fw_reset_close(bp);
11266 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11267 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11270 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11271 tmo = bp->fw_reset_min_dsecs * HZ / 10;
11273 bnxt_queue_fw_reset_work(bp, tmo);
11276 bnxt_rtnl_unlock_sp(bp);
11279 static void bnxt_chk_missed_irq(struct bnxt *bp)
11283 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11286 for (i = 0; i < bp->cp_nr_rings; i++) {
11287 struct bnxt_napi *bnapi = bp->bnapi[i];
11288 struct bnxt_cp_ring_info *cpr;
11295 cpr = &bnapi->cp_ring;
11296 for (j = 0; j < 2; j++) {
11297 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
11300 if (!cpr2 || cpr2->has_more_work ||
11301 !bnxt_has_work(bp, cpr2))
11304 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
11305 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
11308 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
11309 bnxt_dbg_hwrm_ring_info_get(bp,
11310 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
11311 fw_ring_id, &val[0], &val[1]);
11312 cpr->sw_stats.cmn.missed_irqs++;
11317 static void bnxt_cfg_ntp_filters(struct bnxt *);
11319 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
11321 struct bnxt_link_info *link_info = &bp->link_info;
11323 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
11324 link_info->autoneg = BNXT_AUTONEG_SPEED;
11325 if (bp->hwrm_spec_code >= 0x10201) {
11326 if (link_info->auto_pause_setting &
11327 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
11328 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11330 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11332 link_info->advertising = link_info->auto_link_speeds;
11333 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
11335 link_info->req_link_speed = link_info->force_link_speed;
11336 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
11337 if (link_info->force_pam4_link_speed) {
11338 link_info->req_link_speed =
11339 link_info->force_pam4_link_speed;
11340 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
11342 link_info->req_duplex = link_info->duplex_setting;
11344 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
11345 link_info->req_flow_ctrl =
11346 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
11348 link_info->req_flow_ctrl = link_info->force_pause_setting;
11351 static void bnxt_fw_echo_reply(struct bnxt *bp)
11353 struct bnxt_fw_health *fw_health = bp->fw_health;
11354 struct hwrm_func_echo_response_input req = {0};
11356 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_ECHO_RESPONSE, -1, -1);
11357 req.event_data1 = cpu_to_le32(fw_health->echo_req_data1);
11358 req.event_data2 = cpu_to_le32(fw_health->echo_req_data2);
11359 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11362 static void bnxt_sp_task(struct work_struct *work)
11364 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
11366 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11367 smp_mb__after_atomic();
11368 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11369 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11373 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
11374 bnxt_cfg_rx_mode(bp);
11376 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
11377 bnxt_cfg_ntp_filters(bp);
11378 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
11379 bnxt_hwrm_exec_fwd_req(bp);
11380 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
11381 bnxt_hwrm_port_qstats(bp, 0);
11382 bnxt_hwrm_port_qstats_ext(bp, 0);
11383 bnxt_accumulate_all_stats(bp);
11386 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
11389 mutex_lock(&bp->link_lock);
11390 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
11392 bnxt_hwrm_phy_qcaps(bp);
11394 rc = bnxt_update_link(bp, true);
11396 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
11399 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
11401 bnxt_init_ethtool_link_settings(bp);
11402 mutex_unlock(&bp->link_lock);
11404 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
11407 mutex_lock(&bp->link_lock);
11408 rc = bnxt_update_phy_setting(bp);
11409 mutex_unlock(&bp->link_lock);
11411 netdev_warn(bp->dev, "update phy settings retry failed\n");
11413 bp->link_info.phy_retry = false;
11414 netdev_info(bp->dev, "update phy settings retry succeeded\n");
11417 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
11418 mutex_lock(&bp->link_lock);
11419 bnxt_get_port_module_status(bp);
11420 mutex_unlock(&bp->link_lock);
11423 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
11424 bnxt_tc_flow_stats_work(bp);
11426 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
11427 bnxt_chk_missed_irq(bp);
11429 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
11430 bnxt_fw_echo_reply(bp);
11432 /* The functions below will clear BNXT_STATE_IN_SP_TASK. They
11433 * must be the last functions called before exiting.
11435 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
11436 bnxt_reset(bp, false);
11438 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
11439 bnxt_reset(bp, true);
11441 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
11442 bnxt_rx_ring_reset(bp);
11444 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
11445 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
11447 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
11448 if (!is_bnxt_fw_ok(bp))
11449 bnxt_devlink_health_report(bp,
11450 BNXT_FW_EXCEPTION_SP_EVENT);
11453 smp_mb__before_atomic();
11454 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11457 /* Under rtnl_lock */
11458 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
11461 int max_rx, max_tx, tx_sets = 1;
11462 int tx_rings_needed, stats;
11469 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
11476 tx_rings_needed = tx * tx_sets + tx_xdp;
11477 if (max_tx < tx_rings_needed)
11481 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
11484 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11486 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
11488 if (BNXT_NEW_RM(bp)) {
11489 cp += bnxt_get_ulp_msix_num(bp);
11490 stats += bnxt_get_ulp_stat_ctxs(bp);
11492 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
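/*
 * Illustrative helper (not part of the driver): the TX ring requirement
 * computed by bnxt_check_rings() above.  It assumes, as in the elided code,
 * that one set of TX rings is needed per traffic class (or a single set
 * when no TCs are configured) plus one ring per XDP TX queue.  Example:
 * tx = 4, tcs = 2, tx_xdp = 0  ->  4 * 2 + 0 = 8 TX rings.  Aggregation
 * rings (BNXT_FLAG_AGG_RINGS) additionally roughly double the hardware RX
 * ring requirement.
 */
static inline int bnxt_sketch_tx_rings_needed(int tx, int tcs, int tx_xdp)
{
	int tx_sets = tcs ? tcs : 1;

	return tx * tx_sets + tx_xdp;
}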
11496 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
11499 pci_iounmap(pdev, bp->bar2);
11504 pci_iounmap(pdev, bp->bar1);
11509 pci_iounmap(pdev, bp->bar0);
11514 static void bnxt_cleanup_pci(struct bnxt *bp)
11516 bnxt_unmap_bars(bp, bp->pdev);
11517 pci_release_regions(bp->pdev);
11518 if (pci_is_enabled(bp->pdev))
11519 pci_disable_device(bp->pdev);
11522 static void bnxt_init_dflt_coal(struct bnxt *bp)
11524 struct bnxt_coal *coal;
11526 /* Tick values in microseconds.
11527 * 1 coal_buf x bufs_per_record = 1 completion record.
11529 coal = &bp->rx_coal;
11530 coal->coal_ticks = 10;
11531 coal->coal_bufs = 30;
11532 coal->coal_ticks_irq = 1;
11533 coal->coal_bufs_irq = 2;
11534 coal->idle_thresh = 50;
11535 coal->bufs_per_record = 2;
11536 coal->budget = 64; /* NAPI budget */
11538 coal = &bp->tx_coal;
11539 coal->coal_ticks = 28;
11540 coal->coal_bufs = 30;
11541 coal->coal_ticks_irq = 2;
11542 coal->coal_bufs_irq = 2;
11543 coal->bufs_per_record = 1;
11545 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
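/*
 * Worked example (an illustrative reading of the comment above, not driver
 * code): coal_ticks is in microseconds and coal_bufs counts buffers, with
 * bufs_per_record buffers forming one completion record.  The RX defaults
 * (coal_ticks = 10, coal_bufs = 30, bufs_per_record = 2) therefore amount
 * to roughly "coalesce for ~10 usecs or ~15 completion records, whichever
 * comes first", assuming the buffers-to-records conversion sketched below.
 */
static inline u16 bnxt_sketch_bufs_to_cmpl_records(const struct bnxt_coal *coal)
{
	/* e.g. RX default: 30 bufs / 2 bufs per record = 15 records */
	return DIV_ROUND_UP(coal->coal_bufs, coal->bufs_per_record);
}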
11548 static int bnxt_fw_init_one_p1(struct bnxt *bp)
11553 rc = bnxt_hwrm_ver_get(bp);
11554 bnxt_try_map_fw_health_reg(bp);
11556 rc = bnxt_try_recover_fw(bp);
11559 rc = bnxt_hwrm_ver_get(bp);
11564 if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
11565 rc = bnxt_alloc_kong_hwrm_resources(bp);
11567 bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
11570 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
11571 bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
11572 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
11576 bnxt_nvm_cfg_ver_get(bp);
11578 rc = bnxt_hwrm_func_reset(bp);
11582 bnxt_hwrm_fw_set_time(bp);
11586 static int bnxt_fw_init_one_p2(struct bnxt *bp)
11590 /* Get the MAX capabilities for this function */
11591 rc = bnxt_hwrm_func_qcaps(bp);
11593 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
11598 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
11600 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
11603 if (bnxt_alloc_fw_health(bp)) {
11604 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
11606 rc = bnxt_hwrm_error_recovery_qcfg(bp);
11608 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
11612 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
11616 bnxt_hwrm_func_qcfg(bp);
11617 bnxt_hwrm_vnic_qcaps(bp);
11618 bnxt_hwrm_port_led_qcaps(bp);
11619 bnxt_ethtool_init(bp);
11624 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
11626 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
11627 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
11628 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
11629 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
11630 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
11631 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
11632 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
11633 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
11634 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
11638 static void bnxt_set_dflt_rfs(struct bnxt *bp)
11640 struct net_device *dev = bp->dev;
11642 dev->hw_features &= ~NETIF_F_NTUPLE;
11643 dev->features &= ~NETIF_F_NTUPLE;
11644 bp->flags &= ~BNXT_FLAG_RFS;
11645 if (bnxt_rfs_supported(bp)) {
11646 dev->hw_features |= NETIF_F_NTUPLE;
11647 if (bnxt_rfs_capable(bp)) {
11648 bp->flags |= BNXT_FLAG_RFS;
11649 dev->features |= NETIF_F_NTUPLE;
11654 static void bnxt_fw_init_one_p3(struct bnxt *bp)
11656 struct pci_dev *pdev = bp->pdev;
11658 bnxt_set_dflt_rss_hash_type(bp);
11659 bnxt_set_dflt_rfs(bp);
11661 bnxt_get_wol_settings(bp);
11662 if (bp->flags & BNXT_FLAG_WOL_CAP)
11663 device_set_wakeup_enable(&pdev->dev, bp->wol);
11665 device_set_wakeup_capable(&pdev->dev, false);
11667 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
11668 bnxt_hwrm_coal_params_qcaps(bp);
11671 static int bnxt_fw_init_one(struct bnxt *bp)
11675 rc = bnxt_fw_init_one_p1(bp);
11677 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
11680 rc = bnxt_fw_init_one_p2(bp);
11682 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
11685 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
11689 /* In case fw capabilities have changed, destroy the unneeded
11690 * reporters and create newly capable ones.
11692 bnxt_dl_fw_reporters_destroy(bp, false);
11693 bnxt_dl_fw_reporters_create(bp);
11694 bnxt_fw_init_one_p3(bp);
11698 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
11700 struct bnxt_fw_health *fw_health = bp->fw_health;
11701 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
11702 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
11703 u32 reg_type, reg_off, delay_msecs;
11705 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
11706 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
11707 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
11708 switch (reg_type) {
11709 case BNXT_FW_HEALTH_REG_TYPE_CFG:
11710 pci_write_config_dword(bp->pdev, reg_off, val);
11712 case BNXT_FW_HEALTH_REG_TYPE_GRC:
11713 writel(reg_off & BNXT_GRC_BASE_MASK,
11714 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
11715 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
11717 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
11718 writel(val, bp->bar0 + reg_off);
11720 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
11721 writel(val, bp->bar1 + reg_off);
11725 pci_read_config_dword(bp->pdev, 0, &val);
11726 msleep(delay_msecs);
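/*
 * Note on bnxt_fw_reset_writel() above (illustrative summary, no new driver
 * logic): each reset-sequence entry encodes a register type (extracted by
 * BNXT_FW_HEALTH_REG_TYPE()) and an offset (BNXT_FW_HEALTH_REG_OFF()).
 * Config-space registers are written with pci_write_config_dword(),
 * BAR0/BAR1 registers directly with writel(), and GRC registers indirectly:
 * the aligned GRC base is first programmed into the window register at
 * BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4, then the value is written through
 * that window at 0x2000 plus the masked offset.  The trailing config-space
 * read presumably serves as a posted-write flush before the per-step delay.
 */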
11730 static void bnxt_reset_all(struct bnxt *bp)
11732 struct bnxt_fw_health *fw_health = bp->fw_health;
11735 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11736 bnxt_fw_reset_via_optee(bp);
11737 bp->fw_reset_timestamp = jiffies;
11741 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
11742 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
11743 bnxt_fw_reset_writel(bp, i);
11744 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
11745 struct hwrm_fw_reset_input req = {0};
11747 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
11748 req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
11749 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
11750 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
11751 req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
11752 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11754 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
11756 bp->fw_reset_timestamp = jiffies;
11759 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
11761 return time_after(jiffies, bp->fw_reset_timestamp +
11762 (bp->fw_reset_max_dsecs * HZ / 10));
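/*
 * Illustrative helper (not part of the driver): firmware reset durations
 * such as fw_reset_min_dsecs/fw_reset_max_dsecs are expressed in
 * deciseconds (tenths of a second), which is why the timeout check above
 * and the various bnxt_queue_fw_reset_work() callers scale by HZ / 10.
 * Example: 30 dsecs -> 3 seconds -> 3 * HZ jiffies.
 */
static inline unsigned long bnxt_sketch_dsecs_to_jiffies(u16 dsecs)
{
	return (unsigned long)dsecs * HZ / 10;
}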
11765 static void bnxt_fw_reset_task(struct work_struct *work)
11767 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
11770 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11771 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
11775 switch (bp->fw_reset_state) {
11776 case BNXT_FW_RESET_STATE_POLL_VF: {
11777 int n = bnxt_get_registered_vfs(bp);
11781 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
11782 n, jiffies_to_msecs(jiffies -
11783 bp->fw_reset_timestamp));
11784 goto fw_reset_abort;
11785 } else if (n > 0) {
11786 if (bnxt_fw_reset_timeout(bp)) {
11787 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11788 bp->fw_reset_state = 0;
11789 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
11793 bnxt_queue_fw_reset_work(bp, HZ / 10);
11796 bp->fw_reset_timestamp = jiffies;
11798 bnxt_fw_reset_close(bp);
11799 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11800 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11803 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11804 tmo = bp->fw_reset_min_dsecs * HZ / 10;
11807 bnxt_queue_fw_reset_work(bp, tmo);
11810 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
11813 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
11814 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
11815 !bnxt_fw_reset_timeout(bp)) {
11816 bnxt_queue_fw_reset_work(bp, HZ / 5);
11820 if (!bp->fw_health->master) {
11821 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
11823 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11824 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11827 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11830 case BNXT_FW_RESET_STATE_RESET_FW:
11831 bnxt_reset_all(bp);
11832 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11833 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
11835 case BNXT_FW_RESET_STATE_ENABLE_DEV:
11836 bnxt_inv_fw_health_reg(bp);
11837 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
11838 !bp->fw_reset_min_dsecs) {
11841 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
11842 if (val == 0xffff) {
11843 if (bnxt_fw_reset_timeout(bp)) {
11844 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
11845 goto fw_reset_abort;
11847 bnxt_queue_fw_reset_work(bp, HZ / 1000);
11851 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11852 if (pci_enable_device(bp->pdev)) {
11853 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
11854 goto fw_reset_abort;
11856 pci_set_master(bp->pdev);
11857 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
11859 case BNXT_FW_RESET_STATE_POLL_FW:
11860 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
11861 rc = __bnxt_hwrm_ver_get(bp, true);
11863 if (bnxt_fw_reset_timeout(bp)) {
11864 netdev_err(bp->dev, "Firmware reset aborted\n");
11865 goto fw_reset_abort_status;
11867 bnxt_queue_fw_reset_work(bp, HZ / 5);
11870 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
11871 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
11873 case BNXT_FW_RESET_STATE_OPENING:
11874 while (!rtnl_trylock()) {
11875 bnxt_queue_fw_reset_work(bp, HZ / 10);
11878 rc = bnxt_open(bp->dev);
11880 netdev_err(bp->dev, "bnxt_open() failed\n");
11881 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11882 dev_close(bp->dev);
11885 bp->fw_reset_state = 0;
11886 /* Make sure fw_reset_state is 0 before clearing the flag */
11887 smp_mb__before_atomic();
11888 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11889 bnxt_ulp_start(bp, rc);
11891 bnxt_reenable_sriov(bp);
11892 bnxt_vf_reps_alloc(bp);
11893 bnxt_vf_reps_open(bp);
11894 bnxt_dl_health_recovery_done(bp);
11895 bnxt_dl_health_status_update(bp, true);
11901 fw_reset_abort_status:
11902 if (bp->fw_health->status_reliable ||
11903 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
11904 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
11906 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
11909 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11910 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
11911 bnxt_dl_health_status_update(bp, false);
11912 bp->fw_reset_state = 0;
11914 dev_close(bp->dev);
11918 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
11921 struct bnxt *bp = netdev_priv(dev);
11923 SET_NETDEV_DEV(dev, &pdev->dev);
11925 /* enable device (incl. PCI PM wakeup), and bus-mastering */
11926 rc = pci_enable_device(pdev);
11928 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
11932 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11933 dev_err(&pdev->dev,
11934 "Cannot find PCI device base address, aborting\n");
11936 goto init_err_disable;
11939 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11941 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
11942 goto init_err_disable;
11945 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
11946 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
11947 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
11949 goto init_err_release;
11952 pci_set_master(pdev);
11957 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
11958 * determines the BAR size.
11960 bp->bar0 = pci_ioremap_bar(pdev, 0);
11962 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
11964 goto init_err_release;
11967 bp->bar2 = pci_ioremap_bar(pdev, 4);
11969 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
11971 goto init_err_release;
11974 pci_enable_pcie_error_reporting(pdev);
11976 INIT_WORK(&bp->sp_task, bnxt_sp_task);
11977 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
11979 spin_lock_init(&bp->ntp_fltr_lock);
11980 #if BITS_PER_LONG == 32
11981 spin_lock_init(&bp->db_lock);
11984 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
11985 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
11987 bnxt_init_dflt_coal(bp);
11989 timer_setup(&bp->timer, bnxt_timer, 0);
11990 bp->current_interval = BNXT_TIMER_INTERVAL;
11992 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
11993 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
11995 clear_bit(BNXT_STATE_OPEN, &bp->state);
11999 bnxt_unmap_bars(bp, pdev);
12000 pci_release_regions(pdev);
12003 pci_disable_device(pdev);
12009 /* rtnl_lock held */
12010 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
12012 struct sockaddr *addr = p;
12013 struct bnxt *bp = netdev_priv(dev);
12016 if (!is_valid_ether_addr(addr->sa_data))
12017 return -EADDRNOTAVAIL;
12019 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
12022 rc = bnxt_approve_mac(bp, addr->sa_data, true);
12026 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12027 if (netif_running(dev)) {
12028 bnxt_close_nic(bp, false, false);
12029 rc = bnxt_open_nic(bp, false, false);
12035 /* rtnl_lock held */
12036 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
12038 struct bnxt *bp = netdev_priv(dev);
12040 if (netif_running(dev))
12041 bnxt_close_nic(bp, true, false);
12043 dev->mtu = new_mtu;
12044 bnxt_set_ring_params(bp);
12046 if (netif_running(dev))
12047 return bnxt_open_nic(bp, true, false);
12052 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
12054 struct bnxt *bp = netdev_priv(dev);
12058 if (tc > bp->max_tc) {
12059 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
12064 if (netdev_get_num_tc(dev) == tc)
12067 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
12070 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
12071 sh, tc, bp->tx_nr_rings_xdp);
12075 /* Needs to close the device and do hw resource re-allocations */
12076 if (netif_running(bp->dev))
12077 bnxt_close_nic(bp, true, false);
12080 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
12081 netdev_set_num_tc(dev, tc);
12083 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12084 netdev_reset_tc(dev);
12086 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
12087 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
12088 bp->tx_nr_rings + bp->rx_nr_rings;
12090 if (netif_running(bp->dev))
12091 return bnxt_open_nic(bp, true, false);
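/*
 * Worked example (illustrative) for the ring bookkeeping in
 * bnxt_setup_mq_tc() above: with tx_nr_rings_per_tc = 4, tc = 2 and no XDP
 * queues, tx_nr_rings becomes 4 * 2 = 8.  With shared rings
 * (BNXT_FLAG_SHARED_RINGS), cp_nr_rings = max(tx_nr_rings, rx_nr_rings);
 * otherwise it is their sum.  Clearing TCs (tc == 0) collapses TX back to
 * a single set of tx_nr_rings_per_tc rings via netdev_reset_tc().
 */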
12096 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
12099 struct bnxt *bp = cb_priv;
12101 if (!bnxt_tc_flower_enabled(bp) ||
12102 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
12103 return -EOPNOTSUPP;
12106 case TC_SETUP_CLSFLOWER:
12107 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
12109 return -EOPNOTSUPP;
12113 LIST_HEAD(bnxt_block_cb_list);
12115 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
12118 struct bnxt *bp = netdev_priv(dev);
12121 case TC_SETUP_BLOCK:
12122 return flow_block_cb_setup_simple(type_data,
12123 &bnxt_block_cb_list,
12124 bnxt_setup_tc_block_cb,
12126 case TC_SETUP_QDISC_MQPRIO: {
12127 struct tc_mqprio_qopt *mqprio = type_data;
12129 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
12131 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
12134 return -EOPNOTSUPP;
12138 #ifdef CONFIG_RFS_ACCEL
12139 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
12140 struct bnxt_ntuple_filter *f2)
12142 struct flow_keys *keys1 = &f1->fkeys;
12143 struct flow_keys *keys2 = &f2->fkeys;
12145 if (keys1->basic.n_proto != keys2->basic.n_proto ||
12146 keys1->basic.ip_proto != keys2->basic.ip_proto)
12149 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
12150 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
12151 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
12154 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
12155 sizeof(keys1->addrs.v6addrs.src)) ||
12156 memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
12157 sizeof(keys1->addrs.v6addrs.dst)))
12161 if (keys1->ports.ports == keys2->ports.ports &&
12162 keys1->control.flags == keys2->control.flags &&
12163 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
12164 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
12170 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
12171 u16 rxq_index, u32 flow_id)
12173 struct bnxt *bp = netdev_priv(dev);
12174 struct bnxt_ntuple_filter *fltr, *new_fltr;
12175 struct flow_keys *fkeys;
12176 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
12177 int rc = 0, idx, bit_id, l2_idx = 0;
12178 struct hlist_head *head;
12181 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
12182 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12185 netif_addr_lock_bh(dev);
12186 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
12187 if (ether_addr_equal(eth->h_dest,
12188 vnic->uc_list + off)) {
12193 netif_addr_unlock_bh(dev);
12197 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
12201 fkeys = &new_fltr->fkeys;
12202 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
12203 rc = -EPROTONOSUPPORT;
12207 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
12208 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
12209 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
12210 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
12211 rc = -EPROTONOSUPPORT;
12214 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
12215 bp->hwrm_spec_code < 0x10601) {
12216 rc = -EPROTONOSUPPORT;
12219 flags = fkeys->control.flags;
12220 if (((flags & FLOW_DIS_ENCAPSULATION) &&
12221 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
12222 rc = -EPROTONOSUPPORT;
12226 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
12227 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
12229 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
12230 head = &bp->ntp_fltr_hash_tbl[idx];
12232 hlist_for_each_entry_rcu(fltr, head, hash) {
12233 if (bnxt_fltr_match(fltr, new_fltr)) {
12241 spin_lock_bh(&bp->ntp_fltr_lock);
12242 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
12243 BNXT_NTP_FLTR_MAX_FLTR, 0);
12245 spin_unlock_bh(&bp->ntp_fltr_lock);
12250 new_fltr->sw_id = (u16)bit_id;
12251 new_fltr->flow_id = flow_id;
12252 new_fltr->l2_fltr_idx = l2_idx;
12253 new_fltr->rxq = rxq_index;
12254 hlist_add_head_rcu(&new_fltr->hash, head);
12255 bp->ntp_fltr_count++;
12256 spin_unlock_bh(&bp->ntp_fltr_lock);
12258 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
12259 bnxt_queue_sp_work(bp);
12261 return new_fltr->sw_id;
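/*
 * Illustrative sketch (not part of the driver): the filter ID scheme used
 * by bnxt_rx_flow_steer() above.  A free bit in bp->ntp_fltr_bmap is
 * claimed under ntp_fltr_lock and its index becomes the filter's sw_id;
 * bnxt_cfg_ntp_filters() below later frees expired entries and returns the
 * bit with clear_bit().  The helper name is hypothetical.
 */
static inline int bnxt_sketch_alloc_fltr_id(struct bnxt *bp)
{
	int bit_id;

	spin_lock_bh(&bp->ntp_fltr_lock);
	/* Reserve one free bit (a region of order 0). */
	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
					 BNXT_NTP_FLTR_MAX_FLTR, 0);
	spin_unlock_bh(&bp->ntp_fltr_lock);

	return bit_id;	/* negative when the filter table is full */
}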
12268 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12272 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
12273 struct hlist_head *head;
12274 struct hlist_node *tmp;
12275 struct bnxt_ntuple_filter *fltr;
12278 head = &bp->ntp_fltr_hash_tbl[i];
12279 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
12282 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
12283 if (rps_may_expire_flow(bp->dev, fltr->rxq,
12286 bnxt_hwrm_cfa_ntuple_filter_free(bp,
12291 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
12296 set_bit(BNXT_FLTR_VALID, &fltr->state);
12300 spin_lock_bh(&bp->ntp_fltr_lock);
12301 hlist_del_rcu(&fltr->hash);
12302 bp->ntp_fltr_count--;
12303 spin_unlock_bh(&bp->ntp_fltr_lock);
12305 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
12310 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
12311 netdev_info(bp->dev, "Received PF driver unload event!\n");
12316 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12320 #endif /* CONFIG_RFS_ACCEL */
12322 static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
12324 struct bnxt *bp = netdev_priv(netdev);
12325 struct udp_tunnel_info ti;
12328 udp_tunnel_nic_get_port(netdev, table, 0, &ti);
12329 if (ti.type == UDP_TUNNEL_TYPE_VXLAN) {
12330 bp->vxlan_port = ti.port;
12331 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
12333 bp->nge_port = ti.port;
12334 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
12338 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
12340 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
12343 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
12344 .sync_table = bnxt_udp_tunnel_sync,
12345 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
12346 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
12348 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
12349 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
12353 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
12354 struct net_device *dev, u32 filter_mask,
12357 struct bnxt *bp = netdev_priv(dev);
12359 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
12360 nlflags, filter_mask, NULL);
12363 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
12364 u16 flags, struct netlink_ext_ack *extack)
12366 struct bnxt *bp = netdev_priv(dev);
12367 struct nlattr *attr, *br_spec;
12370 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
12371 return -EOPNOTSUPP;
12373 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
12377 nla_for_each_nested(attr, br_spec, rem) {
12380 if (nla_type(attr) != IFLA_BRIDGE_MODE)
12383 if (nla_len(attr) < sizeof(mode))
12386 mode = nla_get_u16(attr);
12387 if (mode == bp->br_mode)
12390 rc = bnxt_hwrm_set_br_mode(bp, mode);
12392 bp->br_mode = mode;
12398 int bnxt_get_port_parent_id(struct net_device *dev,
12399 struct netdev_phys_item_id *ppid)
12401 struct bnxt *bp = netdev_priv(dev);
12403 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
12404 return -EOPNOTSUPP;
12406 /* The PF and its VF-reps only support the switchdev framework */
12407 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
12408 return -EOPNOTSUPP;
12410 ppid->id_len = sizeof(bp->dsn);
12411 memcpy(ppid->id, bp->dsn, ppid->id_len);
12416 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
12418 struct bnxt *bp = netdev_priv(dev);
12420 return &bp->dl_port;
12423 static const struct net_device_ops bnxt_netdev_ops = {
12424 .ndo_open = bnxt_open,
12425 .ndo_start_xmit = bnxt_start_xmit,
12426 .ndo_stop = bnxt_close,
12427 .ndo_get_stats64 = bnxt_get_stats64,
12428 .ndo_set_rx_mode = bnxt_set_rx_mode,
12429 .ndo_do_ioctl = bnxt_ioctl,
12430 .ndo_validate_addr = eth_validate_addr,
12431 .ndo_set_mac_address = bnxt_change_mac_addr,
12432 .ndo_change_mtu = bnxt_change_mtu,
12433 .ndo_fix_features = bnxt_fix_features,
12434 .ndo_set_features = bnxt_set_features,
12435 .ndo_features_check = bnxt_features_check,
12436 .ndo_tx_timeout = bnxt_tx_timeout,
12437 #ifdef CONFIG_BNXT_SRIOV
12438 .ndo_get_vf_config = bnxt_get_vf_config,
12439 .ndo_set_vf_mac = bnxt_set_vf_mac,
12440 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
12441 .ndo_set_vf_rate = bnxt_set_vf_bw,
12442 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
12443 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
12444 .ndo_set_vf_trust = bnxt_set_vf_trust,
12446 .ndo_setup_tc = bnxt_setup_tc,
12447 #ifdef CONFIG_RFS_ACCEL
12448 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
12450 .ndo_bpf = bnxt_xdp,
12451 .ndo_xdp_xmit = bnxt_xdp_xmit,
12452 .ndo_bridge_getlink = bnxt_bridge_getlink,
12453 .ndo_bridge_setlink = bnxt_bridge_setlink,
12454 .ndo_get_devlink_port = bnxt_get_devlink_port,
12457 static void bnxt_remove_one(struct pci_dev *pdev)
12459 struct net_device *dev = pci_get_drvdata(pdev);
12460 struct bnxt *bp = netdev_priv(dev);
12463 bnxt_sriov_disable(bp);
12466 devlink_port_type_clear(&bp->dl_port);
12467 pci_disable_pcie_error_reporting(pdev);
12468 unregister_netdev(dev);
12469 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12470 /* Flush any pending tasks */
12471 cancel_work_sync(&bp->sp_task);
12472 cancel_delayed_work_sync(&bp->fw_reset_task);
12475 bnxt_dl_fw_reporters_destroy(bp, true);
12476 bnxt_dl_unregister(bp);
12477 bnxt_shutdown_tc(bp);
12479 bnxt_clear_int_mode(bp);
12480 bnxt_hwrm_func_drv_unrgtr(bp);
12481 bnxt_free_hwrm_resources(bp);
12482 bnxt_free_hwrm_short_cmd_req(bp);
12483 bnxt_ethtool_free(bp);
12487 kfree(bp->fw_health);
12488 bp->fw_health = NULL;
12489 bnxt_cleanup_pci(bp);
12490 bnxt_free_ctx_mem(bp);
12493 kfree(bp->rss_indir_tbl);
12494 bp->rss_indir_tbl = NULL;
12495 bnxt_free_port_stats(bp);
12499 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
12502 struct bnxt_link_info *link_info = &bp->link_info;
12505 rc = bnxt_hwrm_phy_qcaps(bp);
12507 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
12511 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
12512 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
12514 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
12518 rc = bnxt_update_link(bp, false);
12520 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
12525 /* Older firmware does not have supported_auto_speeds, so assume
12526 * that all supported speeds can be autonegotiated.
12528 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
12529 link_info->support_auto_speeds = link_info->support_speeds;
12531 bnxt_init_ethtool_link_settings(bp);
12535 static int bnxt_get_max_irq(struct pci_dev *pdev)
12539 if (!pdev->msix_cap)
12542 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
12543 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
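/*
 * Worked example (illustrative) for the computation above: the MSI-X
 * Message Control "Table Size" field (PCI_MSIX_FLAGS_QSIZE) is encoded as
 * N - 1, so a raw field value of 0x3f means 64 MSI-X vectors; hence the
 * "+ 1" when deriving the maximum IRQ count.
 */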
12546 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12549 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
12550 int max_ring_grps = 0, max_irq;
12552 *max_tx = hw_resc->max_tx_rings;
12553 *max_rx = hw_resc->max_rx_rings;
12554 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
12555 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
12556 bnxt_get_ulp_msix_num(bp),
12557 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
12558 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
12559 *max_cp = min_t(int, *max_cp, max_irq);
12560 max_ring_grps = hw_resc->max_hw_ring_grps;
12561 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
12565 if (bp->flags & BNXT_FLAG_AGG_RINGS)
12567 if (bp->flags & BNXT_FLAG_CHIP_P5) {
12568 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
12569 /* On P5 chips, max_cp output param should be available NQs */
12572 *max_rx = min_t(int, *max_rx, max_ring_grps);
12575 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
12579 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
12582 if (!rx || !tx || !cp)
12585 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
12588 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
12593 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12594 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
12595 /* Not enough rings, try disabling agg rings. */
12596 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
12597 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
12599 /* set BNXT_FLAG_AGG_RINGS back for consistency */
12600 bp->flags |= BNXT_FLAG_AGG_RINGS;
12603 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
12604 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12605 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12606 bnxt_set_ring_params(bp);
12609 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
12610 int max_cp, max_stat, max_irq;
12612 /* Reserve minimum resources for RoCE */
12613 max_cp = bnxt_get_max_func_cp_rings(bp);
12614 max_stat = bnxt_get_max_func_stat_ctxs(bp);
12615 max_irq = bnxt_get_max_func_irqs(bp);
12616 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
12617 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
12618 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
12621 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
12622 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
12623 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
12624 max_cp = min_t(int, max_cp, max_irq);
12625 max_cp = min_t(int, max_cp, max_stat);
12626 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
12633 /* In initial default shared ring setting, each shared ring must have a
12636 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
12638 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
12639 bp->rx_nr_rings = bp->cp_nr_rings;
12640 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
12641 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12644 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
12646 int dflt_rings, max_rx_rings, max_tx_rings, rc;
12648 if (!bnxt_can_reserve_rings(bp))
12652 bp->flags |= BNXT_FLAG_SHARED_RINGS;
12653 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
12654 /* Reduce default rings on multi-port cards so that total default
12655 * rings do not exceed CPU count.
12657 if (bp->port_count > 1) {
12659 max_t(int, num_online_cpus() / bp->port_count, 1);
12661 dflt_rings = min_t(int, dflt_rings, max_rings);
12663 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
12666 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
12667 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
12669 bnxt_trim_dflt_sh_rings(bp);
12671 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
12672 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12674 rc = __bnxt_reserve_rings(bp);
12676 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
12677 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12679 bnxt_trim_dflt_sh_rings(bp);
12681 /* Rings may have been trimmed, re-reserve the trimmed rings. */
12682 if (bnxt_need_reserve_rings(bp)) {
12683 rc = __bnxt_reserve_rings(bp);
12685 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
12686 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12688 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
12693 bp->tx_nr_rings = 0;
12694 bp->rx_nr_rings = 0;
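/*
 * Worked example (illustrative) for the default ring sizing above: on a
 * 2-port adapter in a host with 16 online CPUs, dflt_rings starts from
 * netif_get_num_default_rss_queues() (or 1 under kdump) and is then capped
 * at max(16 / 2, 1) = 8 per port so that both ports together do not exceed
 * the CPU count.  The result is further limited by what the firmware
 * actually grants via __bnxt_reserve_rings().
 */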
12699 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
12703 if (bp->tx_nr_rings)
12706 bnxt_ulp_irq_stop(bp);
12707 bnxt_clear_int_mode(bp);
12708 rc = bnxt_set_dflt_rings(bp, true);
12710 netdev_err(bp->dev, "Not enough rings available.\n");
12711 goto init_dflt_ring_err;
12713 rc = bnxt_init_int_mode(bp);
12715 goto init_dflt_ring_err;
12717 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
12718 if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
12719 bp->flags |= BNXT_FLAG_RFS;
12720 bp->dev->features |= NETIF_F_NTUPLE;
12722 init_dflt_ring_err:
12723 bnxt_ulp_irq_restart(bp, rc);
12727 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
12732 bnxt_hwrm_func_qcaps(bp);
12734 if (netif_running(bp->dev))
12735 __bnxt_close_nic(bp, true, false);
12737 bnxt_ulp_irq_stop(bp);
12738 bnxt_clear_int_mode(bp);
12739 rc = bnxt_init_int_mode(bp);
12740 bnxt_ulp_irq_restart(bp, rc);
12742 if (netif_running(bp->dev)) {
12744 dev_close(bp->dev);
12746 rc = bnxt_open_nic(bp, true, false);
12752 static int bnxt_init_mac_addr(struct bnxt *bp)
12757 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
12759 #ifdef CONFIG_BNXT_SRIOV
12760 struct bnxt_vf_info *vf = &bp->vf;
12761 bool strict_approval = true;
12763 if (is_valid_ether_addr(vf->mac_addr)) {
12764 /* overwrite netdev dev_addr with admin VF MAC */
12765 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
12766 /* Older PF driver or firmware may not approve this
12769 strict_approval = false;
12771 eth_hw_addr_random(bp->dev);
12773 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
12779 #define BNXT_VPD_LEN 512
12780 static void bnxt_vpd_read_info(struct bnxt *bp)
12782 struct pci_dev *pdev = bp->pdev;
12783 int i, len, pos, ro_size, size;
12787 vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL);
12791 vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data);
12792 if (vpd_size <= 0) {
12793 netdev_err(bp->dev, "Unable to read VPD\n");
12797 i = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA);
12799 netdev_err(bp->dev, "VPD READ-Only not found\n");
12803 ro_size = pci_vpd_lrdt_size(&vpd_data[i]);
12804 i += PCI_VPD_LRDT_TAG_SIZE;
12805 if (i + ro_size > vpd_size)
12808 pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
12809 PCI_VPD_RO_KEYWORD_PARTNO);
12813 len = pci_vpd_info_field_size(&vpd_data[pos]);
12814 pos += PCI_VPD_INFO_FLD_HDR_SIZE;
12815 if (len + pos > vpd_size)
12818 size = min(len, BNXT_VPD_FLD_LEN - 1);
12819 memcpy(bp->board_partno, &vpd_data[pos], size);
12822 pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
12823 PCI_VPD_RO_KEYWORD_SERIALNO);
12827 len = pci_vpd_info_field_size(&vpd_data[pos]);
12828 pos += PCI_VPD_INFO_FLD_HDR_SIZE;
12829 if (len + pos > vpd_size)
12832 size = min(len, BNXT_VPD_FLD_LEN - 1);
12833 memcpy(bp->board_serialno, &vpd_data[pos], size);
12838 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
12840 struct pci_dev *pdev = bp->pdev;
12843 qword = pci_get_dsn(pdev);
12845 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
12846 return -EOPNOTSUPP;
12849 put_unaligned_le64(qword, dsn);
12851 bp->flags |= BNXT_FLAG_DSN_VALID;
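/*
 * Note (illustrative): pci_get_dsn() returns the adapter's 64-bit PCIe
 * Device Serial Number, or 0 if the capability is absent, and
 * put_unaligned_le64() stores it little-endian into the 8-byte dsn[]
 * buffer.  bnxt_get_port_parent_id() above exposes the same buffer as the
 * switchdev parent ID, which is why BNXT_FLAG_DSN_VALID gates those paths.
 */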
12855 static int bnxt_map_db_bar(struct bnxt *bp)
12859 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
12865 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
12867 struct net_device *dev;
12871 if (pci_is_bridge(pdev))
12874 /* Clear any pending DMA transactions from crash kernel
12875 * while loading driver in capture kernel.
12877 if (is_kdump_kernel()) {
12878 pci_clear_master(pdev);
12882 max_irqs = bnxt_get_max_irq(pdev);
12883 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
12887 bp = netdev_priv(dev);
12888 bp->msg_enable = BNXT_DEF_MSG_ENABLE;
12889 bnxt_set_max_func_irqs(bp, max_irqs);
12891 if (bnxt_vf_pciid(ent->driver_data))
12892 bp->flags |= BNXT_FLAG_VF;
12894 if (pdev->msix_cap)
12895 bp->flags |= BNXT_FLAG_MSIX_CAP;
12897 rc = bnxt_init_board(pdev, dev);
12899 goto init_err_free;
12901 dev->netdev_ops = &bnxt_netdev_ops;
12902 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
12903 dev->ethtool_ops = &bnxt_ethtool_ops;
12904 pci_set_drvdata(pdev, dev);
12906 rc = bnxt_alloc_hwrm_resources(bp);
12908 goto init_err_pci_clean;
12910 mutex_init(&bp->hwrm_cmd_lock);
12911 mutex_init(&bp->link_lock);
12913 rc = bnxt_fw_init_one_p1(bp);
12915 goto init_err_pci_clean;
12918 bnxt_vpd_read_info(bp);
12920 if (BNXT_CHIP_P5(bp)) {
12921 bp->flags |= BNXT_FLAG_CHIP_P5;
12922 if (BNXT_CHIP_SR2(bp))
12923 bp->flags |= BNXT_FLAG_CHIP_SR2;
12926 rc = bnxt_alloc_rss_indir_tbl(bp);
12928 goto init_err_pci_clean;
12930 rc = bnxt_fw_init_one_p2(bp);
12932 goto init_err_pci_clean;
12934 rc = bnxt_map_db_bar(bp);
12936 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
12938 goto init_err_pci_clean;
12941 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
12942 NETIF_F_TSO | NETIF_F_TSO6 |
12943 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
12944 NETIF_F_GSO_IPXIP4 |
12945 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
12946 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
12947 NETIF_F_RXCSUM | NETIF_F_GRO;
12949 if (BNXT_SUPPORTS_TPA(bp))
12950 dev->hw_features |= NETIF_F_LRO;
12952 dev->hw_enc_features =
12953 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
12954 NETIF_F_TSO | NETIF_F_TSO6 |
12955 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
12956 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
12957 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
12958 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
12960 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
12961 NETIF_F_GSO_GRE_CSUM;
12962 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
12963 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
12964 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
12965 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
12966 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
12967 if (BNXT_SUPPORTS_TPA(bp))
12968 dev->hw_features |= NETIF_F_GRO_HW;
12969 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
12970 if (dev->features & NETIF_F_GRO_HW)
12971 dev->features &= ~NETIF_F_LRO;
12972 dev->priv_flags |= IFF_UNICAST_FLT;
12974 #ifdef CONFIG_BNXT_SRIOV
12975 init_waitqueue_head(&bp->sriov_cfg_wait);
12976 mutex_init(&bp->sriov_lock);
12978 if (BNXT_SUPPORTS_TPA(bp)) {
12979 bp->gro_func = bnxt_gro_func_5730x;
12980 if (BNXT_CHIP_P4(bp))
12981 bp->gro_func = bnxt_gro_func_5731x;
12982 else if (BNXT_CHIP_P5(bp))
12983 bp->gro_func = bnxt_gro_func_5750x;
12985 if (!BNXT_CHIP_P4_PLUS(bp))
12986 bp->flags |= BNXT_FLAG_DOUBLE_DB;
12988 rc = bnxt_init_mac_addr(bp);
12990 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
12991 rc = -EADDRNOTAVAIL;
12992 goto init_err_pci_clean;
12996 /* Read the adapter's DSN to use as the eswitch switch_id */
12997 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
13000 /* MTU range: 60 - FW defined max */
13001 dev->min_mtu = ETH_ZLEN;
13002 dev->max_mtu = bp->max_mtu;
13004 rc = bnxt_probe_phy(bp, true);
13006 goto init_err_pci_clean;
13008 bnxt_set_rx_skb_mode(bp, false);
13009 bnxt_set_tpa_flags(bp);
13010 bnxt_set_ring_params(bp);
13011 rc = bnxt_set_dflt_rings(bp, true);
13013 netdev_err(bp->dev, "Not enough rings available.\n");
13015 goto init_err_pci_clean;
13018 bnxt_fw_init_one_p3(bp);
13020 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13021 bp->flags |= BNXT_FLAG_STRIP_VLAN;
13023 rc = bnxt_init_int_mode(bp);
13025 goto init_err_pci_clean;
13027 /* No TC has been set yet and rings may have been trimmed due to
13028 * limited MSIX, so we re-initialize the TX rings per TC.
13030 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13035 create_singlethread_workqueue("bnxt_pf_wq");
13037 dev_err(&pdev->dev, "Unable to create workqueue.\n");
13039 goto init_err_pci_clean;
13042 rc = bnxt_init_tc(bp);
13044 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
13048 bnxt_inv_fw_health_reg(bp);
13049 bnxt_dl_register(bp);
13051 rc = register_netdev(dev);
13053 goto init_err_cleanup;
13056 devlink_port_type_eth_set(&bp->dl_port, bp->dev);
13057 bnxt_dl_fw_reporters_create(bp);
13059 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
13060 board_info[ent->driver_data].name,
13061 (long)pci_resource_start(pdev, 0), dev->dev_addr);
13062 pcie_print_link_status(pdev);
13064 pci_save_state(pdev);
13068 bnxt_dl_unregister(bp);
13069 bnxt_shutdown_tc(bp);
13070 bnxt_clear_int_mode(bp);
13072 init_err_pci_clean:
13073 bnxt_hwrm_func_drv_unrgtr(bp);
13074 bnxt_free_hwrm_short_cmd_req(bp);
13075 bnxt_free_hwrm_resources(bp);
13076 kfree(bp->fw_health);
13077 bp->fw_health = NULL;
13078 bnxt_cleanup_pci(bp);
13079 bnxt_free_ctx_mem(bp);
13082 kfree(bp->rss_indir_tbl);
13083 bp->rss_indir_tbl = NULL;
13090 static void bnxt_shutdown(struct pci_dev *pdev)
13092 struct net_device *dev = pci_get_drvdata(pdev);
13099 bp = netdev_priv(dev);
13101 goto shutdown_exit;
13103 if (netif_running(dev))
13106 bnxt_ulp_shutdown(bp);
13107 bnxt_clear_int_mode(bp);
13108 pci_disable_device(pdev);
13110 if (system_state == SYSTEM_POWER_OFF) {
13111 pci_wake_from_d3(pdev, bp->wol);
13112 pci_set_power_state(pdev, PCI_D3hot);
13119 #ifdef CONFIG_PM_SLEEP
13120 static int bnxt_suspend(struct device *device)
13122 struct net_device *dev = dev_get_drvdata(device);
13123 struct bnxt *bp = netdev_priv(dev);
13128 if (netif_running(dev)) {
13129 netif_device_detach(dev);
13130 rc = bnxt_close(dev);
13132 bnxt_hwrm_func_drv_unrgtr(bp);
13133 pci_disable_device(bp->pdev);
13134 bnxt_free_ctx_mem(bp);
13141 static int bnxt_resume(struct device *device)
13143 struct net_device *dev = dev_get_drvdata(device);
13144 struct bnxt *bp = netdev_priv(dev);
13148 rc = pci_enable_device(bp->pdev);
13150 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
13154 pci_set_master(bp->pdev);
13155 if (bnxt_hwrm_ver_get(bp)) {
13159 rc = bnxt_hwrm_func_reset(bp);
13165 rc = bnxt_hwrm_func_qcaps(bp);
13169 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
13174 bnxt_get_wol_settings(bp);
13175 if (netif_running(dev)) {
13176 rc = bnxt_open(dev);
13178 netif_device_attach(dev);
13182 bnxt_ulp_start(bp, rc);
13184 bnxt_reenable_sriov(bp);
13189 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
13190 #define BNXT_PM_OPS (&bnxt_pm_ops)
13194 #define BNXT_PM_OPS NULL
13196 #endif /* CONFIG_PM_SLEEP */
13199 * bnxt_io_error_detected - called when PCI error is detected
13200 * @pdev: Pointer to PCI device
13201 * @state: The current pci connection state
13203 * This function is called after a PCI bus error affecting
13204 * this device has been detected.
13206 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
13207 pci_channel_state_t state)
13209 struct net_device *netdev = pci_get_drvdata(pdev);
13210 struct bnxt *bp = netdev_priv(netdev);
13212 netdev_info(netdev, "PCI I/O error detected\n");
13215 netif_device_detach(netdev);
13219 if (state == pci_channel_io_perm_failure) {
13221 return PCI_ERS_RESULT_DISCONNECT;
13224 if (state == pci_channel_io_frozen)
13225 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
13227 if (netif_running(netdev))
13228 bnxt_close(netdev);
13230 pci_disable_device(pdev);
13231 bnxt_free_ctx_mem(bp);
13236 /* Request a slot reset. */
13237 return PCI_ERS_RESULT_NEED_RESET;
13241 * bnxt_io_slot_reset - called after the pci bus has been reset.
13242 * @pdev: Pointer to PCI device
13244 * Restart the card from scratch, as if from a cold-boot.
13245 * At this point, the card has experienced a hard reset,
13246 * followed by fixups by BIOS, and has its config space
13247 * set up identically to what it was at cold boot.
13249 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
13251 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
13252 struct net_device *netdev = pci_get_drvdata(pdev);
13253 struct bnxt *bp = netdev_priv(netdev);
13256 netdev_info(bp->dev, "PCI Slot Reset\n");
13260 if (pci_enable_device(pdev)) {
13261 dev_err(&pdev->dev,
13262 "Cannot re-enable PCI device after reset.\n");
13264 pci_set_master(pdev);
13265 /* Upon fatal error, the device's internal logic that latches the
13266 * BAR values is reset and is restored only by rewriting the BARs.
13267 *
13269 * Since pci_restore_state() does not rewrite a BAR whose value
13270 * matches the previously saved value, the driver needs to write the
13271 * BARs to 0 to force the restore in the fatal error case.
13273 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
13275 for (off = PCI_BASE_ADDRESS_0;
13276 off <= PCI_BASE_ADDRESS_5; off += 4)
13277 pci_write_config_dword(bp->pdev, off, 0);
13279 pci_restore_state(pdev);
13280 pci_save_state(pdev);
13282 err = bnxt_hwrm_func_reset(bp);
13284 result = PCI_ERS_RESULT_RECOVERED;
13293 * bnxt_io_resume - called when traffic can start flowing again.
13294 * @pdev: Pointer to PCI device
13296 * This callback is called when the error recovery driver tells
13297 * us that it's OK to resume normal operation.
13299 static void bnxt_io_resume(struct pci_dev *pdev)
13301 struct net_device *netdev = pci_get_drvdata(pdev);
13302 struct bnxt *bp = netdev_priv(netdev);
13305 netdev_info(bp->dev, "PCI Slot Resume\n");
13308 err = bnxt_hwrm_func_qcaps(bp);
13309 if (!err && netif_running(netdev))
13310 err = bnxt_open(netdev);
13312 bnxt_ulp_start(bp, err);
13314 bnxt_reenable_sriov(bp);
13315 netif_device_attach(netdev);
13321 static const struct pci_error_handlers bnxt_err_handler = {
13322 .error_detected = bnxt_io_error_detected,
13323 .slot_reset = bnxt_io_slot_reset,
13324 .resume = bnxt_io_resume
13327 static struct pci_driver bnxt_pci_driver = {
13328 .name = DRV_MODULE_NAME,
13329 .id_table = bnxt_pci_tbl,
13330 .probe = bnxt_init_one,
13331 .remove = bnxt_remove_one,
13332 .shutdown = bnxt_shutdown,
13333 .driver.pm = BNXT_PM_OPS,
13334 .err_handler = &bnxt_err_handler,
13335 #if defined(CONFIG_BNXT_SRIOV)
13336 .sriov_configure = bnxt_sriov_configure,
13340 static int __init bnxt_init(void)
13343 return pci_register_driver(&bnxt_pci_driver);
13346 static void __exit bnxt_exit(void)
13348 pci_unregister_driver(&bnxt_pci_driver);
13350 destroy_workqueue(bnxt_pf_wq);
13354 module_init(bnxt_init);
13355 module_exit(bnxt_exit);