drivers/net/ethernet/broadcom/bnxt/bnxt.c
1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2014-2016 Broadcom Corporation
4  * Copyright (c) 2016-2019 Broadcom Limited
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  */
10
11 #include <linux/module.h>
12
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
35 #include <linux/if.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
40 #include <net/ip.h>
41 #include <net/tcp.h>
42 #include <net/udp.h>
43 #include <net/checksum.h>
44 #include <net/ip6_checksum.h>
45 #include <net/udp_tunnel.h>
46 #include <linux/workqueue.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/log2.h>
50 #include <linux/aer.h>
51 #include <linux/bitmap.h>
52 #include <linux/cpu_rmap.h>
53 #include <linux/cpumask.h>
54 #include <net/pkt_cls.h>
55 #include <linux/hwmon.h>
56 #include <linux/hwmon-sysfs.h>
57
58 #include "bnxt_hsi.h"
59 #include "bnxt.h"
60 #include "bnxt_ulp.h"
61 #include "bnxt_sriov.h"
62 #include "bnxt_ethtool.h"
63 #include "bnxt_dcb.h"
64 #include "bnxt_xdp.h"
65 #include "bnxt_vfr.h"
66 #include "bnxt_tc.h"
67 #include "bnxt_devlink.h"
68 #include "bnxt_debugfs.h"
69
70 #define BNXT_TX_TIMEOUT         (5 * HZ)
71
72 static const char version[] =
73         "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
74
75 MODULE_LICENSE("GPL");
76 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
77 MODULE_VERSION(DRV_MODULE_VERSION);
78
79 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
80 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
81 #define BNXT_RX_COPY_THRESH 256
82
83 #define BNXT_TX_PUSH_THRESH 164
84
85 enum board_idx {
86         BCM57301,
87         BCM57302,
88         BCM57304,
89         BCM57417_NPAR,
90         BCM58700,
91         BCM57311,
92         BCM57312,
93         BCM57402,
94         BCM57404,
95         BCM57406,
96         BCM57402_NPAR,
97         BCM57407,
98         BCM57412,
99         BCM57414,
100         BCM57416,
101         BCM57417,
102         BCM57412_NPAR,
103         BCM57314,
104         BCM57417_SFP,
105         BCM57416_SFP,
106         BCM57404_NPAR,
107         BCM57406_NPAR,
108         BCM57407_SFP,
109         BCM57407_NPAR,
110         BCM57414_NPAR,
111         BCM57416_NPAR,
112         BCM57452,
113         BCM57454,
114         BCM5745x_NPAR,
115         BCM57508,
116         BCM57504,
117         BCM58802,
118         BCM58804,
119         BCM58808,
120         NETXTREME_E_VF,
121         NETXTREME_C_VF,
122         NETXTREME_S_VF,
123         NETXTREME_E_P5_VF,
124 };
125
126 /* indexed by enum above */
127 static const struct {
128         char *name;
129 } board_info[] = {
130         [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
131         [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
132         [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
133         [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
134         [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
135         [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
136         [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
137         [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
138         [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
139         [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
140         [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
141         [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
142         [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
143         [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
144         [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
145         [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
146         [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
147         [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
148         [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
149         [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
150         [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
151         [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
152         [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
153         [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
154         [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
155         [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
156         [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
157         [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
158         [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
159         [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
160         [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
161         [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
162         [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
163         [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
164         [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
165         [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
166         [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
167         [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
168 };
169
170 static const struct pci_device_id bnxt_pci_tbl[] = {
171         { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
172         { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
173         { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
174         { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
175         { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
176         { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
177         { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
178         { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
179         { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
180         { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
181         { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
182         { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
183         { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
184         { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
185         { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
186         { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
187         { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
188         { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
189         { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
190         { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
191         { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
192         { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
193         { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
194         { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
195         { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
196         { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
197         { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
198         { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
199         { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
200         { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
201         { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
202         { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
203         { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
204         { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
205         { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
206         { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
207         { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
208         { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
209         { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
210 #ifdef CONFIG_BNXT_SRIOV
211         { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
212         { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
213         { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
214         { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
215         { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
216         { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
217         { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
218         { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
219         { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
220         { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
221 #endif
222         { 0 }
223 };
224
225 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
226
227 static const u16 bnxt_vf_req_snif[] = {
228         HWRM_FUNC_CFG,
229         HWRM_FUNC_VF_CFG,
230         HWRM_PORT_PHY_QCFG,
231         HWRM_CFA_L2_FILTER_ALLOC,
232 };
233
234 static const u16 bnxt_async_events_arr[] = {
235         ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
236         ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
237         ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
238         ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
239         ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
240 };
241
242 static struct workqueue_struct *bnxt_pf_wq;
243
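/* Return true if the board index identifies a virtual function (VF)
 * variant of the device rather than a physical function.
 */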
244 static bool bnxt_vf_pciid(enum board_idx idx)
245 {
246         return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
247                 idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
248 }
249
250 #define DB_CP_REARM_FLAGS       (DB_KEY_CP | DB_IDX_VALID)
251 #define DB_CP_FLAGS             (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
252 #define DB_CP_IRQ_DIS_FLAGS     (DB_KEY_CP | DB_IRQ_DIS)
253
254 #define BNXT_CP_DB_IRQ_DIS(db)                                          \
255                 writel(DB_CP_IRQ_DIS_FLAGS, db)
256
257 #define BNXT_DB_CQ(db, idx)                                             \
258         writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
259
260 #define BNXT_DB_NQ_P5(db, idx)                                          \
261         writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)
262
263 #define BNXT_DB_CQ_ARM(db, idx)                                         \
264         writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
265
266 #define BNXT_DB_NQ_ARM_P5(db, idx)                                      \
267         writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)
268
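/* Ring the notification/completion queue doorbells.  P5 chips use the
 * 64-bit doorbell format keyed by db_key64; older chips use the legacy
 * 32-bit completion ring doorbell.
 */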
269 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
270 {
271         if (bp->flags & BNXT_FLAG_CHIP_P5)
272                 BNXT_DB_NQ_P5(db, idx);
273         else
274                 BNXT_DB_CQ(db, idx);
275 }
276
277 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
278 {
279         if (bp->flags & BNXT_FLAG_CHIP_P5)
280                 BNXT_DB_NQ_ARM_P5(db, idx);
281         else
282                 BNXT_DB_CQ_ARM(db, idx);
283 }
284
285 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
286 {
287         if (bp->flags & BNXT_FLAG_CHIP_P5)
288                 writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
289                        db->doorbell);
290         else
291                 BNXT_DB_CQ(db, idx);
292 }
293
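/* TX length hint flags, indexed by packet length in 512-byte units
 * (length >> 9 in bnxt_start_xmit()).
 */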
294 const u16 bnxt_lhint_arr[] = {
295         TX_BD_FLAGS_LHINT_512_AND_SMALLER,
296         TX_BD_FLAGS_LHINT_512_TO_1023,
297         TX_BD_FLAGS_LHINT_1024_TO_2047,
298         TX_BD_FLAGS_LHINT_1024_TO_2047,
299         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
300         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
301         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
302         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
303         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
304         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
305         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
306         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
307         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
308         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
309         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
310         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
311         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
312         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
313         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
314 };
315
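/* Return the CFA action for the skb from its HW port mux metadata dst,
 * or 0 if no such metadata is attached.
 */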
316 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
317 {
318         struct metadata_dst *md_dst = skb_metadata_dst(skb);
319
320         if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
321                 return 0;
322
323         return md_dst->u.port_info.port_id;
324 }
325
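/* Main transmit routine.  Short packets that fit within the push
 * threshold are written directly through the doorbell ("push" mode);
 * all other packets are DMA mapped and described with long TX BDs.
 */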
326 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
327 {
328         struct bnxt *bp = netdev_priv(dev);
329         struct tx_bd *txbd;
330         struct tx_bd_ext *txbd1;
331         struct netdev_queue *txq;
332         int i;
333         dma_addr_t mapping;
334         unsigned int length, pad = 0;
335         u32 len, free_size, vlan_tag_flags, cfa_action, flags;
336         u16 prod, last_frag;
337         struct pci_dev *pdev = bp->pdev;
338         struct bnxt_tx_ring_info *txr;
339         struct bnxt_sw_tx_bd *tx_buf;
340
341         i = skb_get_queue_mapping(skb);
342         if (unlikely(i >= bp->tx_nr_rings)) {
343                 dev_kfree_skb_any(skb);
344                 return NETDEV_TX_OK;
345         }
346
347         txq = netdev_get_tx_queue(dev, i);
348         txr = &bp->tx_ring[bp->tx_ring_map[i]];
349         prod = txr->tx_prod;
350
351         free_size = bnxt_tx_avail(bp, txr);
352         if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
353                 netif_tx_stop_queue(txq);
354                 return NETDEV_TX_BUSY;
355         }
356
357         length = skb->len;
358         len = skb_headlen(skb);
359         last_frag = skb_shinfo(skb)->nr_frags;
360
361         txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
362
363         txbd->tx_bd_opaque = prod;
364
365         tx_buf = &txr->tx_buf_ring[prod];
366         tx_buf->skb = skb;
367         tx_buf->nr_frags = last_frag;
368
369         vlan_tag_flags = 0;
370         cfa_action = bnxt_xmit_get_cfa_action(skb);
371         if (skb_vlan_tag_present(skb)) {
372                 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
373                                  skb_vlan_tag_get(skb);
374                 /* Currently supports 802.1Q and 802.1ad VLAN offloads;
375                  * QINQ1, QINQ2, QINQ3 VLAN headers are deprecated.
376                  */
377                 if (skb->vlan_proto == htons(ETH_P_8021Q))
378                         vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
379         }
380
381         if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
382                 struct tx_push_buffer *tx_push_buf = txr->tx_push;
383                 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
384                 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
385                 void __iomem *db = txr->tx_db.doorbell;
386                 void *pdata = tx_push_buf->data;
387                 u64 *end;
388                 int j, push_len;
389
390                 /* Set COAL_NOW to be ready quickly for the next push */
391                 tx_push->tx_bd_len_flags_type =
392                         cpu_to_le32((length << TX_BD_LEN_SHIFT) |
393                                         TX_BD_TYPE_LONG_TX_BD |
394                                         TX_BD_FLAGS_LHINT_512_AND_SMALLER |
395                                         TX_BD_FLAGS_COAL_NOW |
396                                         TX_BD_FLAGS_PACKET_END |
397                                         (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
398
399                 if (skb->ip_summed == CHECKSUM_PARTIAL)
400                         tx_push1->tx_bd_hsize_lflags =
401                                         cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
402                 else
403                         tx_push1->tx_bd_hsize_lflags = 0;
404
405                 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
406                 tx_push1->tx_bd_cfa_action =
407                         cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
408
409                 end = pdata + length;
410                 end = PTR_ALIGN(end, 8) - 1;
411                 *end = 0;
412
413                 skb_copy_from_linear_data(skb, pdata, len);
414                 pdata += len;
415                 for (j = 0; j < last_frag; j++) {
416                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
417                         void *fptr;
418
419                         fptr = skb_frag_address_safe(frag);
420                         if (!fptr)
421                                 goto normal_tx;
422
423                         memcpy(pdata, fptr, skb_frag_size(frag));
424                         pdata += skb_frag_size(frag);
425                 }
426
427                 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
428                 txbd->tx_bd_haddr = txr->data_mapping;
429                 prod = NEXT_TX(prod);
430                 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
431                 memcpy(txbd, tx_push1, sizeof(*txbd));
432                 prod = NEXT_TX(prod);
433                 tx_push->doorbell =
434                         cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
435                 txr->tx_prod = prod;
436
437                 tx_buf->is_push = 1;
438                 netdev_tx_sent_queue(txq, skb->len);
439                 wmb();  /* Sync is_push and byte queue before pushing data */
440
441                 push_len = (length + sizeof(*tx_push) + 7) / 8;
442                 if (push_len > 16) {
443                         __iowrite64_copy(db, tx_push_buf, 16);
444                         __iowrite32_copy(db + 4, tx_push_buf + 1,
445                                          (push_len - 16) << 1);
446                 } else {
447                         __iowrite64_copy(db, tx_push_buf, push_len);
448                 }
449
450                 goto tx_done;
451         }
452
453 normal_tx:
454         if (length < BNXT_MIN_PKT_SIZE) {
455                 pad = BNXT_MIN_PKT_SIZE - length;
456                 if (skb_pad(skb, pad)) {
457                         /* SKB already freed. */
458                         tx_buf->skb = NULL;
459                         return NETDEV_TX_OK;
460                 }
461                 length = BNXT_MIN_PKT_SIZE;
462         }
463
464         mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
465
466         if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
467                 dev_kfree_skb_any(skb);
468                 tx_buf->skb = NULL;
469                 return NETDEV_TX_OK;
470         }
471
472         dma_unmap_addr_set(tx_buf, mapping, mapping);
473         flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
474                 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
475
476         txbd->tx_bd_haddr = cpu_to_le64(mapping);
477
478         prod = NEXT_TX(prod);
479         txbd1 = (struct tx_bd_ext *)
480                 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
481
482         txbd1->tx_bd_hsize_lflags = 0;
483         if (skb_is_gso(skb)) {
484                 u32 hdr_len;
485
486                 if (skb->encapsulation)
487                         hdr_len = skb_inner_network_offset(skb) +
488                                 skb_inner_network_header_len(skb) +
489                                 inner_tcp_hdrlen(skb);
490                 else
491                         hdr_len = skb_transport_offset(skb) +
492                                 tcp_hdrlen(skb);
493
494                 txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
495                                         TX_BD_FLAGS_T_IPID |
496                                         (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
497                 length = skb_shinfo(skb)->gso_size;
498                 txbd1->tx_bd_mss = cpu_to_le32(length);
499                 length += hdr_len;
500         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
501                 txbd1->tx_bd_hsize_lflags =
502                         cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
503                 txbd1->tx_bd_mss = 0;
504         }
505
506         length >>= 9;
507         if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
508                 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
509                                      skb->len);
510                 i = 0;
511                 goto tx_dma_error;
512         }
513         flags |= bnxt_lhint_arr[length];
514         txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
515
516         txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
517         txbd1->tx_bd_cfa_action =
518                         cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
519         for (i = 0; i < last_frag; i++) {
520                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
521
522                 prod = NEXT_TX(prod);
523                 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
524
525                 len = skb_frag_size(frag);
526                 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
527                                            DMA_TO_DEVICE);
528
529                 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
530                         goto tx_dma_error;
531
532                 tx_buf = &txr->tx_buf_ring[prod];
533                 dma_unmap_addr_set(tx_buf, mapping, mapping);
534
535                 txbd->tx_bd_haddr = cpu_to_le64(mapping);
536
537                 flags = len << TX_BD_LEN_SHIFT;
538                 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
539         }
540
541         flags &= ~TX_BD_LEN;
542         txbd->tx_bd_len_flags_type =
543                 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
544                             TX_BD_FLAGS_PACKET_END);
545
546         netdev_tx_sent_queue(txq, skb->len);
547
548         /* Sync BD data before updating doorbell */
549         wmb();
550
551         prod = NEXT_TX(prod);
552         txr->tx_prod = prod;
553
554         if (!skb->xmit_more || netif_xmit_stopped(txq))
555                 bnxt_db_write(bp, &txr->tx_db, prod);
556
557 tx_done:
558
559         mmiowb();
560
561         if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
562                 if (skb->xmit_more && !tx_buf->is_push)
563                         bnxt_db_write(bp, &txr->tx_db, prod);
564
565                 netif_tx_stop_queue(txq);
566
567                 /* netif_tx_stop_queue() must be done before checking
568                  * tx index in bnxt_tx_avail() below, because in
569                  * bnxt_tx_int(), we update tx index before checking for
570                  * netif_tx_queue_stopped().
571                  */
572                 smp_mb();
573                 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
574                         netif_tx_wake_queue(txq);
575         }
576         return NETDEV_TX_OK;
577
578 tx_dma_error:
579         last_frag = i;
580
581         /* start back at beginning and unmap skb */
582         prod = txr->tx_prod;
583         tx_buf = &txr->tx_buf_ring[prod];
584         tx_buf->skb = NULL;
585         dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
586                          skb_headlen(skb), PCI_DMA_TODEVICE);
587         prod = NEXT_TX(prod);
588
589         /* unmap remaining mapped pages */
590         for (i = 0; i < last_frag; i++) {
591                 prod = NEXT_TX(prod);
592                 tx_buf = &txr->tx_buf_ring[prod];
593                 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
594                                skb_frag_size(&skb_shinfo(skb)->frags[i]),
595                                PCI_DMA_TODEVICE);
596         }
597
598         dev_kfree_skb_any(skb);
599         return NETDEV_TX_OK;
600 }
601
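/* Reclaim completed TX descriptors: unmap buffers, free skbs, advance
 * the consumer index, and wake the TX queue if enough space is freed.
 */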
602 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
603 {
604         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
605         struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
606         u16 cons = txr->tx_cons;
607         struct pci_dev *pdev = bp->pdev;
608         int i;
609         unsigned int tx_bytes = 0;
610
611         for (i = 0; i < nr_pkts; i++) {
612                 struct bnxt_sw_tx_bd *tx_buf;
613                 struct sk_buff *skb;
614                 int j, last;
615
616                 tx_buf = &txr->tx_buf_ring[cons];
617                 cons = NEXT_TX(cons);
618                 skb = tx_buf->skb;
619                 tx_buf->skb = NULL;
620
621                 if (tx_buf->is_push) {
622                         tx_buf->is_push = 0;
623                         goto next_tx_int;
624                 }
625
626                 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
627                                  skb_headlen(skb), PCI_DMA_TODEVICE);
628                 last = tx_buf->nr_frags;
629
630                 for (j = 0; j < last; j++) {
631                         cons = NEXT_TX(cons);
632                         tx_buf = &txr->tx_buf_ring[cons];
633                         dma_unmap_page(
634                                 &pdev->dev,
635                                 dma_unmap_addr(tx_buf, mapping),
636                                 skb_frag_size(&skb_shinfo(skb)->frags[j]),
637                                 PCI_DMA_TODEVICE);
638                 }
639
640 next_tx_int:
641                 cons = NEXT_TX(cons);
642
643                 tx_bytes += skb->len;
644                 dev_kfree_skb_any(skb);
645         }
646
647         netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
648         txr->tx_cons = cons;
649
650         /* Need to make the tx_cons update visible to bnxt_start_xmit()
651          * before checking for netif_tx_queue_stopped().  Without the
652          * memory barrier, there is a small possibility that bnxt_start_xmit()
653          * will miss it and cause the queue to be stopped forever.
654          */
655         smp_mb();
656
657         if (unlikely(netif_tx_queue_stopped(txq)) &&
658             (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
659                 __netif_tx_lock(txq, smp_processor_id());
660                 if (netif_tx_queue_stopped(txq) &&
661                     bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
662                     txr->dev_state != BNXT_DEV_STATE_CLOSING)
663                         netif_tx_wake_queue(txq);
664                 __netif_tx_unlock(txq);
665         }
666 }
667
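/* Allocate and DMA map a full page for a page-mode RX buffer. */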
668 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
669                                          gfp_t gfp)
670 {
671         struct device *dev = &bp->pdev->dev;
672         struct page *page;
673
674         page = alloc_page(gfp);
675         if (!page)
676                 return NULL;
677
678         *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
679                                       DMA_ATTR_WEAK_ORDERING);
680         if (dma_mapping_error(dev, *mapping)) {
681                 __free_page(page);
682                 return NULL;
683         }
684         *mapping += bp->rx_dma_offset;
685         return page;
686 }
687
688 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
689                                        gfp_t gfp)
690 {
691         u8 *data;
692         struct pci_dev *pdev = bp->pdev;
693
694         data = kmalloc(bp->rx_buf_size, gfp);
695         if (!data)
696                 return NULL;
697
698         *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
699                                         bp->rx_buf_use_size, bp->rx_dir,
700                                         DMA_ATTR_WEAK_ORDERING);
701
702         if (dma_mapping_error(&pdev->dev, *mapping)) {
703                 kfree(data);
704                 data = NULL;
705         }
706         return data;
707 }
708
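/* Allocate an RX buffer (a page in page mode, or a kmalloc'ed data
 * buffer otherwise) and program its DMA address into the RX BD at the
 * given producer index.
 */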
709 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
710                        u16 prod, gfp_t gfp)
711 {
712         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
713         struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
714         dma_addr_t mapping;
715
716         if (BNXT_RX_PAGE_MODE(bp)) {
717                 struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);
718
719                 if (!page)
720                         return -ENOMEM;
721
722                 rx_buf->data = page;
723                 rx_buf->data_ptr = page_address(page) + bp->rx_offset;
724         } else {
725                 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
726
727                 if (!data)
728                         return -ENOMEM;
729
730                 rx_buf->data = data;
731                 rx_buf->data_ptr = data + bp->rx_offset;
732         }
733         rx_buf->mapping = mapping;
734
735         rxbd->rx_bd_haddr = cpu_to_le64(mapping);
736         return 0;
737 }
738
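/* Recycle an RX buffer from the consumer slot back to the current
 * producer slot so the hardware can reuse it.
 */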
739 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
740 {
741         u16 prod = rxr->rx_prod;
742         struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
743         struct rx_bd *cons_bd, *prod_bd;
744
745         prod_rx_buf = &rxr->rx_buf_ring[prod];
746         cons_rx_buf = &rxr->rx_buf_ring[cons];
747
748         prod_rx_buf->data = data;
749         prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
750
751         prod_rx_buf->mapping = cons_rx_buf->mapping;
752
753         prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
754         cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
755
756         prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
757 }
758
759 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
760 {
761         u16 next, max = rxr->rx_agg_bmap_size;
762
763         next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
764         if (next >= max)
765                 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
766         return next;
767 }
768
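/* Allocate an aggregation ring buffer.  When PAGE_SIZE is larger than
 * BNXT_RX_PAGE_SIZE, a single page is carved into multiple aggregation
 * buffers.
 */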
769 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
770                                      struct bnxt_rx_ring_info *rxr,
771                                      u16 prod, gfp_t gfp)
772 {
773         struct rx_bd *rxbd =
774                 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
775         struct bnxt_sw_rx_agg_bd *rx_agg_buf;
776         struct pci_dev *pdev = bp->pdev;
777         struct page *page;
778         dma_addr_t mapping;
779         u16 sw_prod = rxr->rx_sw_agg_prod;
780         unsigned int offset = 0;
781
782         if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
783                 page = rxr->rx_page;
784                 if (!page) {
785                         page = alloc_page(gfp);
786                         if (!page)
787                                 return -ENOMEM;
788                         rxr->rx_page = page;
789                         rxr->rx_page_offset = 0;
790                 }
791                 offset = rxr->rx_page_offset;
792                 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
793                 if (rxr->rx_page_offset == PAGE_SIZE)
794                         rxr->rx_page = NULL;
795                 else
796                         get_page(page);
797         } else {
798                 page = alloc_page(gfp);
799                 if (!page)
800                         return -ENOMEM;
801         }
802
803         mapping = dma_map_page_attrs(&pdev->dev, page, offset,
804                                      BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
805                                      DMA_ATTR_WEAK_ORDERING);
806         if (dma_mapping_error(&pdev->dev, mapping)) {
807                 __free_page(page);
808                 return -EIO;
809         }
810
811         if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
812                 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
813
814         __set_bit(sw_prod, rxr->rx_agg_bmap);
815         rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
816         rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
817
818         rx_agg_buf->page = page;
819         rx_agg_buf->offset = offset;
820         rx_agg_buf->mapping = mapping;
821         rxbd->rx_bd_haddr = cpu_to_le64(mapping);
822         rxbd->rx_bd_opaque = sw_prod;
823         return 0;
824 }
825
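/* Recycle the aggregation buffers referenced by the completion ring
 * entries back onto the aggregation ring, e.g. when the packet is
 * being dropped.
 */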
826 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
827                                    u32 agg_bufs)
828 {
829         struct bnxt_napi *bnapi = cpr->bnapi;
830         struct bnxt *bp = bnapi->bp;
831         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
832         u16 prod = rxr->rx_agg_prod;
833         u16 sw_prod = rxr->rx_sw_agg_prod;
834         u32 i;
835
836         for (i = 0; i < agg_bufs; i++) {
837                 u16 cons;
838                 struct rx_agg_cmp *agg;
839                 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
840                 struct rx_bd *prod_bd;
841                 struct page *page;
842
843                 agg = (struct rx_agg_cmp *)
844                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
845                 cons = agg->rx_agg_cmp_opaque;
846                 __clear_bit(cons, rxr->rx_agg_bmap);
847
848                 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
849                         sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
850
851                 __set_bit(sw_prod, rxr->rx_agg_bmap);
852                 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
853                 cons_rx_buf = &rxr->rx_agg_ring[cons];
854
855                 /* It is possible for sw_prod to be equal to cons, so
856                  * set cons_rx_buf->page to NULL first.
857                  */
858                 page = cons_rx_buf->page;
859                 cons_rx_buf->page = NULL;
860                 prod_rx_buf->page = page;
861                 prod_rx_buf->offset = cons_rx_buf->offset;
862
863                 prod_rx_buf->mapping = cons_rx_buf->mapping;
864
865                 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
866
867                 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
868                 prod_bd->rx_bd_opaque = sw_prod;
869
870                 prod = NEXT_RX_AGG(prod);
871                 sw_prod = NEXT_RX_AGG(sw_prod);
872                 cp_cons = NEXT_CMP(cp_cons);
873         }
874         rxr->rx_agg_prod = prod;
875         rxr->rx_sw_agg_prod = sw_prod;
876 }
877
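/* Build an skb for a page-mode RX buffer: copy the packet headers into
 * the skb linear area and attach the rest of the page as a fragment.
 */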
878 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
879                                         struct bnxt_rx_ring_info *rxr,
880                                         u16 cons, void *data, u8 *data_ptr,
881                                         dma_addr_t dma_addr,
882                                         unsigned int offset_and_len)
883 {
884         unsigned int payload = offset_and_len >> 16;
885         unsigned int len = offset_and_len & 0xffff;
886         struct skb_frag_struct *frag;
887         struct page *page = data;
888         u16 prod = rxr->rx_prod;
889         struct sk_buff *skb;
890         int off, err;
891
892         err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
893         if (unlikely(err)) {
894                 bnxt_reuse_rx_data(rxr, cons, data);
895                 return NULL;
896         }
897         dma_addr -= bp->rx_dma_offset;
898         dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
899                              DMA_ATTR_WEAK_ORDERING);
900
901         if (unlikely(!payload))
902                 payload = eth_get_headlen(data_ptr, len);
903
904         skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
905         if (!skb) {
906                 __free_page(page);
907                 return NULL;
908         }
909
910         off = (void *)data_ptr - page_address(page);
911         skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
912         memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
913                payload + NET_IP_ALIGN);
914
915         frag = &skb_shinfo(skb)->frags[0];
916         skb_frag_size_sub(frag, payload);
917         frag->page_offset += payload;
918         skb->data_len -= payload;
919         skb->tail += payload;
920
921         return skb;
922 }
923
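/* Build an skb around a normal (non page-mode) RX buffer with
 * build_skb() after unmapping it.
 */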
924 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
925                                    struct bnxt_rx_ring_info *rxr, u16 cons,
926                                    void *data, u8 *data_ptr,
927                                    dma_addr_t dma_addr,
928                                    unsigned int offset_and_len)
929 {
930         u16 prod = rxr->rx_prod;
931         struct sk_buff *skb;
932         int err;
933
934         err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
935         if (unlikely(err)) {
936                 bnxt_reuse_rx_data(rxr, cons, data);
937                 return NULL;
938         }
939
940         skb = build_skb(data, 0);
941         dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
942                                bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
943         if (!skb) {
944                 kfree(data);
945                 return NULL;
946         }
947
948         skb_reserve(skb, bp->rx_offset);
949         skb_put(skb, offset_and_len & 0xffff);
950         return skb;
951 }
952
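/* Append the aggregation buffers of a jumbo/TPA packet to the skb as
 * page fragments, replenishing the aggregation ring as we go.
 */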
953 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
954                                      struct bnxt_cp_ring_info *cpr,
955                                      struct sk_buff *skb, u16 cp_cons,
956                                      u32 agg_bufs)
957 {
958         struct bnxt_napi *bnapi = cpr->bnapi;
959         struct pci_dev *pdev = bp->pdev;
960         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
961         u16 prod = rxr->rx_agg_prod;
962         u32 i;
963
964         for (i = 0; i < agg_bufs; i++) {
965                 u16 cons, frag_len;
966                 struct rx_agg_cmp *agg;
967                 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
968                 struct page *page;
969                 dma_addr_t mapping;
970
971                 agg = (struct rx_agg_cmp *)
972                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
973                 cons = agg->rx_agg_cmp_opaque;
974                 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
975                             RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
976
977                 cons_rx_buf = &rxr->rx_agg_ring[cons];
978                 skb_fill_page_desc(skb, i, cons_rx_buf->page,
979                                    cons_rx_buf->offset, frag_len);
980                 __clear_bit(cons, rxr->rx_agg_bmap);
981
982                 /* It is possible for bnxt_alloc_rx_page() to allocate
983                  * a sw_prod index that equals the cons index, so we
984                  * need to clear the cons entry now.
985                  */
986                 mapping = cons_rx_buf->mapping;
987                 page = cons_rx_buf->page;
988                 cons_rx_buf->page = NULL;
989
990                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
991                         struct skb_shared_info *shinfo;
992                         unsigned int nr_frags;
993
994                         shinfo = skb_shinfo(skb);
995                         nr_frags = --shinfo->nr_frags;
996                         __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
997
998                         dev_kfree_skb(skb);
999
1000                         cons_rx_buf->page = page;
1001
1002                         /* Update prod since possibly some pages have been
1003                          * allocated already.
1004                          */
1005                         rxr->rx_agg_prod = prod;
1006                         bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs - i);
1007                         return NULL;
1008                 }
1009
1010                 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1011                                      PCI_DMA_FROMDEVICE,
1012                                      DMA_ATTR_WEAK_ORDERING);
1013
1014                 skb->data_len += frag_len;
1015                 skb->len += frag_len;
1016                 skb->truesize += PAGE_SIZE;
1017
1018                 prod = NEXT_RX_AGG(prod);
1019                 cp_cons = NEXT_CMP(cp_cons);
1020         }
1021         rxr->rx_agg_prod = prod;
1022         return skb;
1023 }
1024
1025 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1026                                u8 agg_bufs, u32 *raw_cons)
1027 {
1028         u16 last;
1029         struct rx_agg_cmp *agg;
1030
1031         *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1032         last = RING_CMP(*raw_cons);
1033         agg = (struct rx_agg_cmp *)
1034                 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1035         return RX_AGG_CMP_VALID(agg, *raw_cons);
1036 }
1037
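/* Copy a small received packet into a freshly allocated skb so the
 * original RX buffer can be reused in place.
 */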
1038 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1039                                             unsigned int len,
1040                                             dma_addr_t mapping)
1041 {
1042         struct bnxt *bp = bnapi->bp;
1043         struct pci_dev *pdev = bp->pdev;
1044         struct sk_buff *skb;
1045
1046         skb = napi_alloc_skb(&bnapi->napi, len);
1047         if (!skb)
1048                 return NULL;
1049
1050         dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1051                                 bp->rx_dir);
1052
1053         memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1054                len + NET_IP_ALIGN);
1055
1056         dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1057                                    bp->rx_dir);
1058
1059         skb_put(skb, len);
1060         return skb;
1061 }
1062
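/* Advance the completion ring consumer past an RX completion (and its
 * aggregation entries) that is being discarded.  Returns -EBUSY if the
 * aggregation completions have not all arrived yet.
 */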
1063 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1064                            u32 *raw_cons, void *cmp)
1065 {
1066         struct rx_cmp *rxcmp = cmp;
1067         u32 tmp_raw_cons = *raw_cons;
1068         u8 cmp_type, agg_bufs = 0;
1069
1070         cmp_type = RX_CMP_TYPE(rxcmp);
1071
1072         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1073                 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1074                             RX_CMP_AGG_BUFS) >>
1075                            RX_CMP_AGG_BUFS_SHIFT;
1076         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1077                 struct rx_tpa_end_cmp *tpa_end = cmp;
1078
1079                 agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1080                             RX_TPA_END_CMP_AGG_BUFS) >>
1081                            RX_TPA_END_CMP_AGG_BUFS_SHIFT;
1082         }
1083
1084         if (agg_bufs) {
1085                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1086                         return -EBUSY;
1087         }
1088         *raw_cons = tmp_raw_cons;
1089         return 0;
1090 }
1091
1092 static void bnxt_queue_sp_work(struct bnxt *bp)
1093 {
1094         if (BNXT_PF(bp))
1095                 queue_work(bnxt_pf_wq, &bp->sp_task);
1096         else
1097                 schedule_work(&bp->sp_task);
1098 }
1099
1100 static void bnxt_cancel_sp_work(struct bnxt *bp)
1101 {
1102         if (BNXT_PF(bp))
1103                 flush_workqueue(bnxt_pf_wq);
1104         else
1105                 cancel_work_sync(&bp->sp_task);
1106 }
1107
1108 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1109 {
1110         if (!rxr->bnapi->in_reset) {
1111                 rxr->bnapi->in_reset = true;
1112                 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1113                 bnxt_queue_sp_work(bp);
1114         }
1115         rxr->rx_next_cons = 0xffff;
1116 }
1117
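/* Handle a TPA_START completion: stash the current RX buffer in the
 * per-aggregation tpa_info and give the hardware a fresh buffer in its
 * place on the RX ring.
 */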
1118 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1119                            struct rx_tpa_start_cmp *tpa_start,
1120                            struct rx_tpa_start_cmp_ext *tpa_start1)
1121 {
1122         u8 agg_id = TPA_START_AGG_ID(tpa_start);
1123         u16 cons, prod;
1124         struct bnxt_tpa_info *tpa_info;
1125         struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1126         struct rx_bd *prod_bd;
1127         dma_addr_t mapping;
1128
1129         cons = tpa_start->rx_tpa_start_cmp_opaque;
1130         prod = rxr->rx_prod;
1131         cons_rx_buf = &rxr->rx_buf_ring[cons];
1132         prod_rx_buf = &rxr->rx_buf_ring[prod];
1133         tpa_info = &rxr->rx_tpa[agg_id];
1134
1135         if (unlikely(cons != rxr->rx_next_cons)) {
1136                 bnxt_sched_reset(bp, rxr);
1137                 return;
1138         }
1139         /* Store cfa_code in tpa_info to use in tpa_end
1140          * completion processing.
1141          */
1142         tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1143         prod_rx_buf->data = tpa_info->data;
1144         prod_rx_buf->data_ptr = tpa_info->data_ptr;
1145
1146         mapping = tpa_info->mapping;
1147         prod_rx_buf->mapping = mapping;
1148
1149         prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1150
1151         prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1152
1153         tpa_info->data = cons_rx_buf->data;
1154         tpa_info->data_ptr = cons_rx_buf->data_ptr;
1155         cons_rx_buf->data = NULL;
1156         tpa_info->mapping = cons_rx_buf->mapping;
1157
1158         tpa_info->len =
1159                 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1160                                 RX_TPA_START_CMP_LEN_SHIFT;
1161         if (likely(TPA_START_HASH_VALID(tpa_start))) {
1162                 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1163
1164                 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1165                 tpa_info->gso_type = SKB_GSO_TCPV4;
1166                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1167                 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
1168                         tpa_info->gso_type = SKB_GSO_TCPV6;
1169                 tpa_info->rss_hash =
1170                         le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1171         } else {
1172                 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1173                 tpa_info->gso_type = 0;
1174                 if (netif_msg_rx_err(bp))
1175                         netdev_warn(bp->dev, "TPA packet without valid hash\n");
1176         }
1177         tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1178         tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1179         tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1180
1181         rxr->rx_prod = NEXT_RX(prod);
1182         cons = NEXT_RX(cons);
1183         rxr->rx_next_cons = NEXT_RX(cons);
1184         cons_rx_buf = &rxr->rx_buf_ring[cons];
1185
1186         bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1187         rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1188         cons_rx_buf->data = NULL;
1189 }
1190
1191 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
1192                            u32 agg_bufs)
1193 {
1194         if (agg_bufs)
1195                 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
1196 }
1197
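/* Chip-specific GRO fixup for 5731x: locate the inner TCP/IP headers
 * from the hardware-provided offsets and seed the TCP pseudo-header
 * checksum before GRO completion.
 */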
1198 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1199                                            int payload_off, int tcp_ts,
1200                                            struct sk_buff *skb)
1201 {
1202 #ifdef CONFIG_INET
1203         struct tcphdr *th;
1204         int len, nw_off;
1205         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1206         u32 hdr_info = tpa_info->hdr_info;
1207         bool loopback = false;
1208
1209         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1210         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1211         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1212
1213         /* If the packet is an internal loopback packet, the offsets will
1214          * have an extra 4 bytes.
1215          */
1216         if (inner_mac_off == 4) {
1217                 loopback = true;
1218         } else if (inner_mac_off > 4) {
1219                 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1220                                             ETH_HLEN - 2));
1221
1222                 /* We only support inner IPv4/IPv6.  If we don't see the
1223                  * correct protocol ID, it must be a loopback packet where
1224                  * the offsets are off by 4.
1225                  */
1226                 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1227                         loopback = true;
1228         }
1229         if (loopback) {
1230                 /* internal loopback packet, subtract 4 from all offsets */
1231                 inner_ip_off -= 4;
1232                 inner_mac_off -= 4;
1233                 outer_ip_off -= 4;
1234         }
1235
1236         nw_off = inner_ip_off - ETH_HLEN;
1237         skb_set_network_header(skb, nw_off);
1238         if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1239                 struct ipv6hdr *iph = ipv6_hdr(skb);
1240
1241                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1242                 len = skb->len - skb_transport_offset(skb);
1243                 th = tcp_hdr(skb);
1244                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1245         } else {
1246                 struct iphdr *iph = ip_hdr(skb);
1247
1248                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1249                 len = skb->len - skb_transport_offset(skb);
1250                 th = tcp_hdr(skb);
1251                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1252         }
1253
1254         if (inner_mac_off) { /* tunnel */
1255                 struct udphdr *uh = NULL;
1256                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1257                                             ETH_HLEN - 2));
1258
1259                 if (proto == htons(ETH_P_IP)) {
1260                         struct iphdr *iph = (struct iphdr *)skb->data;
1261
1262                         if (iph->protocol == IPPROTO_UDP)
1263                                 uh = (struct udphdr *)(iph + 1);
1264                 } else {
1265                         struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1266
1267                         if (iph->nexthdr == IPPROTO_UDP)
1268                                 uh = (struct udphdr *)(iph + 1);
1269                 }
1270                 if (uh) {
1271                         if (uh->check)
1272                                 skb_shinfo(skb)->gso_type |=
1273                                         SKB_GSO_UDP_TUNNEL_CSUM;
1274                         else
1275                                 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1276                 }
1277         }
1278 #endif
1279         return skb;
1280 }
1281
1282 #define BNXT_IPV4_HDR_SIZE      (sizeof(struct iphdr) + sizeof(struct tcphdr))
1283 #define BNXT_IPV6_HDR_SIZE      (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1284
1285 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1286                                            int payload_off, int tcp_ts,
1287                                            struct sk_buff *skb)
1288 {
1289 #ifdef CONFIG_INET
1290         struct tcphdr *th;
1291         int len, nw_off, tcp_opt_len = 0;
1292
1293         if (tcp_ts)
1294                 tcp_opt_len = 12;
1295
1296         if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1297                 struct iphdr *iph;
1298
1299                 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1300                          ETH_HLEN;
1301                 skb_set_network_header(skb, nw_off);
1302                 iph = ip_hdr(skb);
1303                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1304                 len = skb->len - skb_transport_offset(skb);
1305                 th = tcp_hdr(skb);
1306                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1307         } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1308                 struct ipv6hdr *iph;
1309
1310                 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1311                          ETH_HLEN;
1312                 skb_set_network_header(skb, nw_off);
1313                 iph = ipv6_hdr(skb);
1314                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1315                 len = skb->len - skb_transport_offset(skb);
1316                 th = tcp_hdr(skb);
1317                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1318         } else {
1319                 dev_kfree_skb_any(skb);
1320                 return NULL;
1321         }
1322
1323         if (nw_off) { /* tunnel */
1324                 struct udphdr *uh = NULL;
1325
1326                 if (skb->protocol == htons(ETH_P_IP)) {
1327                         struct iphdr *iph = (struct iphdr *)skb->data;
1328
1329                         if (iph->protocol == IPPROTO_UDP)
1330                                 uh = (struct udphdr *)(iph + 1);
1331                 } else {
1332                         struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1333
1334                         if (iph->nexthdr == IPPROTO_UDP)
1335                                 uh = (struct udphdr *)(iph + 1);
1336                 }
1337                 if (uh) {
1338                         if (uh->check)
1339                                 skb_shinfo(skb)->gso_type |=
1340                                         SKB_GSO_UDP_TUNNEL_CSUM;
1341                         else
1342                                 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1343                 }
1344         }
1345 #endif
1346         return skb;
1347 }
1348
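/* Finish a TPA aggregated skb: set gso_size/gso_type, let the
 * chip-specific gro_func fix up the headers, then complete GRO.
 */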
1349 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1350                                            struct bnxt_tpa_info *tpa_info,
1351                                            struct rx_tpa_end_cmp *tpa_end,
1352                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1353                                            struct sk_buff *skb)
1354 {
1355 #ifdef CONFIG_INET
1356         int payload_off;
1357         u16 segs;
1358
1359         segs = TPA_END_TPA_SEGS(tpa_end);
1360         if (segs == 1)
1361                 return skb;
1362
1363         NAPI_GRO_CB(skb)->count = segs;
1364         skb_shinfo(skb)->gso_size =
1365                 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1366         skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1367         payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1368                        RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
1369                       RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
1370         skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1371         if (likely(skb))
1372                 tcp_gro_complete(skb);
1373 #endif
1374         return skb;
1375 }
1376
1377 /* Given the cfa_code of a received packet, determine which
1378  * netdev (vf-rep or PF) the packet is destined to.
1379  */
1380 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1381 {
1382         struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1383
1384         /* if vf-rep dev is NULL, the packet must belong to the PF */
1385         return dev ? dev : bp->dev;
1386 }
1387
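/* Handle a TPA end completion.  Look up the aggregation state for this
 * agg_id, build (or copy, for small packets) an skb around the aggregated
 * data, attach any aggregation pages, and apply VLAN, RSS hash and
 * checksum metadata before optional GRO processing.  Returns
 * ERR_PTR(-EBUSY) if the completion ring does not yet contain all of the
 * aggregation buffers.
 */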
1388 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1389                                            struct bnxt_cp_ring_info *cpr,
1390                                            u32 *raw_cons,
1391                                            struct rx_tpa_end_cmp *tpa_end,
1392                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1393                                            u8 *event)
1394 {
1395         struct bnxt_napi *bnapi = cpr->bnapi;
1396         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1397         u8 agg_id = TPA_END_AGG_ID(tpa_end);
1398         u8 *data_ptr, agg_bufs;
1399         u16 cp_cons = RING_CMP(*raw_cons);
1400         unsigned int len;
1401         struct bnxt_tpa_info *tpa_info;
1402         dma_addr_t mapping;
1403         struct sk_buff *skb;
1404         void *data;
1405
1406         if (unlikely(bnapi->in_reset)) {
1407                 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1408
1409                 if (rc < 0)
1410                         return ERR_PTR(-EBUSY);
1411                 return NULL;
1412         }
1413
1414         tpa_info = &rxr->rx_tpa[agg_id];
1415         data = tpa_info->data;
1416         data_ptr = tpa_info->data_ptr;
1417         prefetch(data_ptr);
1418         len = tpa_info->len;
1419         mapping = tpa_info->mapping;
1420
1421         agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1422                     RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
1423
1424         if (agg_bufs) {
1425                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1426                         return ERR_PTR(-EBUSY);
1427
1428                 *event |= BNXT_AGG_EVENT;
1429                 cp_cons = NEXT_CMP(cp_cons);
1430         }
1431
1432         if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1433                 bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
1434                 if (agg_bufs > MAX_SKB_FRAGS)
1435                         netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1436                                     agg_bufs, (int)MAX_SKB_FRAGS);
1437                 return NULL;
1438         }
1439
1440         if (len <= bp->rx_copy_thresh) {
1441                 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1442                 if (!skb) {
1443                         bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
1444                         return NULL;
1445                 }
1446         } else {
1447                 u8 *new_data;
1448                 dma_addr_t new_mapping;
1449
1450                 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1451                 if (!new_data) {
1452                         bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
1453                         return NULL;
1454                 }
1455
1456                 tpa_info->data = new_data;
1457                 tpa_info->data_ptr = new_data + bp->rx_offset;
1458                 tpa_info->mapping = new_mapping;
1459
1460                 skb = build_skb(data, 0);
1461                 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1462                                        bp->rx_buf_use_size, bp->rx_dir,
1463                                        DMA_ATTR_WEAK_ORDERING);
1464
1465                 if (!skb) {
1466                         kfree(data);
1467                         bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
1468                         return NULL;
1469                 }
1470                 skb_reserve(skb, bp->rx_offset);
1471                 skb_put(skb, len);
1472         }
1473
1474         if (agg_bufs) {
1475                 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
1476                 if (!skb) {
1477                         /* Page reuse already handled by bnxt_rx_pages(). */
1478                         return NULL;
1479                 }
1480         }
1481
1482         skb->protocol =
1483                 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1484
1485         if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1486                 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1487
1488         if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1489             (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1490                 u16 vlan_proto = tpa_info->metadata >>
1491                         RX_CMP_FLAGS2_METADATA_TPID_SFT;
1492                 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1493
1494                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1495         }
1496
1497         skb_checksum_none_assert(skb);
1498         if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1499                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1500                 skb->csum_level =
1501                         (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1502         }
1503
1504         if (TPA_END_GRO(tpa_end))
1505                 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1506
1507         return skb;
1508 }
1509
1510 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1511                              struct sk_buff *skb)
1512 {
1513         if (skb->dev != bp->dev) {
1514                 /* this packet belongs to a vf-rep */
1515                 bnxt_vf_rep_rx(bp, skb);
1516                 return;
1517         }
1518         skb_record_rx_queue(skb, bnapi->index);
1519         napi_gro_receive(&bnapi->napi, skb);
1520 }
1521
1522 /* returns the following:
1523  * 1       - 1 packet successfully received
1524  * 0       - successful TPA_START, packet not completed yet
1525  * -EBUSY  - completion ring does not have all the agg buffers yet
1526  * -ENOMEM - packet aborted due to out of memory
1527  * -EIO    - packet aborted due to hw error indicated in BD
1528  */
1529 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1530                        u32 *raw_cons, u8 *event)
1531 {
1532         struct bnxt_napi *bnapi = cpr->bnapi;
1533         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1534         struct net_device *dev = bp->dev;
1535         struct rx_cmp *rxcmp;
1536         struct rx_cmp_ext *rxcmp1;
1537         u32 tmp_raw_cons = *raw_cons;
1538         u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1539         struct bnxt_sw_rx_bd *rx_buf;
1540         unsigned int len;
1541         u8 *data_ptr, agg_bufs, cmp_type;
1542         dma_addr_t dma_addr;
1543         struct sk_buff *skb;
1544         void *data;
1545         int rc = 0;
1546         u32 misc;
1547
1548         rxcmp = (struct rx_cmp *)
1549                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1550
1551         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1552         cp_cons = RING_CMP(tmp_raw_cons);
1553         rxcmp1 = (struct rx_cmp_ext *)
1554                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1555
1556         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1557                 return -EBUSY;
1558
1559         cmp_type = RX_CMP_TYPE(rxcmp);
1560
1561         prod = rxr->rx_prod;
1562
1563         if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1564                 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1565                                (struct rx_tpa_start_cmp_ext *)rxcmp1);
1566
1567                 *event |= BNXT_RX_EVENT;
1568                 goto next_rx_no_prod_no_len;
1569
1570         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1571                 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1572                                    (struct rx_tpa_end_cmp *)rxcmp,
1573                                    (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1574
1575                 if (IS_ERR(skb))
1576                         return -EBUSY;
1577
1578                 rc = -ENOMEM;
1579                 if (likely(skb)) {
1580                         bnxt_deliver_skb(bp, bnapi, skb);
1581                         rc = 1;
1582                 }
1583                 *event |= BNXT_RX_EVENT;
1584                 goto next_rx_no_prod_no_len;
1585         }
1586
1587         cons = rxcmp->rx_cmp_opaque;
1588         rx_buf = &rxr->rx_buf_ring[cons];
1589         data = rx_buf->data;
1590         data_ptr = rx_buf->data_ptr;
1591         if (unlikely(cons != rxr->rx_next_cons)) {
1592                 int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
1593
1594                 bnxt_sched_reset(bp, rxr);
1595                 return rc1;
1596         }
1597         prefetch(data_ptr);
1598
1599         misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1600         agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1601
1602         if (agg_bufs) {
1603                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1604                         return -EBUSY;
1605
1606                 cp_cons = NEXT_CMP(cp_cons);
1607                 *event |= BNXT_AGG_EVENT;
1608         }
1609         *event |= BNXT_RX_EVENT;
1610
1611         rx_buf->data = NULL;
1612         if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1613                 bnxt_reuse_rx_data(rxr, cons, data);
1614                 if (agg_bufs)
1615                         bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
1616
1617                 rc = -EIO;
1618                 goto next_rx;
1619         }
1620
1621         len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
1622         dma_addr = rx_buf->mapping;
1623
1624         if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1625                 rc = 1;
1626                 goto next_rx;
1627         }
1628
1629         if (len <= bp->rx_copy_thresh) {
1630                 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1631                 bnxt_reuse_rx_data(rxr, cons, data);
1632                 if (!skb) {
1633                         rc = -ENOMEM;
1634                         goto next_rx;
1635                 }
1636         } else {
1637                 u32 payload;
1638
1639                 if (rx_buf->data_ptr == data_ptr)
1640                         payload = misc & RX_CMP_PAYLOAD_OFFSET;
1641                 else
1642                         payload = 0;
1643                 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1644                                       payload | len);
1645                 if (!skb) {
1646                         rc = -ENOMEM;
1647                         goto next_rx;
1648                 }
1649         }
1650
1651         if (agg_bufs) {
1652                 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
1653                 if (!skb) {
1654                         rc = -ENOMEM;
1655                         goto next_rx;
1656                 }
1657         }
1658
1659         if (RX_CMP_HASH_VALID(rxcmp)) {
1660                 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1661                 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1662
1663                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1664                 if (hash_type != 1 && hash_type != 3)
1665                         type = PKT_HASH_TYPE_L3;
1666                 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1667         }
1668
1669         cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1670         skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1671
1672         if ((rxcmp1->rx_cmp_flags2 &
1673              cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1674             (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1675                 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1676                 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1677                 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1678
1679                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1680         }
1681
1682         skb_checksum_none_assert(skb);
1683         if (RX_CMP_L4_CS_OK(rxcmp1)) {
1684                 if (dev->features & NETIF_F_RXCSUM) {
1685                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1686                         skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1687                 }
1688         } else {
1689                 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1690                         if (dev->features & NETIF_F_RXCSUM)
1691                                 bnapi->cp_ring.rx_l4_csum_errors++;
1692                 }
1693         }
1694
1695         bnxt_deliver_skb(bp, bnapi, skb);
1696         rc = 1;
1697
1698 next_rx:
1699         rxr->rx_prod = NEXT_RX(prod);
1700         rxr->rx_next_cons = NEXT_RX(cons);
1701
1702         cpr->rx_packets += 1;
1703         cpr->rx_bytes += len;
1704
1705 next_rx_no_prod_no_len:
1706         *raw_cons = tmp_raw_cons;
1707
1708         return rc;
1709 }
1710
1711 /* In netpoll mode, if we are using a combined completion ring, we need to
1712  * discard the rx packets and recycle the buffers.
1713  */
1714 static int bnxt_force_rx_discard(struct bnxt *bp,
1715                                  struct bnxt_cp_ring_info *cpr,
1716                                  u32 *raw_cons, u8 *event)
1717 {
1718         u32 tmp_raw_cons = *raw_cons;
1719         struct rx_cmp_ext *rxcmp1;
1720         struct rx_cmp *rxcmp;
1721         u16 cp_cons;
1722         u8 cmp_type;
1723
1724         cp_cons = RING_CMP(tmp_raw_cons);
1725         rxcmp = (struct rx_cmp *)
1726                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1727
1728         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1729         cp_cons = RING_CMP(tmp_raw_cons);
1730         rxcmp1 = (struct rx_cmp_ext *)
1731                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1732
1733         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1734                 return -EBUSY;
1735
1736         cmp_type = RX_CMP_TYPE(rxcmp);
1737         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1738                 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1739                         cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1740         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1741                 struct rx_tpa_end_cmp_ext *tpa_end1;
1742
1743                 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1744                 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1745                         cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1746         }
1747         return bnxt_rx_pkt(bp, cpr, raw_cons, event);
1748 }
1749
1750 #define BNXT_GET_EVENT_PORT(data)       \
1751         ((data) &                       \
1752          ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
1753
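/* Translate a firmware async event completion into the corresponding
 * sp_event bit and schedule the slow-path workqueue.  ULP drivers are
 * notified of the event in all cases.
 */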
1754 static int bnxt_async_event_process(struct bnxt *bp,
1755                                     struct hwrm_async_event_cmpl *cmpl)
1756 {
1757         u16 event_id = le16_to_cpu(cmpl->event_id);
1758
1759         /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
1760         switch (event_id) {
1761         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
1762                 u32 data1 = le32_to_cpu(cmpl->event_data1);
1763                 struct bnxt_link_info *link_info = &bp->link_info;
1764
1765                 if (BNXT_VF(bp))
1766                         goto async_event_process_exit;
1767
1768                 /* print unsupported speed warning in forced speed mode only */
1769                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
1770                     (data1 & 0x20000)) {
1771                         u16 fw_speed = link_info->force_link_speed;
1772                         u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1773
1774                         if (speed != SPEED_UNKNOWN)
1775                                 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1776                                             speed);
1777                 }
1778                 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
1779         }
1780         /* fall through */
1781         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1782                 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
1783                 break;
1784         case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
1785                 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
1786                 break;
1787         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
1788                 u32 data1 = le32_to_cpu(cmpl->event_data1);
1789                 u16 port_id = BNXT_GET_EVENT_PORT(data1);
1790
1791                 if (BNXT_VF(bp))
1792                         break;
1793
1794                 if (bp->pf.port_id != port_id)
1795                         break;
1796
1797                 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1798                 break;
1799         }
1800         case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
1801                 if (BNXT_PF(bp))
1802                         goto async_event_process_exit;
1803                 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
1804                 break;
1805         default:
1806                 goto async_event_process_exit;
1807         }
1808         bnxt_queue_sp_work(bp);
1809 async_event_process_exit:
1810         bnxt_ulp_async_events(bp, cmpl);
1811         return 0;
1812 }
1813
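/* Dispatch HWRM-related completions seen on the completion ring: DONE
 * completions acknowledge an interrupt-driven firmware command, FWD_REQ
 * completions queue a forwarded VF request for the PF to execute, and
 * async event completions are passed to bnxt_async_event_process().
 */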
1814 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
1815 {
1816         u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
1817         struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
1818         struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
1819                                 (struct hwrm_fwd_req_cmpl *)txcmp;
1820
1821         switch (cmpl_type) {
1822         case CMPL_BASE_TYPE_HWRM_DONE:
1823                 seq_id = le16_to_cpu(h_cmpl->sequence_id);
1824                 if (seq_id == bp->hwrm_intr_seq_id)
1825                         bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
1826                 else
1827                         netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
1828                 break;
1829
1830         case CMPL_BASE_TYPE_HWRM_FWD_REQ:
1831                 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
1832
1833                 if ((vf_id < bp->pf.first_vf_id) ||
1834                     (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
1835                         netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
1836                                    vf_id);
1837                         return -EINVAL;
1838                 }
1839
1840                 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
1841                 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
1842                 bnxt_queue_sp_work(bp);
1843                 break;
1844
1845         case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
1846                 bnxt_async_event_process(bp,
1847                                          (struct hwrm_async_event_cmpl *)txcmp);
1848
1849         default:
1850                 break;
1851         }
1852
1853         return 0;
1854 }
1855
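/* MSI-X interrupt handler: one vector per NAPI instance.  Prefetch the
 * next completion entry and schedule NAPI.
 */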
1856 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
1857 {
1858         struct bnxt_napi *bnapi = dev_instance;
1859         struct bnxt *bp = bnapi->bp;
1860         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1861         u32 cons = RING_CMP(cpr->cp_raw_cons);
1862
1863         cpr->event_ctr++;
1864         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1865         napi_schedule(&bnapi->napi);
1866         return IRQ_HANDLED;
1867 }
1868
1869 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1870 {
1871         u32 raw_cons = cpr->cp_raw_cons;
1872         u16 cons = RING_CMP(raw_cons);
1873         struct tx_cmp *txcmp;
1874
1875         txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1876
1877         return TX_CMP_VALID(txcmp, raw_cons);
1878 }
1879
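/* Legacy INTx interrupt handler.  When no work is pending, the legacy
 * interrupt status register is consulted to reject interrupts that do not
 * belong to this ring.  The ring's IRQ is then masked via the doorbell
 * and NAPI is scheduled, unless interrupts are logically disabled
 * (bp->intr_sem != 0).
 */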
1880 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
1881 {
1882         struct bnxt_napi *bnapi = dev_instance;
1883         struct bnxt *bp = bnapi->bp;
1884         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1885         u32 cons = RING_CMP(cpr->cp_raw_cons);
1886         u32 int_status;
1887
1888         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1889
1890         if (!bnxt_has_work(bp, cpr)) {
1891                 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
1892                 /* return if erroneous interrupt */
1893                 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
1894                         return IRQ_NONE;
1895         }
1896
1897         /* disable ring IRQ */
1898         BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
1899
1900         /* Return here if interrupt is shared and is disabled. */
1901         if (unlikely(atomic_read(&bp->intr_sem) != 0))
1902                 return IRQ_HANDLED;
1903
1904         napi_schedule(&bnapi->napi);
1905         return IRQ_HANDLED;
1906 }
1907
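/* Core completion-ring poll loop shared by the NAPI handlers.  Walk the
 * valid entries: count TX completions, process RX packets via
 * bnxt_rx_pkt() (or discard them when called with a zero budget), and
 * dispatch HWRM/async completions, stopping once the budget is consumed.
 * TX reclaim and RX/AGG doorbells are deferred to __bnxt_poll_work_done();
 * only a TX doorbell for BDs produced during the poll (BNXT_TX_EVENT) is
 * written here.
 */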
1908 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1909                             int budget)
1910 {
1911         struct bnxt_napi *bnapi = cpr->bnapi;
1912         u32 raw_cons = cpr->cp_raw_cons;
1913         u32 cons;
1914         int tx_pkts = 0;
1915         int rx_pkts = 0;
1916         u8 event = 0;
1917         struct tx_cmp *txcmp;
1918
1919         cpr->has_more_work = 0;
1920         while (1) {
1921                 int rc;
1922
1923                 cons = RING_CMP(raw_cons);
1924                 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1925
1926                 if (!TX_CMP_VALID(txcmp, raw_cons))
1927                         break;
1928
1929                 /* The valid test of the entry must be done first before
1930                  * reading any further.
1931                  */
1932                 dma_rmb();
1933                 cpr->had_work_done = 1;
1934                 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1935                         tx_pkts++;
1936                         /* return full budget so NAPI will complete. */
1937                         if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
1938                                 rx_pkts = budget;
1939                                 raw_cons = NEXT_RAW_CMP(raw_cons);
1940                                 if (budget)
1941                                         cpr->has_more_work = 1;
1942                                 break;
1943                         }
1944                 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1945                         if (likely(budget))
1946                                 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
1947                         else
1948                                 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
1949                                                            &event);
1950                         if (likely(rc >= 0))
1951                                 rx_pkts += rc;
1952                         /* Increment rx_pkts when rc is -ENOMEM to count towards
1953                          * the NAPI budget.  Otherwise, we may potentially loop
1954                          * here forever if we consistently cannot allocate
1955                          * buffers.
1956                          */
1957                         else if (rc == -ENOMEM && budget)
1958                                 rx_pkts++;
1959                         else if (rc == -EBUSY)  /* partial completion */
1960                                 break;
1961                 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
1962                                      CMPL_BASE_TYPE_HWRM_DONE) ||
1963                                     (TX_CMP_TYPE(txcmp) ==
1964                                      CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
1965                                     (TX_CMP_TYPE(txcmp) ==
1966                                      CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
1967                         bnxt_hwrm_handler(bp, txcmp);
1968                 }
1969                 raw_cons = NEXT_RAW_CMP(raw_cons);
1970
1971                 if (rx_pkts && rx_pkts == budget) {
1972                         cpr->has_more_work = 1;
1973                         break;
1974                 }
1975         }
1976
1977         if (event & BNXT_TX_EVENT) {
1978                 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
1979                 u16 prod = txr->tx_prod;
1980
1981                 /* Sync BD data before updating doorbell */
1982                 wmb();
1983
1984                 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
1985         }
1986
1987         cpr->cp_raw_cons = raw_cons;
1988         bnapi->tx_pkts += tx_pkts;
1989         bnapi->events |= event;
1990         return rx_pkts;
1991 }
1992
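/* Finish work deferred by __bnxt_poll_work(): reclaim completed TX
 * buffers and ring the RX and aggregation doorbells for buffers
 * replenished during the poll.
 */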
1993 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
1994 {
1995         if (bnapi->tx_pkts) {
1996                 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
1997                 bnapi->tx_pkts = 0;
1998         }
1999
2000         if (bnapi->events & BNXT_RX_EVENT) {
2001                 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2002
2003                 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2004                 if (bnapi->events & BNXT_AGG_EVENT)
2005                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2006         }
2007         bnapi->events = 0;
2008 }
2009
2010 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2011                           int budget)
2012 {
2013         struct bnxt_napi *bnapi = cpr->bnapi;
2014         int rx_pkts;
2015
2016         rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2017
2018         /* ACK completion ring before freeing tx ring and producing new
2019          * buffers in rx/agg rings to prevent overflowing the completion
2020          * ring.
2021          */
2022         bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2023
2024         __bnxt_poll_work_done(bp, bnapi);
2025         return rx_pkts;
2026 }
2027
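/* NAPI handler for the special Nitro A0 completion ring.  RX completions
 * seen here are forced to look like CRC errors so that bnxt_rx_pkt()
 * recycles the buffers instead of delivering the packets; HWRM DONE
 * completions are handled normally.
 */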
2028 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2029 {
2030         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2031         struct bnxt *bp = bnapi->bp;
2032         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2033         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2034         struct tx_cmp *txcmp;
2035         struct rx_cmp_ext *rxcmp1;
2036         u32 cp_cons, tmp_raw_cons;
2037         u32 raw_cons = cpr->cp_raw_cons;
2038         u32 rx_pkts = 0;
2039         u8 event = 0;
2040
2041         while (1) {
2042                 int rc;
2043
2044                 cp_cons = RING_CMP(raw_cons);
2045                 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2046
2047                 if (!TX_CMP_VALID(txcmp, raw_cons))
2048                         break;
2049
2050                 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2051                         tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2052                         cp_cons = RING_CMP(tmp_raw_cons);
2053                         rxcmp1 = (struct rx_cmp_ext *)
2054                           &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2055
2056                         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2057                                 break;
2058
2059                         /* force an error to recycle the buffer */
2060                         rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2061                                 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2062
2063                         rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2064                         if (likely(rc == -EIO) && budget)
2065                                 rx_pkts++;
2066                         else if (rc == -EBUSY)  /* partial completion */
2067                                 break;
2068                 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2069                                     CMPL_BASE_TYPE_HWRM_DONE)) {
2070                         bnxt_hwrm_handler(bp, txcmp);
2071                 } else {
2072                         netdev_err(bp->dev,
2073                                    "Invalid completion received on special ring\n");
2074                 }
2075                 raw_cons = NEXT_RAW_CMP(raw_cons);
2076
2077                 if (rx_pkts == budget)
2078                         break;
2079         }
2080
2081         cpr->cp_raw_cons = raw_cons;
2082         BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2083         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2084
2085         if (event & BNXT_AGG_EVENT)
2086                 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2087
2088         if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2089                 napi_complete_done(napi, rx_pkts);
2090                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2091         }
2092         return rx_pkts;
2093 }
2094
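/* Main NAPI poll routine for chips that use a single combined completion
 * ring per vector.  Poll until the budget is consumed or no work remains,
 * then complete NAPI and re-arm the completion ring interrupt.  If
 * dynamic interrupt moderation is enabled, feed a sample to net_dim().
 */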
2095 static int bnxt_poll(struct napi_struct *napi, int budget)
2096 {
2097         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2098         struct bnxt *bp = bnapi->bp;
2099         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2100         int work_done = 0;
2101
2102         while (1) {
2103                 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2104
2105                 if (work_done >= budget) {
2106                         if (!budget)
2107                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2108                         break;
2109                 }
2110
2111                 if (!bnxt_has_work(bp, cpr)) {
2112                         if (napi_complete_done(napi, work_done))
2113                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2114                         break;
2115                 }
2116         }
2117         if (bp->flags & BNXT_FLAG_DIM) {
2118                 struct net_dim_sample dim_sample;
2119
2120                 net_dim_sample(cpr->event_ctr,
2121                                cpr->rx_packets,
2122                                cpr->rx_bytes,
2123                                &dim_sample);
2124                 net_dim(&cpr->dim, dim_sample);
2125         }
2126         mmiowb();
2127         return work_done;
2128 }
2129
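/* Poll both completion sub-rings (RX and TX handlers) attached to this
 * NAPI instance's notification queue, accumulating the work done and
 * propagating has_more_work.
 */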
2130 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2131 {
2132         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2133         int i, work_done = 0;
2134
2135         for (i = 0; i < 2; i++) {
2136                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2137
2138                 if (cpr2) {
2139                         work_done += __bnxt_poll_work(bp, cpr2,
2140                                                       budget - work_done);
2141                         cpr->has_more_work |= cpr2->has_more_work;
2142                 }
2143         }
2144         return work_done;
2145 }
2146
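/* Write a doorbell of the requested type for every completion sub-ring
 * that did work (or for all sub-rings when @all is set), then complete
 * the deferred TX/RX work for this NAPI instance.
 */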
2147 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2148                                  u64 dbr_type, bool all)
2149 {
2150         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2151         int i;
2152
2153         for (i = 0; i < 2; i++) {
2154                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2155                 struct bnxt_db_info *db;
2156
2157                 if (cpr2 && (all || cpr2->had_work_done)) {
2158                         db = &cpr2->cp_db;
2159                         writeq(db->db_key64 | dbr_type |
2160                                RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2161                         cpr2->had_work_done = 0;
2162                 }
2163         }
2164         __bnxt_poll_work_done(bp, bnapi);
2165 }
2166
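/* NAPI poll routine for P5 chips, which use a notification queue (NQ).
 * First drain any work left over from a previous poll; then consume NQ
 * entries, polling the completion ring referenced by each CQ notification
 * until the budget is exhausted, and finally acknowledge the rings and
 * re-arm the NQ when all work is done.
 */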
2167 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2168 {
2169         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2170         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2171         u32 raw_cons = cpr->cp_raw_cons;
2172         struct bnxt *bp = bnapi->bp;
2173         struct nqe_cn *nqcmp;
2174         int work_done = 0;
2175         u32 cons;
2176
2177         if (cpr->has_more_work) {
2178                 cpr->has_more_work = 0;
2179                 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2180                 if (cpr->has_more_work) {
2181                         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false);
2182                         return work_done;
2183                 }
2184                 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true);
2185                 if (napi_complete_done(napi, work_done))
2186                         BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons);
2187                 return work_done;
2188         }
2189         while (1) {
2190                 cons = RING_CMP(raw_cons);
2191                 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2192
2193                 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2194                         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
2195                                              false);
2196                         cpr->cp_raw_cons = raw_cons;
2197                         if (napi_complete_done(napi, work_done))
2198                                 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2199                                                   cpr->cp_raw_cons);
2200                         return work_done;
2201                 }
2202
2203                 /* The valid test of the entry must be done first before
2204                  * reading any further.
2205                  */
2206                 dma_rmb();
2207
2208                 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2209                         u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2210                         struct bnxt_cp_ring_info *cpr2;
2211
2212                         cpr2 = cpr->cp_ring_arr[idx];
2213                         work_done += __bnxt_poll_work(bp, cpr2,
2214                                                       budget - work_done);
2215                         cpr->has_more_work = cpr2->has_more_work;
2216                 } else {
2217                         bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2218                 }
2219                 raw_cons = NEXT_RAW_CMP(raw_cons);
2220                 if (cpr->has_more_work)
2221                         break;
2222         }
2223         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true);
2224         cpr->cp_raw_cons = raw_cons;
2225         return work_done;
2226 }
2227
2228 static void bnxt_free_tx_skbs(struct bnxt *bp)
2229 {
2230         int i, max_idx;
2231         struct pci_dev *pdev = bp->pdev;
2232
2233         if (!bp->tx_ring)
2234                 return;
2235
2236         max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2237         for (i = 0; i < bp->tx_nr_rings; i++) {
2238                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2239                 int j;
2240
2241                 for (j = 0; j < max_idx;) {
2242                         struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2243                         struct sk_buff *skb = tx_buf->skb;
2244                         int k, last;
2245
2246                         if (!skb) {
2247                                 j++;
2248                                 continue;
2249                         }
2250
2251                         tx_buf->skb = NULL;
2252
2253                         if (tx_buf->is_push) {
2254                                 dev_kfree_skb(skb);
2255                                 j += 2;
2256                                 continue;
2257                         }
2258
2259                         dma_unmap_single(&pdev->dev,
2260                                          dma_unmap_addr(tx_buf, mapping),
2261                                          skb_headlen(skb),
2262                                          PCI_DMA_TODEVICE);
2263
2264                         last = tx_buf->nr_frags;
2265                         j += 2;
2266                         for (k = 0; k < last; k++, j++) {
2267                                 int ring_idx = j & bp->tx_ring_mask;
2268                                 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2269
2270                                 tx_buf = &txr->tx_buf_ring[ring_idx];
2271                                 dma_unmap_page(
2272                                         &pdev->dev,
2273                                         dma_unmap_addr(tx_buf, mapping),
2274                                         skb_frag_size(frag), PCI_DMA_TODEVICE);
2275                         }
2276                         dev_kfree_skb(skb);
2277                 }
2278                 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2279         }
2280 }
2281
2282 static void bnxt_free_rx_skbs(struct bnxt *bp)
2283 {
2284         int i, max_idx, max_agg_idx;
2285         struct pci_dev *pdev = bp->pdev;
2286
2287         if (!bp->rx_ring)
2288                 return;
2289
2290         max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2291         max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2292         for (i = 0; i < bp->rx_nr_rings; i++) {
2293                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2294                 int j;
2295
2296                 if (rxr->rx_tpa) {
2297                         for (j = 0; j < MAX_TPA; j++) {
2298                                 struct bnxt_tpa_info *tpa_info =
2299                                                         &rxr->rx_tpa[j];
2300                                 u8 *data = tpa_info->data;
2301
2302                                 if (!data)
2303                                         continue;
2304
2305                                 dma_unmap_single_attrs(&pdev->dev,
2306                                                        tpa_info->mapping,
2307                                                        bp->rx_buf_use_size,
2308                                                        bp->rx_dir,
2309                                                        DMA_ATTR_WEAK_ORDERING);
2310
2311                                 tpa_info->data = NULL;
2312
2313                                 kfree(data);
2314                         }
2315                 }
2316
2317                 for (j = 0; j < max_idx; j++) {
2318                         struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
2319                         dma_addr_t mapping = rx_buf->mapping;
2320                         void *data = rx_buf->data;
2321
2322                         if (!data)
2323                                 continue;
2324
2325                         rx_buf->data = NULL;
2326
2327                         if (BNXT_RX_PAGE_MODE(bp)) {
2328                                 mapping -= bp->rx_dma_offset;
2329                                 dma_unmap_page_attrs(&pdev->dev, mapping,
2330                                                      PAGE_SIZE, bp->rx_dir,
2331                                                      DMA_ATTR_WEAK_ORDERING);
2332                                 __free_page(data);
2333                         } else {
2334                                 dma_unmap_single_attrs(&pdev->dev, mapping,
2335                                                        bp->rx_buf_use_size,
2336                                                        bp->rx_dir,
2337                                                        DMA_ATTR_WEAK_ORDERING);
2338                                 kfree(data);
2339                         }
2340                 }
2341
2342                 for (j = 0; j < max_agg_idx; j++) {
2343                         struct bnxt_sw_rx_agg_bd *rx_agg_buf =
2344                                 &rxr->rx_agg_ring[j];
2345                         struct page *page = rx_agg_buf->page;
2346
2347                         if (!page)
2348                                 continue;
2349
2350                         dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2351                                              BNXT_RX_PAGE_SIZE,
2352                                              PCI_DMA_FROMDEVICE,
2353                                              DMA_ATTR_WEAK_ORDERING);
2354
2355                         rx_agg_buf->page = NULL;
2356                         __clear_bit(j, rxr->rx_agg_bmap);
2357
2358                         __free_page(page);
2359                 }
2360                 if (rxr->rx_page) {
2361                         __free_page(rxr->rx_page);
2362                         rxr->rx_page = NULL;
2363                 }
2364         }
2365 }
2366
2367 static void bnxt_free_skbs(struct bnxt *bp)
2368 {
2369         bnxt_free_tx_skbs(bp);
2370         bnxt_free_rx_skbs(bp);
2371 }
2372
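/* Free the DMA-coherent descriptor pages of a ring, its optional page
 * table (used when the ring spans multiple pages), and the vmalloc'ed
 * software ring that shadows it.
 */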
2373 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2374 {
2375         struct pci_dev *pdev = bp->pdev;
2376         int i;
2377
2378         for (i = 0; i < rmem->nr_pages; i++) {
2379                 if (!rmem->pg_arr[i])
2380                         continue;
2381
2382                 dma_free_coherent(&pdev->dev, rmem->page_size,
2383                                   rmem->pg_arr[i], rmem->dma_arr[i]);
2384
2385                 rmem->pg_arr[i] = NULL;
2386         }
2387         if (rmem->pg_tbl) {
2388                 size_t pg_tbl_size = rmem->nr_pages * 8;
2389
2390                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2391                         pg_tbl_size = rmem->page_size;
2392                 dma_free_coherent(&pdev->dev, pg_tbl_size,
2393                                   rmem->pg_tbl, rmem->pg_tbl_map);
2394                 rmem->pg_tbl = NULL;
2395         }
2396         if (rmem->vmem_size && *rmem->vmem) {
2397                 vfree(*rmem->vmem);
2398                 *rmem->vmem = NULL;
2399         }
2400 }
2401
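/* Allocate the memory described by @rmem: a page-table page when the ring
 * needs indirection, one DMA-coherent page per ring page (with PTU PTE
 * valid/last bits filled in for PTE-based rings), and an optional zeroed
 * vmalloc area for the software ring.
 */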
2402 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2403 {
2404         struct pci_dev *pdev = bp->pdev;
2405         u64 valid_bit = 0;
2406         int i;
2407
2408         if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2409                 valid_bit = PTU_PTE_VALID;
2410         if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2411                 size_t pg_tbl_size = rmem->nr_pages * 8;
2412
2413                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2414                         pg_tbl_size = rmem->page_size;
2415                 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2416                                                   &rmem->pg_tbl_map,
2417                                                   GFP_KERNEL);
2418                 if (!rmem->pg_tbl)
2419                         return -ENOMEM;
2420         }
2421
2422         for (i = 0; i < rmem->nr_pages; i++) {
2423                 u64 extra_bits = valid_bit;
2424
2425                 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2426                                                      rmem->page_size,
2427                                                      &rmem->dma_arr[i],
2428                                                      GFP_KERNEL);
2429                 if (!rmem->pg_arr[i])
2430                         return -ENOMEM;
2431
2432                 if (rmem->nr_pages > 1 || rmem->depth > 0) {
2433                         if (i == rmem->nr_pages - 2 &&
2434                             (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2435                                 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2436                         else if (i == rmem->nr_pages - 1 &&
2437                                  (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2438                                 extra_bits |= PTU_PTE_LAST;
2439                         rmem->pg_tbl[i] =
2440                                 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2441                 }
2442         }
2443
2444         if (rmem->vmem_size) {
2445                 *rmem->vmem = vzalloc(rmem->vmem_size);
2446                 if (!(*rmem->vmem))
2447                         return -ENOMEM;
2448         }
2449         return 0;
2450 }
2451
2452 static void bnxt_free_rx_rings(struct bnxt *bp)
2453 {
2454         int i;
2455
2456         if (!bp->rx_ring)
2457                 return;
2458
2459         for (i = 0; i < bp->rx_nr_rings; i++) {
2460                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2461                 struct bnxt_ring_struct *ring;
2462
2463                 if (rxr->xdp_prog)
2464                         bpf_prog_put(rxr->xdp_prog);
2465
2466                 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2467                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
2468
2469                 kfree(rxr->rx_tpa);
2470                 rxr->rx_tpa = NULL;
2471
2472                 kfree(rxr->rx_agg_bmap);
2473                 rxr->rx_agg_bmap = NULL;
2474
2475                 ring = &rxr->rx_ring_struct;
2476                 bnxt_free_ring(bp, &ring->ring_mem);
2477
2478                 ring = &rxr->rx_agg_ring_struct;
2479                 bnxt_free_ring(bp, &ring->ring_mem);
2480         }
2481 }
2482
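/* Allocate per-RX-ring resources: register the XDP rxq info, allocate the
 * RX descriptor ring and, when aggregation rings are enabled, the
 * aggregation ring plus its buffer bitmap, and the TPA state array when
 * TPA is enabled.
 */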
2483 static int bnxt_alloc_rx_rings(struct bnxt *bp)
2484 {
2485         int i, rc, agg_rings = 0, tpa_rings = 0;
2486
2487         if (!bp->rx_ring)
2488                 return -ENOMEM;
2489
2490         if (bp->flags & BNXT_FLAG_AGG_RINGS)
2491                 agg_rings = 1;
2492
2493         if (bp->flags & BNXT_FLAG_TPA)
2494                 tpa_rings = 1;
2495
2496         for (i = 0; i < bp->rx_nr_rings; i++) {
2497                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2498                 struct bnxt_ring_struct *ring;
2499
2500                 ring = &rxr->rx_ring_struct;
2501
2502                 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
2503                 if (rc < 0)
2504                         return rc;
2505
2506                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2507                 if (rc)
2508                         return rc;
2509
2510                 ring->grp_idx = i;
2511                 if (agg_rings) {
2512                         u16 mem_size;
2513
2514                         ring = &rxr->rx_agg_ring_struct;
2515                         rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2516                         if (rc)
2517                                 return rc;
2518
2519                         ring->grp_idx = i;
2520                         rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2521                         mem_size = rxr->rx_agg_bmap_size / 8;
2522                         rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2523                         if (!rxr->rx_agg_bmap)
2524                                 return -ENOMEM;
2525
2526                         if (tpa_rings) {
2527                                 rxr->rx_tpa = kcalloc(MAX_TPA,
2528                                                 sizeof(struct bnxt_tpa_info),
2529                                                 GFP_KERNEL);
2530                                 if (!rxr->rx_tpa)
2531                                         return -ENOMEM;
2532                         }
2533                 }
2534         }
2535         return 0;
2536 }
2537
2538 static void bnxt_free_tx_rings(struct bnxt *bp)
2539 {
2540         int i;
2541         struct pci_dev *pdev = bp->pdev;
2542
2543         if (!bp->tx_ring)
2544                 return;
2545
2546         for (i = 0; i < bp->tx_nr_rings; i++) {
2547                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2548                 struct bnxt_ring_struct *ring;
2549
2550                 if (txr->tx_push) {
2551                         dma_free_coherent(&pdev->dev, bp->tx_push_size,
2552                                           txr->tx_push, txr->tx_push_mapping);
2553                         txr->tx_push = NULL;
2554                 }
2555
2556                 ring = &txr->tx_ring_struct;
2557
2558                 bnxt_free_ring(bp, &ring->ring_mem);
2559         }
2560 }
2561
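/* Allocate the TX descriptor rings.  When TX push is enabled, each ring
 * also gets a small coherent buffer used to stage push BDs (push is
 * disabled if the required size exceeds 256 bytes).  Each ring is mapped
 * to its hardware queue id based on the traffic class.
 */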
2562 static int bnxt_alloc_tx_rings(struct bnxt *bp)
2563 {
2564         int i, j, rc;
2565         struct pci_dev *pdev = bp->pdev;
2566
2567         bp->tx_push_size = 0;
2568         if (bp->tx_push_thresh) {
2569                 int push_size;
2570
2571                 push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2572                                         bp->tx_push_thresh);
2573
2574                 if (push_size > 256) {
2575                         push_size = 0;
2576                         bp->tx_push_thresh = 0;
2577                 }
2578
2579                 bp->tx_push_size = push_size;
2580         }
2581
2582         for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
2583                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2584                 struct bnxt_ring_struct *ring;
2585                 u8 qidx;
2586
2587                 ring = &txr->tx_ring_struct;
2588
2589                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2590                 if (rc)
2591                         return rc;
2592
2593                 ring->grp_idx = txr->bnapi->index;
2594                 if (bp->tx_push_size) {
2595                         dma_addr_t mapping;
2596
2597                         /* One pre-allocated DMA buffer to back up
2598                          * the TX push operation
2599                          */
2600                         txr->tx_push = dma_alloc_coherent(&pdev->dev,
2601                                                 bp->tx_push_size,
2602                                                 &txr->tx_push_mapping,
2603                                                 GFP_KERNEL);
2604
2605                         if (!txr->tx_push)
2606                                 return -ENOMEM;
2607
2608                         mapping = txr->tx_push_mapping +
2609                                 sizeof(struct tx_push_bd);
2610                         txr->data_mapping = cpu_to_le64(mapping);
2611
2612                         memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
2613                 }
2614                 qidx = bp->tc_to_qidx[j];
2615                 ring->queue_id = bp->q_info[qidx].queue_id;
2616                 if (i < bp->tx_nr_rings_xdp)
2617                         continue;
2618                 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2619                         j++;
2620         }
2621         return 0;
2622 }
2623
2624 static void bnxt_free_cp_rings(struct bnxt *bp)
2625 {
2626         int i;
2627
2628         if (!bp->bnapi)
2629                 return;
2630
2631         for (i = 0; i < bp->cp_nr_rings; i++) {
2632                 struct bnxt_napi *bnapi = bp->bnapi[i];
2633                 struct bnxt_cp_ring_info *cpr;
2634                 struct bnxt_ring_struct *ring;
2635                 int j;
2636
2637                 if (!bnapi)
2638                         continue;
2639
2640                 cpr = &bnapi->cp_ring;
2641                 ring = &cpr->cp_ring_struct;
2642
2643                 bnxt_free_ring(bp, &ring->ring_mem);
2644
2645                 for (j = 0; j < 2; j++) {
2646                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
2647
2648                         if (cpr2) {
2649                                 ring = &cpr2->cp_ring_struct;
2650                                 bnxt_free_ring(bp, &ring->ring_mem);
2651                                 kfree(cpr2);
2652                                 cpr->cp_ring_arr[j] = NULL;
2653                         }
2654                 }
2655         }
2656 }
2657
2658 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
2659 {
2660         struct bnxt_ring_mem_info *rmem;
2661         struct bnxt_ring_struct *ring;
2662         struct bnxt_cp_ring_info *cpr;
2663         int rc;
2664
2665         cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
2666         if (!cpr)
2667                 return NULL;
2668
2669         ring = &cpr->cp_ring_struct;
2670         rmem = &ring->ring_mem;
2671         rmem->nr_pages = bp->cp_nr_pages;
2672         rmem->page_size = HW_CMPD_RING_SIZE;
2673         rmem->pg_arr = (void **)cpr->cp_desc_ring;
2674         rmem->dma_arr = cpr->cp_desc_mapping;
2675         rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
2676         rc = bnxt_alloc_ring(bp, rmem);
2677         if (rc) {
2678                 bnxt_free_ring(bp, rmem);
2679                 kfree(cpr);
2680                 cpr = NULL;
2681         }
2682         return cpr;
2683 }
2684
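/* Allocate the per-NAPI completion ring (the notification queue on P5
 * chips) and, on P5 chips, the RX and/or TX completion sub-rings hanging
 * off it.  The MSI-X map index is adjusted to skip vectors reserved for
 * the ULP driver.
 */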
2685 static int bnxt_alloc_cp_rings(struct bnxt *bp)
2686 {
2687         bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
2688         int i, rc, ulp_base_vec, ulp_msix;
2689
2690         ulp_msix = bnxt_get_ulp_msix_num(bp);
2691         ulp_base_vec = bnxt_get_ulp_msix_base(bp);
2692         for (i = 0; i < bp->cp_nr_rings; i++) {
2693                 struct bnxt_napi *bnapi = bp->bnapi[i];
2694                 struct bnxt_cp_ring_info *cpr;
2695                 struct bnxt_ring_struct *ring;
2696
2697                 if (!bnapi)
2698                         continue;
2699
2700                 cpr = &bnapi->cp_ring;
2701                 cpr->bnapi = bnapi;
2702                 ring = &cpr->cp_ring_struct;
2703
2704                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2705                 if (rc)
2706                         return rc;
2707
2708                 if (ulp_msix && i >= ulp_base_vec)
2709                         ring->map_idx = i + ulp_msix;
2710                 else
2711                         ring->map_idx = i;
2712
2713                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2714                         continue;
2715
2716                 if (i < bp->rx_nr_rings) {
2717                         struct bnxt_cp_ring_info *cpr2 =
2718                                 bnxt_alloc_cp_sub_ring(bp);
2719
2720                         cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
2721                         if (!cpr2)
2722                                 return -ENOMEM;
2723                         cpr2->bnapi = bnapi;
2724                 }
2725                 if ((sh && i < bp->tx_nr_rings) ||
2726                     (!sh && i >= bp->rx_nr_rings)) {
2727                         struct bnxt_cp_ring_info *cpr2 =
2728                                 bnxt_alloc_cp_sub_ring(bp);
2729
2730                         cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
2731                         if (!cpr2)
2732                                 return -ENOMEM;
2733                         cpr2->bnapi = bnapi;
2734                 }
2735         }
2736         return 0;
2737 }
2738
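/* Populate the ring_mem descriptors (page counts, page sizes, descriptor
 * and DMA address arrays, software ring sizes) for every completion, RX,
 * RX aggregation and TX ring.
 */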
2739 static void bnxt_init_ring_struct(struct bnxt *bp)
2740 {
2741         int i;
2742
2743         for (i = 0; i < bp->cp_nr_rings; i++) {
2744                 struct bnxt_napi *bnapi = bp->bnapi[i];
2745                 struct bnxt_ring_mem_info *rmem;
2746                 struct bnxt_cp_ring_info *cpr;
2747                 struct bnxt_rx_ring_info *rxr;
2748                 struct bnxt_tx_ring_info *txr;
2749                 struct bnxt_ring_struct *ring;
2750
2751                 if (!bnapi)
2752                         continue;
2753
2754                 cpr = &bnapi->cp_ring;
2755                 ring = &cpr->cp_ring_struct;
2756                 rmem = &ring->ring_mem;
2757                 rmem->nr_pages = bp->cp_nr_pages;
2758                 rmem->page_size = HW_CMPD_RING_SIZE;
2759                 rmem->pg_arr = (void **)cpr->cp_desc_ring;
2760                 rmem->dma_arr = cpr->cp_desc_mapping;
2761                 rmem->vmem_size = 0;
2762
2763                 rxr = bnapi->rx_ring;
2764                 if (!rxr)
2765                         goto skip_rx;
2766
2767                 ring = &rxr->rx_ring_struct;
2768                 rmem = &ring->ring_mem;
2769                 rmem->nr_pages = bp->rx_nr_pages;
2770                 rmem->page_size = HW_RXBD_RING_SIZE;
2771                 rmem->pg_arr = (void **)rxr->rx_desc_ring;
2772                 rmem->dma_arr = rxr->rx_desc_mapping;
2773                 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
2774                 rmem->vmem = (void **)&rxr->rx_buf_ring;
2775
2776                 ring = &rxr->rx_agg_ring_struct;
2777                 rmem = &ring->ring_mem;
2778                 rmem->nr_pages = bp->rx_agg_nr_pages;
2779                 rmem->page_size = HW_RXBD_RING_SIZE;
2780                 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
2781                 rmem->dma_arr = rxr->rx_agg_desc_mapping;
2782                 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
2783                 rmem->vmem = (void **)&rxr->rx_agg_ring;
2784
2785 skip_rx:
2786                 txr = bnapi->tx_ring;
2787                 if (!txr)
2788                         continue;
2789
2790                 ring = &txr->tx_ring_struct;
2791                 rmem = &ring->ring_mem;
2792                 rmem->nr_pages = bp->tx_nr_pages;
2793                 rmem->page_size = HW_RXBD_RING_SIZE;   /* tx_bd and rx_bd are the same size */
2794                 rmem->pg_arr = (void **)txr->tx_desc_ring;
2795                 rmem->dma_arr = txr->tx_desc_mapping;
2796                 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
2797                 rmem->vmem = (void **)&txr->tx_buf_ring;
2798         }
2799 }
2800
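/* Stamp every RX buffer descriptor on every page of the ring with the given
 * type/flags word and a running producer index in the opaque field.
 */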
2801 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
2802 {
2803         int i;
2804         u32 prod;
2805         struct rx_bd **rx_buf_ring;
2806
2807         rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
2808         for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
2809                 int j;
2810                 struct rx_bd *rxbd;
2811
2812                 rxbd = rx_buf_ring[i];
2813                 if (!rxbd)
2814                         continue;
2815
2816                 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
2817                         rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
2818                         rxbd->rx_bd_opaque = prod;
2819                 }
2820         }
2821 }
2822
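/* Initialize one RX ring: write the BD templates, take a reference on the
 * XDP program in page mode, fill the ring (and the aggregation ring when
 * enabled) with buffers, and pre-allocate the TPA buffers used for LRO/GRO.
 */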
2823 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
2824 {
2825         struct net_device *dev = bp->dev;
2826         struct bnxt_rx_ring_info *rxr;
2827         struct bnxt_ring_struct *ring;
2828         u32 prod, type;
2829         int i;
2830
2831         type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
2832                 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
2833
2834         if (NET_IP_ALIGN == 2)
2835                 type |= RX_BD_FLAGS_SOP;
2836
2837         rxr = &bp->rx_ring[ring_nr];
2838         ring = &rxr->rx_ring_struct;
2839         bnxt_init_rxbd_pages(ring, type);
2840
2841         if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
2842                 rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
2843                 if (IS_ERR(rxr->xdp_prog)) {
2844                         int rc = PTR_ERR(rxr->xdp_prog);
2845
2846                         rxr->xdp_prog = NULL;
2847                         return rc;
2848                 }
2849         }
2850         prod = rxr->rx_prod;
2851         for (i = 0; i < bp->rx_ring_size; i++) {
2852                 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
2853                         netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
2854                                     ring_nr, i, bp->rx_ring_size);
2855                         break;
2856                 }
2857                 prod = NEXT_RX(prod);
2858         }
2859         rxr->rx_prod = prod;
2860         ring->fw_ring_id = INVALID_HW_RING_ID;
2861
2862         ring = &rxr->rx_agg_ring_struct;
2863         ring->fw_ring_id = INVALID_HW_RING_ID;
2864
2865         if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
2866                 return 0;
2867
2868         type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
2869                 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
2870
2871         bnxt_init_rxbd_pages(ring, type);
2872
2873         prod = rxr->rx_agg_prod;
2874         for (i = 0; i < bp->rx_agg_ring_size; i++) {
2875                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
2876                         netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
2877                                     ring_nr, i, bp->rx_agg_ring_size);
2878                         break;
2879                 }
2880                 prod = NEXT_RX_AGG(prod);
2881         }
2882         rxr->rx_agg_prod = prod;
2883
2884         if (bp->flags & BNXT_FLAG_TPA) {
2885                 if (rxr->rx_tpa) {
2886                         u8 *data;
2887                         dma_addr_t mapping;
2888
2889                         for (i = 0; i < MAX_TPA; i++) {
2890                                 data = __bnxt_alloc_rx_data(bp, &mapping,
2891                                                             GFP_KERNEL);
2892                                 if (!data)
2893                                         return -ENOMEM;
2894
2895                                 rxr->rx_tpa[i].data = data;
2896                                 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
2897                                 rxr->rx_tpa[i].mapping = mapping;
2898                         }
2899                 } else {
2900                         netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
2901                         return -ENOMEM;
2902                 }
2903         }
2904
2905         return 0;
2906 }
2907
2908 static void bnxt_init_cp_rings(struct bnxt *bp)
2909 {
2910         int i, j;
2911
2912         for (i = 0; i < bp->cp_nr_rings; i++) {
2913                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
2914                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
2915
2916                 ring->fw_ring_id = INVALID_HW_RING_ID;
2917                 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
2918                 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
2919                 for (j = 0; j < 2; j++) {
2920                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
2921
2922                         if (!cpr2)
2923                                 continue;
2924
2925                         ring = &cpr2->cp_ring_struct;
2926                         ring->fw_ring_id = INVALID_HW_RING_ID;
2927                         cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
2928                         cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
2929                 }
2930         }
2931 }
2932
2933 static int bnxt_init_rx_rings(struct bnxt *bp)
2934 {
2935         int i, rc = 0;
2936
2937         if (BNXT_RX_PAGE_MODE(bp)) {
2938                 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
2939                 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
2940         } else {
2941                 bp->rx_offset = BNXT_RX_OFFSET;
2942                 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
2943         }
2944
2945         for (i = 0; i < bp->rx_nr_rings; i++) {
2946                 rc = bnxt_init_one_rx_ring(bp, i);
2947                 if (rc)
2948                         break;
2949         }
2950
2951         return rc;
2952 }
2953
2954 static int bnxt_init_tx_rings(struct bnxt *bp)
2955 {
2956         u16 i;
2957
2958         bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
2959                                    MAX_SKB_FRAGS + 1);
2960
2961         for (i = 0; i < bp->tx_nr_rings; i++) {
2962                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2963                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
2964
2965                 ring->fw_ring_id = INVALID_HW_RING_ID;
2966         }
2967
2968         return 0;
2969 }
2970
2971 static void bnxt_free_ring_grps(struct bnxt *bp)
2972 {
2973         kfree(bp->grp_info);
2974         bp->grp_info = NULL;
2975 }
2976
2977 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
2978 {
2979         int i;
2980
2981         if (irq_re_init) {
2982                 bp->grp_info = kcalloc(bp->cp_nr_rings,
2983                                        sizeof(struct bnxt_ring_grp_info),
2984                                        GFP_KERNEL);
2985                 if (!bp->grp_info)
2986                         return -ENOMEM;
2987         }
2988         for (i = 0; i < bp->cp_nr_rings; i++) {
2989                 if (irq_re_init)
2990                         bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
2991                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
2992                 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
2993                 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
2994                 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
2995         }
2996         return 0;
2997 }
2998
2999 static void bnxt_free_vnics(struct bnxt *bp)
3000 {
3001         kfree(bp->vnic_info);
3002         bp->vnic_info = NULL;
3003         bp->nr_vnics = 0;
3004 }
3005
3006 static int bnxt_alloc_vnics(struct bnxt *bp)
3007 {
3008         int num_vnics = 1;
3009
3010 #ifdef CONFIG_RFS_ACCEL
3011         if (bp->flags & BNXT_FLAG_RFS)
3012                 num_vnics += bp->rx_nr_rings;
3013 #endif
3014
3015         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3016                 num_vnics++;
3017
3018         bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3019                                 GFP_KERNEL);
3020         if (!bp->vnic_info)
3021                 return -ENOMEM;
3022
3023         bp->nr_vnics = num_vnics;
3024         return 0;
3025 }
3026
3027 static void bnxt_init_vnics(struct bnxt *bp)
3028 {
3029         int i;
3030
3031         for (i = 0; i < bp->nr_vnics; i++) {
3032                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3033                 int j;
3034
3035                 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3036                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3037                         vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3038
3039                 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3040
3041                 if (bp->vnic_info[i].rss_hash_key) {
3042                         if (i == 0)
3043                                 prandom_bytes(vnic->rss_hash_key,
3044                                               HW_HASH_KEY_SIZE);
3045                         else
3046                                 memcpy(vnic->rss_hash_key,
3047                                        bp->vnic_info[0].rss_hash_key,
3048                                        HW_HASH_KEY_SIZE);
3049                 }
3050         }
3051 }
3052
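/* Convert a ring size in descriptors to a page count rounded up to the next
 * power of two.  For example, ring_size = 200 with 64 descriptors per page
 * gives 200 / 64 + 1 = 4 pages.
 */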
3053 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3054 {
3055         int pages;
3056
3057         pages = ring_size / desc_per_pg;
3058
3059         if (!pages)
3060                 return 1;
3061
3062         pages++;
3063
3064         while (pages & (pages - 1))
3065                 pages++;
3066
3067         return pages;
3068 }
3069
3070 void bnxt_set_tpa_flags(struct bnxt *bp)
3071 {
3072         bp->flags &= ~BNXT_FLAG_TPA;
3073         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3074                 return;
3075         if (bp->dev->features & NETIF_F_LRO)
3076                 bp->flags |= BNXT_FLAG_LRO;
3077         else if (bp->dev->features & NETIF_F_GRO_HW)
3078                 bp->flags |= BNXT_FLAG_GRO;
3079 }
3080
3081 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3082  * be set on entry.
3083  */
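/* With TPA enabled and 4 KB RX pages, agg_factor is min(4, 65536 / 4096) = 4,
 * so 511-entry RX and TX rings, for example, yield a 511 * 4 = 2044 entry
 * aggregation ring and a completion ring sized for 511 * (2 + 4) + 511 = 3577
 * entries before rounding up to whole pages.
 */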
3084 void bnxt_set_ring_params(struct bnxt *bp)
3085 {
3086         u32 ring_size, rx_size, rx_space;
3087         u32 agg_factor = 0, agg_ring_size = 0;
3088
3089         /* 8 for CRC and VLAN */
3090         rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3091
3092         rx_space = rx_size + NET_SKB_PAD +
3093                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3094
3095         bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3096         ring_size = bp->rx_ring_size;
3097         bp->rx_agg_ring_size = 0;
3098         bp->rx_agg_nr_pages = 0;
3099
3100         if (bp->flags & BNXT_FLAG_TPA)
3101                 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3102
3103         bp->flags &= ~BNXT_FLAG_JUMBO;
3104         if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3105                 u32 jumbo_factor;
3106
3107                 bp->flags |= BNXT_FLAG_JUMBO;
3108                 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3109                 if (jumbo_factor > agg_factor)
3110                         agg_factor = jumbo_factor;
3111         }
3112         agg_ring_size = ring_size * agg_factor;
3113
3114         if (agg_ring_size) {
3115                 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3116                                                         RX_DESC_CNT);
3117                 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3118                         u32 tmp = agg_ring_size;
3119
3120                         bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3121                         agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3122                         netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3123                                     tmp, agg_ring_size);
3124                 }
3125                 bp->rx_agg_ring_size = agg_ring_size;
3126                 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3127                 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3128                 rx_space = rx_size + NET_SKB_PAD +
3129                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3130         }
3131
3132         bp->rx_buf_use_size = rx_size;
3133         bp->rx_buf_size = rx_space;
3134
3135         bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3136         bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3137
3138         ring_size = bp->tx_ring_size;
3139         bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3140         bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3141
3142         ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
3143         bp->cp_ring_size = ring_size;
3144
3145         bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3146         if (bp->cp_nr_pages > MAX_CP_PAGES) {
3147                 bp->cp_nr_pages = MAX_CP_PAGES;
3148                 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3149                 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3150                             ring_size, bp->cp_ring_size);
3151         }
3152         bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3153         bp->cp_ring_mask = bp->cp_bit - 1;
3154 }
3155
3156 /* Changing allocation mode of RX rings.
3157  * TODO: Update when extending xdp_rxq_info to support allocation modes.
3158  */
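/* In page mode (used for XDP) each RX buffer is a full page mapped
 * DMA_BIDIRECTIONAL, the MTU is capped at BNXT_MAX_PAGE_MODE_MTU, and
 * aggregation rings (and with them LRO/GRO_HW) are disabled; otherwise the
 * default bnxt_rx_skb receive path is restored.
 */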
3159 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3160 {
3161         if (page_mode) {
3162                 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3163                         return -EOPNOTSUPP;
3164                 bp->dev->max_mtu =
3165                         min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3166                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3167                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3168                 bp->rx_dir = DMA_BIDIRECTIONAL;
3169                 bp->rx_skb_func = bnxt_rx_page_skb;
3170                 /* Disable LRO or GRO_HW */
3171                 netdev_update_features(bp->dev);
3172         } else {
3173                 bp->dev->max_mtu = bp->max_mtu;
3174                 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3175                 bp->rx_dir = DMA_FROM_DEVICE;
3176                 bp->rx_skb_func = bnxt_rx_skb;
3177         }
3178         return 0;
3179 }
3180
3181 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3182 {
3183         int i;
3184         struct bnxt_vnic_info *vnic;
3185         struct pci_dev *pdev = bp->pdev;
3186
3187         if (!bp->vnic_info)
3188                 return;
3189
3190         for (i = 0; i < bp->nr_vnics; i++) {
3191                 vnic = &bp->vnic_info[i];
3192
3193                 kfree(vnic->fw_grp_ids);
3194                 vnic->fw_grp_ids = NULL;
3195
3196                 kfree(vnic->uc_list);
3197                 vnic->uc_list = NULL;
3198
3199                 if (vnic->mc_list) {
3200                         dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3201                                           vnic->mc_list, vnic->mc_list_mapping);
3202                         vnic->mc_list = NULL;
3203                 }
3204
3205                 if (vnic->rss_table) {
3206                         dma_free_coherent(&pdev->dev, PAGE_SIZE,
3207                                           vnic->rss_table,
3208                                           vnic->rss_table_dma_addr);
3209                         vnic->rss_table = NULL;
3210                 }
3211
3212                 vnic->rss_hash_key = NULL;
3213                 vnic->flags = 0;
3214         }
3215 }
3216
3217 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3218 {
3219         int i, rc = 0, size;
3220         struct bnxt_vnic_info *vnic;
3221         struct pci_dev *pdev = bp->pdev;
3222         int max_rings;
3223
3224         for (i = 0; i < bp->nr_vnics; i++) {
3225                 vnic = &bp->vnic_info[i];
3226
3227                 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3228                         int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3229
3230                         if (mem_size > 0) {
3231                                 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3232                                 if (!vnic->uc_list) {
3233                                         rc = -ENOMEM;
3234                                         goto out;
3235                                 }
3236                         }
3237                 }
3238
3239                 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3240                         vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3241                         vnic->mc_list =
3242                                 dma_alloc_coherent(&pdev->dev,
3243                                                    vnic->mc_list_size,
3244                                                    &vnic->mc_list_mapping,
3245                                                    GFP_KERNEL);
3246                         if (!vnic->mc_list) {
3247                                 rc = -ENOMEM;
3248                                 goto out;
3249                         }
3250                 }
3251
3252                 if (bp->flags & BNXT_FLAG_CHIP_P5)
3253                         goto vnic_skip_grps;
3254
3255                 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3256                         max_rings = bp->rx_nr_rings;
3257                 else
3258                         max_rings = 1;
3259
3260                 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3261                 if (!vnic->fw_grp_ids) {
3262                         rc = -ENOMEM;
3263                         goto out;
3264                 }
3265 vnic_skip_grps:
3266                 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3267                     !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3268                         continue;
3269
3270                 /* Allocate rss table and hash key */
3271                 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3272                                                      &vnic->rss_table_dma_addr,
3273                                                      GFP_KERNEL);
3274                 if (!vnic->rss_table) {
3275                         rc = -ENOMEM;
3276                         goto out;
3277                 }
3278
3279                 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3280
3281                 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3282                 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3283         }
3284         return 0;
3285
3286 out:
3287         return rc;
3288 }
3289
3290 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3291 {
3292         struct pci_dev *pdev = bp->pdev;
3293
3294         if (bp->hwrm_cmd_resp_addr) {
3295                 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3296                                   bp->hwrm_cmd_resp_dma_addr);
3297                 bp->hwrm_cmd_resp_addr = NULL;
3298         }
3299
3300         if (bp->hwrm_cmd_kong_resp_addr) {
3301                 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3302                                   bp->hwrm_cmd_kong_resp_addr,
3303                                   bp->hwrm_cmd_kong_resp_dma_addr);
3304                 bp->hwrm_cmd_kong_resp_addr = NULL;
3305         }
3306 }
3307
3308 static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
3309 {
3310         struct pci_dev *pdev = bp->pdev;
3311
3312         bp->hwrm_cmd_kong_resp_addr =
3313                 dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3314                                    &bp->hwrm_cmd_kong_resp_dma_addr,
3315                                    GFP_KERNEL);
3316         if (!bp->hwrm_cmd_kong_resp_addr)
3317                 return -ENOMEM;
3318
3319         return 0;
3320 }
3321
3322 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3323 {
3324         struct pci_dev *pdev = bp->pdev;
3325
3326         bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3327                                                    &bp->hwrm_cmd_resp_dma_addr,
3328                                                    GFP_KERNEL);
3329         if (!bp->hwrm_cmd_resp_addr)
3330                 return -ENOMEM;
3331
3332         return 0;
3333 }
3334
3335 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3336 {
3337         if (bp->hwrm_short_cmd_req_addr) {
3338                 struct pci_dev *pdev = bp->pdev;
3339
3340                 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3341                                   bp->hwrm_short_cmd_req_addr,
3342                                   bp->hwrm_short_cmd_req_dma_addr);
3343                 bp->hwrm_short_cmd_req_addr = NULL;
3344         }
3345 }
3346
3347 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3348 {
3349         struct pci_dev *pdev = bp->pdev;
3350
3351         bp->hwrm_short_cmd_req_addr =
3352                 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3353                                    &bp->hwrm_short_cmd_req_dma_addr,
3354                                    GFP_KERNEL);
3355         if (!bp->hwrm_short_cmd_req_addr)
3356                 return -ENOMEM;
3357
3358         return 0;
3359 }
3360
3361 static void bnxt_free_port_stats(struct bnxt *bp)
3362 {
3363         struct pci_dev *pdev = bp->pdev;
3364
3365         bp->flags &= ~BNXT_FLAG_PORT_STATS;
3366         bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
3367
3368         if (bp->hw_rx_port_stats) {
3369                 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
3370                                   bp->hw_rx_port_stats,
3371                                   bp->hw_rx_port_stats_map);
3372                 bp->hw_rx_port_stats = NULL;
3373         }
3374
3375         if (bp->hw_tx_port_stats_ext) {
3376                 dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext),
3377                                   bp->hw_tx_port_stats_ext,
3378                                   bp->hw_tx_port_stats_ext_map);
3379                 bp->hw_tx_port_stats_ext = NULL;
3380         }
3381
3382         if (bp->hw_rx_port_stats_ext) {
3383                 dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
3384                                   bp->hw_rx_port_stats_ext,
3385                                   bp->hw_rx_port_stats_ext_map);
3386                 bp->hw_rx_port_stats_ext = NULL;
3387         }
3388 }
3389
3390 static void bnxt_free_ring_stats(struct bnxt *bp)
3391 {
3392         struct pci_dev *pdev = bp->pdev;
3393         int size, i;
3394
3395         if (!bp->bnapi)
3396                 return;
3397
3398         size = sizeof(struct ctx_hw_stats);
3399
3400         for (i = 0; i < bp->cp_nr_rings; i++) {
3401                 struct bnxt_napi *bnapi = bp->bnapi[i];
3402                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3403
3404                 if (cpr->hw_stats) {
3405                         dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
3406                                           cpr->hw_stats_map);
3407                         cpr->hw_stats = NULL;
3408                 }
3409         }
3410 }
3411
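/* Allocate one DMA-coherent ctx_hw_stats block per completion ring.  On the
 * PF (except the 58700 chip) a single buffer also carries the RX port stats
 * followed, after a 512-byte gap, by the TX port stats; extended port stats
 * are added only when the firmware spec version supports them.
 */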
3412 static int bnxt_alloc_stats(struct bnxt *bp)
3413 {
3414         u32 size, i;
3415         struct pci_dev *pdev = bp->pdev;
3416
3417         size = sizeof(struct ctx_hw_stats);
3418
3419         for (i = 0; i < bp->cp_nr_rings; i++) {
3420                 struct bnxt_napi *bnapi = bp->bnapi[i];
3421                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3422
3423                 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
3424                                                    &cpr->hw_stats_map,
3425                                                    GFP_KERNEL);
3426                 if (!cpr->hw_stats)
3427                         return -ENOMEM;
3428
3429                 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3430         }
3431
3432         if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
3433                 if (bp->hw_rx_port_stats)
3434                         goto alloc_ext_stats;
3435
3436                 bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
3437                                          sizeof(struct tx_port_stats) + 1024;
3438
3439                 bp->hw_rx_port_stats =
3440                         dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
3441                                            &bp->hw_rx_port_stats_map,
3442                                            GFP_KERNEL);
3443                 if (!bp->hw_rx_port_stats)
3444                         return -ENOMEM;
3445
3446                 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
3447                                        512;
3448                 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
3449                                            sizeof(struct rx_port_stats) + 512;
3450                 bp->flags |= BNXT_FLAG_PORT_STATS;
3451
3452 alloc_ext_stats:
3453                 /* Display extended statistics only if FW supports it */
3454                 if (bp->hwrm_spec_code < 0x10804 ||
3455                     bp->hwrm_spec_code == 0x10900)
3456                         return 0;
3457
3458                 if (bp->hw_rx_port_stats_ext)
3459                         goto alloc_tx_ext_stats;
3460
3461                 bp->hw_rx_port_stats_ext =
3462                         dma_alloc_coherent(&pdev->dev,
3463                                            sizeof(struct rx_port_stats_ext),
3464                                            &bp->hw_rx_port_stats_ext_map,
3465                                            GFP_KERNEL);
3466                 if (!bp->hw_rx_port_stats_ext)
3467                         return 0;
3468
3469 alloc_tx_ext_stats:
3470                 if (bp->hw_tx_port_stats_ext)
3471                         return 0;
3472
3473                 if (bp->hwrm_spec_code >= 0x10902) {
3474                         bp->hw_tx_port_stats_ext =
3475                                 dma_alloc_coherent(&pdev->dev,
3476                                                    sizeof(struct tx_port_stats_ext),
3477                                                    &bp->hw_tx_port_stats_ext_map,
3478                                                    GFP_KERNEL);
3479                 }
3480                 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
3481         }
3482         return 0;
3483 }
3484
3485 static void bnxt_clear_ring_indices(struct bnxt *bp)
3486 {
3487         int i;
3488
3489         if (!bp->bnapi)
3490                 return;
3491
3492         for (i = 0; i < bp->cp_nr_rings; i++) {
3493                 struct bnxt_napi *bnapi = bp->bnapi[i];
3494                 struct bnxt_cp_ring_info *cpr;
3495                 struct bnxt_rx_ring_info *rxr;
3496                 struct bnxt_tx_ring_info *txr;
3497
3498                 if (!bnapi)
3499                         continue;
3500
3501                 cpr = &bnapi->cp_ring;
3502                 cpr->cp_raw_cons = 0;
3503
3504                 txr = bnapi->tx_ring;
3505                 if (txr) {
3506                         txr->tx_prod = 0;
3507                         txr->tx_cons = 0;
3508                 }
3509
3510                 rxr = bnapi->rx_ring;
3511                 if (rxr) {
3512                         rxr->rx_prod = 0;
3513                         rxr->rx_agg_prod = 0;
3514                         rxr->rx_sw_agg_prod = 0;
3515                         rxr->rx_next_cons = 0;
3516                 }
3517         }
3518 }
3519
3520 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
3521 {
3522 #ifdef CONFIG_RFS_ACCEL
3523         int i;
3524
3525         /* We are under rtnl_lock and all our NAPIs have been disabled, so it
3526          * is safe to delete the hash table.
3527          */
3528         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
3529                 struct hlist_head *head;
3530                 struct hlist_node *tmp;
3531                 struct bnxt_ntuple_filter *fltr;
3532
3533                 head = &bp->ntp_fltr_hash_tbl[i];
3534                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
3535                         hlist_del(&fltr->hash);
3536                         kfree(fltr);
3537                 }
3538         }
3539         if (irq_reinit) {
3540                 kfree(bp->ntp_fltr_bmap);
3541                 bp->ntp_fltr_bmap = NULL;
3542         }
3543         bp->ntp_fltr_count = 0;
3544 #endif
3545 }
3546
3547 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
3548 {
3549 #ifdef CONFIG_RFS_ACCEL
3550         int i, rc = 0;
3551
3552         if (!(bp->flags & BNXT_FLAG_RFS))
3553                 return 0;
3554
3555         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
3556                 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
3557
3558         bp->ntp_fltr_count = 0;
3559         bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
3560                                     sizeof(long),
3561                                     GFP_KERNEL);
3562
3563         if (!bp->ntp_fltr_bmap)
3564                 rc = -ENOMEM;
3565
3566         return rc;
3567 #else
3568         return 0;
3569 #endif
3570 }
3571
3572 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
3573 {
3574         bnxt_free_vnic_attributes(bp);
3575         bnxt_free_tx_rings(bp);
3576         bnxt_free_rx_rings(bp);
3577         bnxt_free_cp_rings(bp);
3578         bnxt_free_ntp_fltrs(bp, irq_re_init);
3579         if (irq_re_init) {
3580                 bnxt_free_ring_stats(bp);
3581                 bnxt_free_ring_grps(bp);
3582                 bnxt_free_vnics(bp);
3583                 kfree(bp->tx_ring_map);
3584                 bp->tx_ring_map = NULL;
3585                 kfree(bp->tx_ring);
3586                 bp->tx_ring = NULL;
3587                 kfree(bp->rx_ring);
3588                 bp->rx_ring = NULL;
3589                 kfree(bp->bnapi);
3590                 bp->bnapi = NULL;
3591         } else {
3592                 bnxt_clear_ring_indices(bp);
3593         }
3594 }
3595
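/* Allocate all driver memory for the rings.  When IRQs are being
 * (re)initialized, the bnxt_napi pointer array and the per-ring bnxt_napi
 * structs come from a single kzalloc() block, followed by the RX/TX ring
 * info arrays, stats, ntuple filter and VNIC bookkeeping; the ring
 * descriptor memory and VNIC attributes are (re)allocated in both cases.
 */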
3596 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
3597 {
3598         int i, j, rc, size, arr_size;
3599         void *bnapi;
3600
3601         if (irq_re_init) {
3602                 /* Allocate bnapi mem pointer array and mem block for
3603                  * all queues
3604                  */
3605                 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
3606                                 bp->cp_nr_rings);
3607                 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
3608                 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
3609                 if (!bnapi)
3610                         return -ENOMEM;
3611
3612                 bp->bnapi = bnapi;
3613                 bnapi += arr_size;
3614                 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
3615                         bp->bnapi[i] = bnapi;
3616                         bp->bnapi[i]->index = i;
3617                         bp->bnapi[i]->bp = bp;
3618                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
3619                                 struct bnxt_cp_ring_info *cpr =
3620                                         &bp->bnapi[i]->cp_ring;
3621
3622                                 cpr->cp_ring_struct.ring_mem.flags =
3623                                         BNXT_RMEM_RING_PTE_FLAG;
3624                         }
3625                 }
3626
3627                 bp->rx_ring = kcalloc(bp->rx_nr_rings,
3628                                       sizeof(struct bnxt_rx_ring_info),
3629                                       GFP_KERNEL);
3630                 if (!bp->rx_ring)
3631                         return -ENOMEM;
3632
3633                 for (i = 0; i < bp->rx_nr_rings; i++) {
3634                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3635
3636                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
3637                                 rxr->rx_ring_struct.ring_mem.flags =
3638                                         BNXT_RMEM_RING_PTE_FLAG;
3639                                 rxr->rx_agg_ring_struct.ring_mem.flags =
3640                                         BNXT_RMEM_RING_PTE_FLAG;
3641                         }
3642                         rxr->bnapi = bp->bnapi[i];
3643                         bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
3644                 }
3645
3646                 bp->tx_ring = kcalloc(bp->tx_nr_rings,
3647                                       sizeof(struct bnxt_tx_ring_info),
3648                                       GFP_KERNEL);
3649                 if (!bp->tx_ring)
3650                         return -ENOMEM;
3651
3652                 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
3653                                           GFP_KERNEL);
3654
3655                 if (!bp->tx_ring_map)
3656                         return -ENOMEM;
3657
3658                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
3659                         j = 0;
3660                 else
3661                         j = bp->rx_nr_rings;
3662
3663                 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
3664                         struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3665
3666                         if (bp->flags & BNXT_FLAG_CHIP_P5)
3667                                 txr->tx_ring_struct.ring_mem.flags =
3668                                         BNXT_RMEM_RING_PTE_FLAG;
3669                         txr->bnapi = bp->bnapi[j];
3670                         bp->bnapi[j]->tx_ring = txr;
3671                         bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
3672                         if (i >= bp->tx_nr_rings_xdp) {
3673                                 txr->txq_index = i - bp->tx_nr_rings_xdp;
3674                                 bp->bnapi[j]->tx_int = bnxt_tx_int;
3675                         } else {
3676                                 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
3677                                 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
3678                         }
3679                 }
3680
3681                 rc = bnxt_alloc_stats(bp);
3682                 if (rc)
3683                         goto alloc_mem_err;
3684
3685                 rc = bnxt_alloc_ntp_fltrs(bp);
3686                 if (rc)
3687                         goto alloc_mem_err;
3688
3689                 rc = bnxt_alloc_vnics(bp);
3690                 if (rc)
3691                         goto alloc_mem_err;
3692         }
3693
3694         bnxt_init_ring_struct(bp);
3695
3696         rc = bnxt_alloc_rx_rings(bp);
3697         if (rc)
3698                 goto alloc_mem_err;
3699
3700         rc = bnxt_alloc_tx_rings(bp);
3701         if (rc)
3702                 goto alloc_mem_err;
3703
3704         rc = bnxt_alloc_cp_rings(bp);
3705         if (rc)
3706                 goto alloc_mem_err;
3707
3708         bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
3709                                   BNXT_VNIC_UCAST_FLAG;
3710         rc = bnxt_alloc_vnic_attributes(bp);
3711         if (rc)
3712                 goto alloc_mem_err;
3713         return 0;
3714
3715 alloc_mem_err:
3716         bnxt_free_mem(bp, true);
3717         return rc;
3718 }
3719
3720 static void bnxt_disable_int(struct bnxt *bp)
3721 {
3722         int i;
3723
3724         if (!bp->bnapi)
3725                 return;
3726
3727         for (i = 0; i < bp->cp_nr_rings; i++) {
3728                 struct bnxt_napi *bnapi = bp->bnapi[i];
3729                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3730                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3731
3732                 if (ring->fw_ring_id != INVALID_HW_RING_ID)
3733                         bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
3734         }
3735 }
3736
3737 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
3738 {
3739         struct bnxt_napi *bnapi = bp->bnapi[n];
3740         struct bnxt_cp_ring_info *cpr;
3741
3742         cpr = &bnapi->cp_ring;
3743         return cpr->cp_ring_struct.map_idx;
3744 }
3745
3746 static void bnxt_disable_int_sync(struct bnxt *bp)
3747 {
3748         int i;
3749
3750         atomic_inc(&bp->intr_sem);
3751
3752         bnxt_disable_int(bp);
3753         for (i = 0; i < bp->cp_nr_rings; i++) {
3754                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
3755
3756                 synchronize_irq(bp->irq_tbl[map_idx].vector);
3757         }
3758 }
3759
3760 static void bnxt_enable_int(struct bnxt *bp)
3761 {
3762         int i;
3763
3764         atomic_set(&bp->intr_sem, 0);
3765         for (i = 0; i < bp->cp_nr_rings; i++) {
3766                 struct bnxt_napi *bnapi = bp->bnapi[i];
3767                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3768
3769                 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
3770         }
3771 }
3772
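/* Fill in the common HWRM request header.  Requests routed to the secondary
 * KONG channel use the KONG response buffer address; everything else uses
 * the default CHIMP response buffer.
 */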
3773 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
3774                             u16 cmpl_ring, u16 target_id)
3775 {
3776         struct input *req = request;
3777
3778         req->req_type = cpu_to_le16(req_type);
3779         req->cmpl_ring = cpu_to_le16(cmpl_ring);
3780         req->target_id = cpu_to_le16(target_id);
3781         if (bnxt_kong_hwrm_message(bp, req))
3782                 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
3783         else
3784                 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
3785 }
3786
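/* Send one HWRM request and wait for its response.  The request is written
 * into the BAR0 communication window (or, when the firmware requires the
 * short command format, copied into a DMA buffer referenced by a
 * hwrm_short_input descriptor) and the channel doorbell is rung.  Completion
 * is then detected either through the completion-ring interrupt path or by
 * polling the response length and the trailing valid byte in the DMA'd
 * response buffer, using a short poll interval first and a longer one later.
 */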
3787 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3788                                  int timeout, bool silent)
3789 {
3790         int i, intr_process, rc, tmo_count;
3791         struct input *req = msg;
3792         u32 *data = msg;
3793         __le32 *resp_len;
3794         u8 *valid;
3795         u16 cp_ring_id, len = 0;
3796         struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
3797         u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
3798         struct hwrm_short_input short_input = {0};
3799         u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
3800         u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr;
3801         u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
3802         u16 dst = BNXT_HWRM_CHNL_CHIMP;
3803
3804         if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
3805                 if (msg_len > bp->hwrm_max_ext_req_len ||
3806                     !bp->hwrm_short_cmd_req_addr)
3807                         return -EINVAL;
3808         }
3809
3810         if (bnxt_hwrm_kong_chnl(bp, req)) {
3811                 dst = BNXT_HWRM_CHNL_KONG;
3812                 bar_offset = BNXT_GRCPF_REG_KONG_COMM;
3813                 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
3814                 resp = bp->hwrm_cmd_kong_resp_addr;
3815                 resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr;
3816         }
3817
3818         memset(resp, 0, PAGE_SIZE);
3819         cp_ring_id = le16_to_cpu(req->cmpl_ring);
3820         intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
3821
3822         req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
3823         /* currently supports only one outstanding message */
3824         if (intr_process)
3825                 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
3826
3827         if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
3828             msg_len > BNXT_HWRM_MAX_REQ_LEN) {
3829                 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
3830                 u16 max_msg_len;
3831
3832                 /* Bound the copy at the maximum extended request length for
3833                  * the short cmd format; this limit comes from the device, or
3834                  * falls back to the driver's internal maximum.
3835                  */
3836                 max_msg_len = bp->hwrm_max_ext_req_len;
3837
3838                 memcpy(short_cmd_req, req, msg_len);
3839                 if (msg_len < max_msg_len)
3840                         memset(short_cmd_req + msg_len, 0,
3841                                max_msg_len - msg_len);
3842
3843                 short_input.req_type = req->req_type;
3844                 short_input.signature =
3845                                 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
3846                 short_input.size = cpu_to_le16(msg_len);
3847                 short_input.req_addr =
3848                         cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
3849
3850                 data = (u32 *)&short_input;
3851                 msg_len = sizeof(short_input);
3852
3853                 /* Sync memory write before updating doorbell */
3854                 wmb();
3855
3856                 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
3857         }
3858
3859         /* Write request msg to hwrm channel */
3860         __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
3861
3862         for (i = msg_len; i < max_req_len; i += 4)
3863                 writel(0, bp->bar0 + bar_offset + i);
3864
3865         /* Ring channel doorbell */
3866         writel(1, bp->bar0 + doorbell_offset);
3867
3868         if (!timeout)
3869                 timeout = DFLT_HWRM_CMD_TIMEOUT;
3870         /* convert timeout to usec */
3871         timeout *= 1000;
3872
3873         i = 0;
3874         /* Poll with a short interval for the first few iterations, then with
3875          * the standard interval: total number of loops = short-timeout loops +
3876          * standard-timeout loops.
3877          */
3878         tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
3879         timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
3880         tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
3881         resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET);
3882
3883         if (intr_process) {
3884                 u16 seq_id = bp->hwrm_intr_seq_id;
3885
3886                 /* Wait until hwrm response cmpl interrupt is processed */
3887                 while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
3888                        i++ < tmo_count) {
3889                         /* on first few passes, just barely sleep */
3890                         if (i < HWRM_SHORT_TIMEOUT_COUNTER)
3891                                 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
3892                                              HWRM_SHORT_MAX_TIMEOUT);
3893                         else
3894                                 usleep_range(HWRM_MIN_TIMEOUT,
3895                                              HWRM_MAX_TIMEOUT);
3896                 }
3897
3898                 if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
3899                         netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
3900                                    le16_to_cpu(req->req_type));
3901                         return -1;
3902                 }
3903                 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
3904                       HWRM_RESP_LEN_SFT;
3905                 valid = resp_addr + len - 1;
3906         } else {
3907                 int j;
3908
3909                 /* Check if response len is updated */
3910                 for (i = 0; i < tmo_count; i++) {
3911                         len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
3912                               HWRM_RESP_LEN_SFT;
3913                         if (len)
3914                                 break;
3915                         /* on first few passes, just barely sleep */
3916                         if (i < HWRM_SHORT_TIMEOUT_COUNTER)
3917                                 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
3918                                              HWRM_SHORT_MAX_TIMEOUT);
3919                         else
3920                                 usleep_range(HWRM_MIN_TIMEOUT,
3921                                              HWRM_MAX_TIMEOUT);
3922                 }
3923
3924                 if (i >= tmo_count) {
3925                         netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
3926                                    HWRM_TOTAL_TIMEOUT(i),
3927                                    le16_to_cpu(req->req_type),
3928                                    le16_to_cpu(req->seq_id), len);
3929                         return -1;
3930                 }
3931
3932                 /* Last byte of resp contains valid bit */
3933                 valid = resp_addr + len - 1;
3934                 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
3935                         /* make sure we read from updated DMA memory */
3936                         dma_rmb();
3937                         if (*valid)
3938                                 break;
3939                         usleep_range(1, 5);
3940                 }
3941
3942                 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
3943                         netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
3944                                    HWRM_TOTAL_TIMEOUT(i),
3945                                    le16_to_cpu(req->req_type),
3946                                    le16_to_cpu(req->seq_id), len, *valid);
3947                         return -1;
3948                 }
3949         }
3950
3951         /* Zero the valid bit for compatibility.  The valid bit's position in
3952          * an older spec may become a new field in a newer spec, and a new
3953          * field not implemented by the old spec must read as zero.
3954          */
3955         *valid = 0;
3956         rc = le16_to_cpu(resp->error_code);
3957         if (rc && !silent)
3958                 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
3959                            le16_to_cpu(resp->req_type),
3960                            le16_to_cpu(resp->seq_id), rc);
3961         return rc;
3962 }
3963
3964 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3965 {
3966         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
3967 }
3968
3969 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3970                               int timeout)
3971 {
3972         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3973 }
3974
3975 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3976 {
3977         int rc;
3978
3979         mutex_lock(&bp->hwrm_cmd_lock);
3980         rc = _hwrm_send_message(bp, msg, msg_len, timeout);
3981         mutex_unlock(&bp->hwrm_cmd_lock);
3982         return rc;
3983 }
3984
3985 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3986                              int timeout)
3987 {
3988         int rc;
3989
3990         mutex_lock(&bp->hwrm_cmd_lock);
3991         rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3992         mutex_unlock(&bp->hwrm_cmd_lock);
3993         return rc;
3994 }
3995
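/* Register the async events the driver wants the firmware to forward.  The
 * events in bnxt_async_events_arr plus any caller-supplied bitmap are
 * collected into a 256-bit bitmap and copied into the eight 32-bit
 * async_event_fwd words of the FUNC_DRV_RGTR request.
 */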
3996 int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
3997                                      int bmap_size)
3998 {
3999         struct hwrm_func_drv_rgtr_input req = {0};
4000         DECLARE_BITMAP(async_events_bmap, 256);
4001         u32 *events = (u32 *)async_events_bmap;
4002         int i;
4003
4004         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4005
4006         req.enables =
4007                 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4008
4009         memset(async_events_bmap, 0, sizeof(async_events_bmap));
4010         for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
4011                 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4012
4013         if (bmap && bmap_size) {
4014                 for (i = 0; i < bmap_size; i++) {
4015                         if (test_bit(i, bmap))
4016                                 __set_bit(i, async_events_bmap);
4017                 }
4018         }
4019
4020         for (i = 0; i < 8; i++)
4021                 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
4022
4023         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4024 }
4025
4026 static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
4027 {
4028         struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
4029         struct hwrm_func_drv_rgtr_input req = {0};
4030         int rc;
4031
4032         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4033
4034         req.enables =
4035                 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4036                             FUNC_DRV_RGTR_REQ_ENABLES_VER);
4037
4038         req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4039         req.flags = cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE);
4040         req.ver_maj_8b = DRV_VER_MAJ;
4041         req.ver_min_8b = DRV_VER_MIN;
4042         req.ver_upd_8b = DRV_VER_UPD;
4043         req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
4044         req.ver_min = cpu_to_le16(DRV_VER_MIN);
4045         req.ver_upd = cpu_to_le16(DRV_VER_UPD);
4046
4047         if (BNXT_PF(bp)) {
4048                 u32 data[8];
4049                 int i;
4050
4051                 memset(data, 0, sizeof(data));
4052                 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4053                         u16 cmd = bnxt_vf_req_snif[i];
4054                         unsigned int bit, idx;
4055
4056                         idx = cmd / 32;
4057                         bit = cmd % 32;
4058                         data[idx] |= 1 << bit;
4059                 }
4060
4061                 for (i = 0; i < 8; i++)
4062                         req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4063
4064                 req.enables |=
4065                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4066         }
4067
4068         if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4069                 req.flags |= cpu_to_le32(
4070                         FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4071
4072         mutex_lock(&bp->hwrm_cmd_lock);
4073         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4074         if (rc)
4075                 rc = -EIO;
4076         else if (resp->flags &
4077                  cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4078                 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4079         mutex_unlock(&bp->hwrm_cmd_lock);
4080         return rc;
4081 }
4082
4083 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4084 {
4085         struct hwrm_func_drv_unrgtr_input req = {0};
4086
4087         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4088         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4089 }
4090
4091 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4092 {
4093         int rc = 0;
4094         struct hwrm_tunnel_dst_port_free_input req = {0};
4095
4096         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4097         req.tunnel_type = tunnel_type;
4098
4099         switch (tunnel_type) {
4100         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4101                 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
4102                 break;
4103         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4104                 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
4105                 break;
4106         default:
4107                 break;
4108         }
4109
4110         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4111         if (rc)
4112                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4113                            rc);
4114         return rc;
4115 }
4116
4117 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4118                                            u8 tunnel_type)
4119 {
4120         int rc = 0;
4121         struct hwrm_tunnel_dst_port_alloc_input req = {0};
4122         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4123
4124         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4125
4126         req.tunnel_type = tunnel_type;
4127         req.tunnel_dst_port_val = port;
4128
4129         mutex_lock(&bp->hwrm_cmd_lock);
4130         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4131         if (rc) {
4132                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4133                            rc);
4134                 goto err_out;
4135         }
4136
4137         switch (tunnel_type) {
4138         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4139                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
4140                 break;
4141         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4142                 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
4143                 break;
4144         default:
4145                 break;
4146         }
4147
4148 err_out:
4149         mutex_unlock(&bp->hwrm_cmd_lock);
4150         return rc;
4151 }
4152
4153 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4154 {
4155         struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4156         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4157
4158         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
4159         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4160
4161         req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4162         req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4163         req.mask = cpu_to_le32(vnic->rx_mask);
4164         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4165 }
4166
4167 #ifdef CONFIG_RFS_ACCEL
4168 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4169                                             struct bnxt_ntuple_filter *fltr)
4170 {
4171         struct hwrm_cfa_ntuple_filter_free_input req = {0};
4172
4173         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4174         req.ntuple_filter_id = fltr->filter_id;
4175         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4176 }
4177
4178 #define BNXT_NTP_FLTR_FLAGS                                     \
4179         (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |     \
4180          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |        \
4181          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |      \
4182          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |      \
4183          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |       \
4184          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |  \
4185          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |       \
4186          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |  \
4187          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |      \
4188          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |         \
4189          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |    \
4190          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |         \
4191          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
4192          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4193
4194 #define BNXT_NTP_TUNNEL_FLTR_FLAG                               \
4195                 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4196
4197 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4198                                              struct bnxt_ntuple_filter *fltr)
4199 {
4200         struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
4201         struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
4202         struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4203         struct flow_keys *keys = &fltr->fkeys;
4204         int rc = 0;
4205
4206         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
4207         req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4208
4209         req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4210
4211         req.ethertype = htons(ETH_P_IP);
4212         memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4213         req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4214         req.ip_protocol = keys->basic.ip_proto;
4215
4216         if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4217                 int i;
4218
4219                 req.ethertype = htons(ETH_P_IPV6);
4220                 req.ip_addr_type =
4221                         CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4222                 *(struct in6_addr *)&req.src_ipaddr[0] =
4223                         keys->addrs.v6addrs.src;
4224                 *(struct in6_addr *)&req.dst_ipaddr[0] =
4225                         keys->addrs.v6addrs.dst;
4226                 for (i = 0; i < 4; i++) {
4227                         req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4228                         req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4229                 }
4230         } else {
4231                 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
4232                 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4233                 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4234                 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4235         }
4236         if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4237                 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4238                 req.tunnel_type =
4239                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4240         }
4241
4242         req.src_port = keys->ports.src;
4243         req.src_port_mask = cpu_to_be16(0xffff);
4244         req.dst_port = keys->ports.dst;
4245         req.dst_port_mask = cpu_to_be16(0xffff);
4246
4247         req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
4248         mutex_lock(&bp->hwrm_cmd_lock);
4249         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4250         if (!rc) {
4251                 resp = bnxt_get_hwrm_resp_addr(bp, &req);
4252                 fltr->filter_id = resp->ntuple_filter_id;
4253         }
4254         mutex_unlock(&bp->hwrm_cmd_lock);
4255         return rc;
4256 }
4257 #endif
4258
4259 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4260                                      u8 *mac_addr)
4261 {
4262         u32 rc = 0;
4263         struct hwrm_cfa_l2_filter_alloc_input req = {0};
4264         struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4265
4266         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
4267         req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4268         if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4269                 req.flags |=
4270                         cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
4271         req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4272         req.enables =
4273                 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
4274                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
4275                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4276         memcpy(req.l2_addr, mac_addr, ETH_ALEN);
4277         req.l2_addr_mask[0] = 0xff;
4278         req.l2_addr_mask[1] = 0xff;
4279         req.l2_addr_mask[2] = 0xff;
4280         req.l2_addr_mask[3] = 0xff;
4281         req.l2_addr_mask[4] = 0xff;
4282         req.l2_addr_mask[5] = 0xff;
4283
4284         mutex_lock(&bp->hwrm_cmd_lock);
4285         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4286         if (!rc)
4287                 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4288                                                         resp->l2_filter_id;
4289         mutex_unlock(&bp->hwrm_cmd_lock);
4290         return rc;
4291 }
4292
4293 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4294 {
4295         u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4296         int rc = 0;
4297
4298         /* Any associated ntuple filters will also be cleared by firmware. */
4299         mutex_lock(&bp->hwrm_cmd_lock);
4300         for (i = 0; i < num_of_vnics; i++) {
4301                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4302
4303                 for (j = 0; j < vnic->uc_filter_count; j++) {
4304                         struct hwrm_cfa_l2_filter_free_input req = {0};
4305
4306                         bnxt_hwrm_cmd_hdr_init(bp, &req,
4307                                                HWRM_CFA_L2_FILTER_FREE, -1, -1);
4308
4309                         req.l2_filter_id = vnic->fw_l2_filter_id[j];
4310
4311                         rc = _hwrm_send_message(bp, &req, sizeof(req),
4312                                                 HWRM_CMD_TIMEOUT);
4313                 }
4314                 vnic->uc_filter_count = 0;
4315         }
4316         mutex_unlock(&bp->hwrm_cmd_lock);
4317
4318         return rc;
4319 }
4320
4321 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4322 {
4323         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4324         struct hwrm_vnic_tpa_cfg_input req = {0};
4325
4326         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4327                 return 0;
4328
4329         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
4330
4331         if (tpa_flags) {
4332                 u16 mss = bp->dev->mtu - 40;
4333                 u32 nsegs, n, segs = 0, flags;
4334
4335                 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4336                         VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4337                         VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4338                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4339                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4340                 if (tpa_flags & BNXT_FLAG_GRO)
4341                         flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4342
4343                 req.flags = cpu_to_le32(flags);
4344
4345                 req.enables =
4346                         cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
4347                                     VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4348                                     VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
4349
4350                 /* The number of segs is in log2 units, and the first
4351                  * packet is not included as part of these units.
4352                  */
4353                 if (mss <= BNXT_RX_PAGE_SIZE) {
4354                         n = BNXT_RX_PAGE_SIZE / mss;
4355                         nsegs = (MAX_SKB_FRAGS - 1) * n;
4356                 } else {
4357                         n = mss / BNXT_RX_PAGE_SIZE;
4358                         if (mss & (BNXT_RX_PAGE_SIZE - 1))
4359                                 n++;
4360                         nsegs = (MAX_SKB_FRAGS - n) / n;
4361                 }
4362
4363                 segs = ilog2(nsegs);
4364                 req.max_agg_segs = cpu_to_le16(segs);
4365                 req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
4366
4367                 req.min_agg_len = cpu_to_le32(512);
4368         }
4369         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4370
4371         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4372 }
4373
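/*
 * Illustrative sketch (not compiled, not part of the driver): how the
 * max_agg_segs value above is derived.  The driver uses mss = mtu - 40
 * (IP + TCP headers); the example numbers assume 4K aggregation pages and
 * 17 SKB frags, which are typical but come from the kernel headers.
 */
#if 0
static unsigned int tpa_max_agg_segs_log2(unsigned int mss,
					  unsigned int rx_page_size,
					  unsigned int max_skb_frags)
{
	unsigned int n, nsegs, segs = 0;

	if (mss <= rx_page_size) {
		n = rx_page_size / mss;		/* MSS-sized segs per agg page */
		nsegs = (max_skb_frags - 1) * n;
	} else {
		n = mss / rx_page_size;		/* agg pages per seg, rounded up */
		if (mss % rx_page_size)
			n++;
		nsegs = (max_skb_frags - n) / n;
	}
	/* The firmware field takes log2 of nsegs: e.g. MTU 1500 -> mss 1460,
	 * n = 2, nsegs = (17 - 1) * 2 = 32, max_agg_segs = 5.
	 */
	while (nsegs > 1) {
		nsegs >>= 1;
		segs++;
	}
	return segs;
}
#endif
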
4374 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
4375 {
4376         struct bnxt_ring_grp_info *grp_info;
4377
4378         grp_info = &bp->grp_info[ring->grp_idx];
4379         return grp_info->cp_fw_ring_id;
4380 }
4381
4382 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
4383 {
4384         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4385                 struct bnxt_napi *bnapi = rxr->bnapi;
4386                 struct bnxt_cp_ring_info *cpr;
4387
4388                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
4389                 return cpr->cp_ring_struct.fw_ring_id;
4390         } else {
4391                 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
4392         }
4393 }
4394
4395 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
4396 {
4397         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4398                 struct bnxt_napi *bnapi = txr->bnapi;
4399                 struct bnxt_cp_ring_info *cpr;
4400
4401                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
4402                 return cpr->cp_ring_struct.fw_ring_id;
4403         } else {
4404                 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
4405         }
4406 }
4407
4408 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
4409 {
4410         u32 i, j, max_rings;
4411         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4412         struct hwrm_vnic_rss_cfg_input req = {0};
4413
4414         if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
4415             vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
4416                 return 0;
4417
4418         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4419         if (set_rss) {
4420                 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
4421                 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
4422                 if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
4423                         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4424                                 max_rings = bp->rx_nr_rings - 1;
4425                         else
4426                                 max_rings = bp->rx_nr_rings;
4427                 } else {
4428                         max_rings = 1;
4429                 }
4430
4431                 /* Fill the RSS indirection table with ring group ids */
4432                 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
4433                         if (j == max_rings)
4434                                 j = 0;
4435                         vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
4436                 }
4437
4438                 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4439                 req.hash_key_tbl_addr =
4440                         cpu_to_le64(vnic->rss_hash_key_dma_addr);
4441         }
4442         req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
4443         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4444 }
4445
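/*
 * Illustrative sketch (not compiled, not part of the driver): the RSS
 * indirection table fill above simply round-robins the ring group ids of the
 * first max_rings RX rings across every table slot.  Table size and ids here
 * are made up for the example.
 */
#if 0
static void fill_rss_indir_example(u16 *tbl, unsigned int tbl_size,
				   const u16 *grp_ids, unsigned int max_rings)
{
	unsigned int i, j;

	for (i = 0, j = 0; i < tbl_size; i++, j++) {
		if (j == max_rings)
			j = 0;
		tbl[i] = grp_ids[j];
	}
	/* e.g. a 128-entry table with max_rings = 4 puts each ring group id
	 * in 32 slots, so hashed RX flows spread evenly across the 4 rings.
	 */
}
#endif
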
4446 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
4447 {
4448         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4449         u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
4450         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4451         struct hwrm_vnic_rss_cfg_input req = {0};
4452
4453         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4454         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4455         if (!set_rss) {
4456                 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4457                 return 0;
4458         }
4459         req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
4460         req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
4461         req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4462         req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
4463         nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
4464         for (i = 0, k = 0; i < nr_ctxs; i++) {
4465                 __le16 *ring_tbl = vnic->rss_table;
4466                 int rc;
4467
4468                 req.ring_table_pair_index = i;
4469                 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
4470                 for (j = 0; j < 64; j++) {
4471                         u16 ring_id;
4472
4473                         ring_id = rxr->rx_ring_struct.fw_ring_id;
4474                         *ring_tbl++ = cpu_to_le16(ring_id);
4475                         ring_id = bnxt_cp_ring_for_rx(bp, rxr);
4476                         *ring_tbl++ = cpu_to_le16(ring_id);
4477                         rxr++;
4478                         k++;
4479                         if (k == max_rings) {
4480                                 k = 0;
4481                                 rxr = &bp->rx_ring[0];
4482                         }
4483                 }
4484                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4485                 if (rc)
4486                         return -EIO;
4487         }
4488         return 0;
4489 }
4490
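/*
 * Illustrative sketch (not compiled, not part of the driver): on P5 chips
 * each RSS context in the loop above holds 64 (rx ring id, cp ring id)
 * pairs, so the context count is just the RX ring count divided by 64,
 * rounded up.
 */
#if 0
static unsigned int p5_rss_nr_ctxs_example(unsigned int rx_nr_rings)
{
	/* Same as DIV_ROUND_UP(rx_nr_rings, 64): 1..64 rings -> 1 context,
	 * 65..128 rings -> 2 contexts, and so on.
	 */
	return (rx_nr_rings + 63) / 64;
}
#endif
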
4491 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
4492 {
4493         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4494         struct hwrm_vnic_plcmodes_cfg_input req = {0};
4495
4496         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
4497         req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
4498                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
4499                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
4500         req.enables =
4501                 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
4502                             VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
4503         /* thresholds not implemented in firmware yet */
4504         req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
4505         req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
4506         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4507         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4508 }
4509
4510 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
4511                                         u16 ctx_idx)
4512 {
4513         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
4514
4515         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
4516         req.rss_cos_lb_ctx_id =
4517                 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
4518
4519         hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4520         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
4521 }
4522
4523 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
4524 {
4525         int i, j;
4526
4527         for (i = 0; i < bp->nr_vnics; i++) {
4528                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4529
4530                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
4531                         if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
4532                                 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
4533                 }
4534         }
4535         bp->rsscos_nr_ctxs = 0;
4536 }
4537
4538 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
4539 {
4540         int rc;
4541         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
4542         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
4543                                                 bp->hwrm_cmd_resp_addr;
4544
4545         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
4546                                -1);
4547
4548         mutex_lock(&bp->hwrm_cmd_lock);
4549         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4550         if (!rc)
4551                 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
4552                         le16_to_cpu(resp->rss_cos_lb_ctx_id);
4553         mutex_unlock(&bp->hwrm_cmd_lock);
4554
4555         return rc;
4556 }
4557
4558 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
4559 {
4560         if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
4561                 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
4562         return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
4563 }
4564
4565 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
4566 {
4567         unsigned int ring = 0, grp_idx;
4568         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4569         struct hwrm_vnic_cfg_input req = {0};
4570         u16 def_vlan = 0;
4571
4572         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
4573
4574         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4575                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4576
4577                 req.default_rx_ring_id =
4578                         cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
4579                 req.default_cmpl_ring_id =
4580                         cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
4581                 req.enables =
4582                         cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
4583                                     VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
4584                 goto vnic_mru;
4585         }
4586         req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
4587         /* Only RSS is supported for now; TBD: COS & LB */
4588         if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
4589                 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
4590                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
4591                                            VNIC_CFG_REQ_ENABLES_MRU);
4592         } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
4593                 req.rss_rule =
4594                         cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
4595                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
4596                                            VNIC_CFG_REQ_ENABLES_MRU);
4597                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
4598         } else {
4599                 req.rss_rule = cpu_to_le16(0xffff);
4600         }
4601
4602         if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
4603             (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
4604                 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
4605                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
4606         } else {
4607                 req.cos_rule = cpu_to_le16(0xffff);
4608         }
4609
4610         if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4611                 ring = 0;
4612         else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
4613                 ring = vnic_id - 1;
4614         else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
4615                 ring = bp->rx_nr_rings - 1;
4616
4617         grp_idx = bp->rx_ring[ring].bnapi->index;
4618         req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
4619         req.lb_rule = cpu_to_le16(0xffff);
4620 vnic_mru:
4621         req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
4622                               VLAN_HLEN);
4623
4624         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4625 #ifdef CONFIG_BNXT_SRIOV
4626         if (BNXT_VF(bp))
4627                 def_vlan = bp->vf.vlan;
4628 #endif
4629         if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
4630                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
4631         if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
4632                 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
4633
4634         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4635 }
4636
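/*
 * Illustrative sketch (not compiled, not part of the driver): the MRU
 * programmed above is the largest L2 frame the VNIC must accept, i.e. the
 * MTU plus Ethernet header, FCS and one VLAN tag.  Header sizes are written
 * out numerically here purely for the worked example.
 */
#if 0
static unsigned int vnic_mru_example(unsigned int mtu)
{
	/* ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4):
	 * mtu 1500 -> mru 1522, mtu 9000 -> mru 9022.
	 */
	return mtu + 14 + 4 + 4;
}
#endif
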
4637 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
4638 {
4639         u32 rc = 0;
4640
4641         if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
4642                 struct hwrm_vnic_free_input req = {0};
4643
4644                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
4645                 req.vnic_id =
4646                         cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
4647
4648                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4649                 if (rc)
4650                         return rc;
4651                 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
4652         }
4653         return rc;
4654 }
4655
4656 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
4657 {
4658         u16 i;
4659
4660         for (i = 0; i < bp->nr_vnics; i++)
4661                 bnxt_hwrm_vnic_free_one(bp, i);
4662 }
4663
4664 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
4665                                 unsigned int start_rx_ring_idx,
4666                                 unsigned int nr_rings)
4667 {
4668         int rc = 0;
4669         unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
4670         struct hwrm_vnic_alloc_input req = {0};
4671         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4672         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4673
4674         if (bp->flags & BNXT_FLAG_CHIP_P5)
4675                 goto vnic_no_ring_grps;
4676
4677         /* map ring groups to this vnic */
4678         for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
4679                 grp_idx = bp->rx_ring[i].bnapi->index;
4680                 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
4681                         netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
4682                                    j, nr_rings);
4683                         break;
4684                 }
4685                 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
4686         }
4687
4688 vnic_no_ring_grps:
4689         for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
4690                 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
4691         if (vnic_id == 0)
4692                 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
4693
4694         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
4695
4696         mutex_lock(&bp->hwrm_cmd_lock);
4697         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4698         if (!rc)
4699                 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
4700         mutex_unlock(&bp->hwrm_cmd_lock);
4701         return rc;
4702 }
4703
4704 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
4705 {
4706         struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4707         struct hwrm_vnic_qcaps_input req = {0};
4708         int rc;
4709
4710         if (bp->hwrm_spec_code < 0x10600)
4711                 return 0;
4712
4713         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
4714         mutex_lock(&bp->hwrm_cmd_lock);
4715         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4716         if (!rc) {
4717                 u32 flags = le32_to_cpu(resp->flags);
4718
4719                 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
4720                     (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
4721                         bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
4722                 if (flags &
4723                     VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
4724                         bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
4725         }
4726         mutex_unlock(&bp->hwrm_cmd_lock);
4727         return rc;
4728 }
4729
4730 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
4731 {
4732         u16 i;
4733         u32 rc = 0;
4734
4735         if (bp->flags & BNXT_FLAG_CHIP_P5)
4736                 return 0;
4737
4738         mutex_lock(&bp->hwrm_cmd_lock);
4739         for (i = 0; i < bp->rx_nr_rings; i++) {
4740                 struct hwrm_ring_grp_alloc_input req = {0};
4741                 struct hwrm_ring_grp_alloc_output *resp =
4742                                         bp->hwrm_cmd_resp_addr;
4743                 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
4744
4745                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
4746
4747                 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
4748                 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
4749                 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
4750                 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
4751
4752                 rc = _hwrm_send_message(bp, &req, sizeof(req),
4753                                         HWRM_CMD_TIMEOUT);
4754                 if (rc)
4755                         break;
4756
4757                 bp->grp_info[grp_idx].fw_grp_id =
4758                         le32_to_cpu(resp->ring_group_id);
4759         }
4760         mutex_unlock(&bp->hwrm_cmd_lock);
4761         return rc;
4762 }
4763
4764 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
4765 {
4766         u16 i;
4767         u32 rc = 0;
4768         struct hwrm_ring_grp_free_input req = {0};
4769
4770         if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
4771                 return 0;
4772
4773         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
4774
4775         mutex_lock(&bp->hwrm_cmd_lock);
4776         for (i = 0; i < bp->cp_nr_rings; i++) {
4777                 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
4778                         continue;
4779                 req.ring_group_id =
4780                         cpu_to_le32(bp->grp_info[i].fw_grp_id);
4781
4782                 rc = _hwrm_send_message(bp, &req, sizeof(req),
4783                                         HWRM_CMD_TIMEOUT);
4784                 if (rc)
4785                         break;
4786                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4787         }
4788         mutex_unlock(&bp->hwrm_cmd_lock);
4789         return rc;
4790 }
4791
4792 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
4793                                     struct bnxt_ring_struct *ring,
4794                                     u32 ring_type, u32 map_index)
4795 {
4796         int rc = 0, err = 0;
4797         struct hwrm_ring_alloc_input req = {0};
4798         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4799         struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
4800         struct bnxt_ring_grp_info *grp_info;
4801         u16 ring_id;
4802
4803         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
4804
4805         req.enables = 0;
4806         if (rmem->nr_pages > 1) {
4807                 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
4808                 /* Page size is in log2 units */
4809                 req.page_size = BNXT_PAGE_SHIFT;
4810                 req.page_tbl_depth = 1;
4811         } else {
4812                 req.page_tbl_addr =  cpu_to_le64(rmem->dma_arr[0]);
4813         }
4814         req.fbo = 0;
4815         /* Association of ring index with doorbell index and MSIX number */
4816         req.logical_id = cpu_to_le16(map_index);
4817
4818         switch (ring_type) {
4819         case HWRM_RING_ALLOC_TX: {
4820                 struct bnxt_tx_ring_info *txr;
4821
4822                 txr = container_of(ring, struct bnxt_tx_ring_info,
4823                                    tx_ring_struct);
4824                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
4825                 /* Association of transmit ring with completion ring */
4826                 grp_info = &bp->grp_info[ring->grp_idx];
4827                 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
4828                 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
4829                 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
4830                 req.queue_id = cpu_to_le16(ring->queue_id);
4831                 break;
4832         }
4833         case HWRM_RING_ALLOC_RX:
4834                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
4835                 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
4836                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4837                         u16 flags = 0;
4838
4839                         /* Association of rx ring with stats context */
4840                         grp_info = &bp->grp_info[ring->grp_idx];
4841                         req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
4842                         req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
4843                         req.enables |= cpu_to_le32(
4844                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
4845                         if (NET_IP_ALIGN == 2)
4846                                 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
4847                         req.flags = cpu_to_le16(flags);
4848                 }
4849                 break;
4850         case HWRM_RING_ALLOC_AGG:
4851                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4852                         req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
4853                         /* Association of agg ring with rx ring */
4854                         grp_info = &bp->grp_info[ring->grp_idx];
4855                         req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
4856                         req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
4857                         req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
4858                         req.enables |= cpu_to_le32(
4859                                 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
4860                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
4861                 } else {
4862                         req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
4863                 }
4864                 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
4865                 break;
4866         case HWRM_RING_ALLOC_CMPL:
4867                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
4868                 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
4869                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4870                         /* Association of cp ring with nq */
4871                         grp_info = &bp->grp_info[map_index];
4872                         req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
4873                         req.cq_handle = cpu_to_le64(ring->handle);
4874                         req.enables |= cpu_to_le32(
4875                                 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
4876                 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
4877                         req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
4878                 }
4879                 break;
4880         case HWRM_RING_ALLOC_NQ:
4881                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
4882                 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
4883                 if (bp->flags & BNXT_FLAG_USING_MSIX)
4884                         req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
4885                 break;
4886         default:
4887                 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
4888                            ring_type);
4889                 return -1;
4890         }
4891
4892         mutex_lock(&bp->hwrm_cmd_lock);
4893         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4894         err = le16_to_cpu(resp->error_code);
4895         ring_id = le16_to_cpu(resp->ring_id);
4896         mutex_unlock(&bp->hwrm_cmd_lock);
4897
4898         if (rc || err) {
4899                 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
4900                            ring_type, rc, err);
4901                 return -EIO;
4902         }
4903         ring->fw_ring_id = ring_id;
4904         return rc;
4905 }
4906
4907 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
4908 {
4909         int rc;
4910
4911         if (BNXT_PF(bp)) {
4912                 struct hwrm_func_cfg_input req = {0};
4913
4914                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
4915                 req.fid = cpu_to_le16(0xffff);
4916                 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
4917                 req.async_event_cr = cpu_to_le16(idx);
4918                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4919         } else {
4920                 struct hwrm_func_vf_cfg_input req = {0};
4921
4922                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
4923                 req.enables =
4924                         cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
4925                 req.async_event_cr = cpu_to_le16(idx);
4926                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4927         }
4928         return rc;
4929 }
4930
4931 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
4932                         u32 map_idx, u32 xid)
4933 {
4934         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4935                 if (BNXT_PF(bp))
4936                         db->doorbell = bp->bar1 + 0x10000;
4937                 else
4938                         db->doorbell = bp->bar1 + 0x4000;
4939                 switch (ring_type) {
4940                 case HWRM_RING_ALLOC_TX:
4941                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
4942                         break;
4943                 case HWRM_RING_ALLOC_RX:
4944                 case HWRM_RING_ALLOC_AGG:
4945                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
4946                         break;
4947                 case HWRM_RING_ALLOC_CMPL:
4948                         db->db_key64 = DBR_PATH_L2;
4949                         break;
4950                 case HWRM_RING_ALLOC_NQ:
4951                         db->db_key64 = DBR_PATH_L2;
4952                         break;
4953                 }
4954                 db->db_key64 |= (u64)xid << DBR_XID_SFT;
4955         } else {
4956                 db->doorbell = bp->bar1 + map_idx * 0x80;
4957                 switch (ring_type) {
4958                 case HWRM_RING_ALLOC_TX:
4959                         db->db_key32 = DB_KEY_TX;
4960                         break;
4961                 case HWRM_RING_ALLOC_RX:
4962                 case HWRM_RING_ALLOC_AGG:
4963                         db->db_key32 = DB_KEY_RX;
4964                         break;
4965                 case HWRM_RING_ALLOC_CMPL:
4966                         db->db_key32 = DB_KEY_CP;
4967                         break;
4968                 }
4969         }
4970 }
4971
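/*
 * Illustrative sketch (not compiled, not part of the driver): on P5 chips
 * the 64-bit doorbell key built above combines a path, a doorbell type and
 * the firmware ring id (xid); the producer/consumer index is then combined
 * with this key each time the doorbell is rung.  The parameters below are
 * placeholders; the real values are the DBR_* definitions in bnxt.h.
 */
#if 0
static u64 p5_db_key_example(u64 path, u64 type, u32 xid,
			     unsigned int xid_shift)
{
	/* e.g. key = DBR_PATH_L2 | DBR_TYPE_SQ | ((u64)xid << DBR_XID_SFT) */
	return path | type | ((u64)xid << xid_shift);
}
#endif
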
4972 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
4973 {
4974         int i, rc = 0;
4975         u32 type;
4976
4977         if (bp->flags & BNXT_FLAG_CHIP_P5)
4978                 type = HWRM_RING_ALLOC_NQ;
4979         else
4980                 type = HWRM_RING_ALLOC_CMPL;
4981         for (i = 0; i < bp->cp_nr_rings; i++) {
4982                 struct bnxt_napi *bnapi = bp->bnapi[i];
4983                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4984                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4985                 u32 map_idx = ring->map_idx;
4986                 unsigned int vector;
4987
4988                 vector = bp->irq_tbl[map_idx].vector;
4989                 disable_irq_nosync(vector);
4990                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
4991                 if (rc) {
4992                         enable_irq(vector);
4993                         goto err_out;
4994                 }
4995                 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
4996                 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4997                 enable_irq(vector);
4998                 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
4999
5000                 if (!i) {
5001                         rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5002                         if (rc)
5003                                 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5004                 }
5005         }
5006
5007         type = HWRM_RING_ALLOC_TX;
5008         for (i = 0; i < bp->tx_nr_rings; i++) {
5009                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5010                 struct bnxt_ring_struct *ring;
5011                 u32 map_idx;
5012
5013                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5014                         struct bnxt_napi *bnapi = txr->bnapi;
5015                         struct bnxt_cp_ring_info *cpr, *cpr2;
5016                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5017
5018                         cpr = &bnapi->cp_ring;
5019                         cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5020                         ring = &cpr2->cp_ring_struct;
5021                         ring->handle = BNXT_TX_HDL;
5022                         map_idx = bnapi->index;
5023                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5024                         if (rc)
5025                                 goto err_out;
5026                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5027                                     ring->fw_ring_id);
5028                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5029                 }
5030                 ring = &txr->tx_ring_struct;
5031                 map_idx = i;
5032                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5033                 if (rc)
5034                         goto err_out;
5035                 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5036         }
5037
5038         type = HWRM_RING_ALLOC_RX;
5039         for (i = 0; i < bp->rx_nr_rings; i++) {
5040                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5041                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5042                 struct bnxt_napi *bnapi = rxr->bnapi;
5043                 u32 map_idx = bnapi->index;
5044
5045                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5046                 if (rc)
5047                         goto err_out;
5048                 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5049                 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5050                 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5051                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5052                         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5053                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5054                         struct bnxt_cp_ring_info *cpr2;
5055
5056                         cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5057                         ring = &cpr2->cp_ring_struct;
5058                         ring->handle = BNXT_RX_HDL;
5059                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5060                         if (rc)
5061                                 goto err_out;
5062                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5063                                     ring->fw_ring_id);
5064                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5065                 }
5066         }
5067
5068         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
5069                 type = HWRM_RING_ALLOC_AGG;
5070                 for (i = 0; i < bp->rx_nr_rings; i++) {
5071                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5072                         struct bnxt_ring_struct *ring =
5073                                                 &rxr->rx_agg_ring_struct;
5074                         u32 grp_idx = ring->grp_idx;
5075                         u32 map_idx = grp_idx + bp->rx_nr_rings;
5076
5077                         rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5078                         if (rc)
5079                                 goto err_out;
5080
5081                         bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5082                                     ring->fw_ring_id);
5083                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5084                         bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5085                 }
5086         }
5087 err_out:
5088         return rc;
5089 }
5090
5091 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5092                                    struct bnxt_ring_struct *ring,
5093                                    u32 ring_type, int cmpl_ring_id)
5094 {
5095         int rc;
5096         struct hwrm_ring_free_input req = {0};
5097         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
5098         u16 error_code;
5099
5100         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
5101         req.ring_type = ring_type;
5102         req.ring_id = cpu_to_le16(ring->fw_ring_id);
5103
5104         mutex_lock(&bp->hwrm_cmd_lock);
5105         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5106         error_code = le16_to_cpu(resp->error_code);
5107         mutex_unlock(&bp->hwrm_cmd_lock);
5108
5109         if (rc || error_code) {
5110                 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5111                            ring_type, rc, error_code);
5112                 return -EIO;
5113         }
5114         return 0;
5115 }
5116
5117 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5118 {
5119         u32 type;
5120         int i;
5121
5122         if (!bp->bnapi)
5123                 return;
5124
5125         for (i = 0; i < bp->tx_nr_rings; i++) {
5126                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5127                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5128                 u32 cmpl_ring_id;
5129
5130                 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5131                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5132                         hwrm_ring_free_send_msg(bp, ring,
5133                                                 RING_FREE_REQ_RING_TYPE_TX,
5134                                                 close_path ? cmpl_ring_id :
5135                                                 INVALID_HW_RING_ID);
5136                         ring->fw_ring_id = INVALID_HW_RING_ID;
5137                 }
5138         }
5139
5140         for (i = 0; i < bp->rx_nr_rings; i++) {
5141                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5142                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5143                 u32 grp_idx = rxr->bnapi->index;
5144                 u32 cmpl_ring_id;
5145
5146                 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5147                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5148                         hwrm_ring_free_send_msg(bp, ring,
5149                                                 RING_FREE_REQ_RING_TYPE_RX,
5150                                                 close_path ? cmpl_ring_id :
5151                                                 INVALID_HW_RING_ID);
5152                         ring->fw_ring_id = INVALID_HW_RING_ID;
5153                         bp->grp_info[grp_idx].rx_fw_ring_id =
5154                                 INVALID_HW_RING_ID;
5155                 }
5156         }
5157
5158         if (bp->flags & BNXT_FLAG_CHIP_P5)
5159                 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5160         else
5161                 type = RING_FREE_REQ_RING_TYPE_RX;
5162         for (i = 0; i < bp->rx_nr_rings; i++) {
5163                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5164                 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
5165                 u32 grp_idx = rxr->bnapi->index;
5166                 u32 cmpl_ring_id;
5167
5168                 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5169                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5170                         hwrm_ring_free_send_msg(bp, ring, type,
5171                                                 close_path ? cmpl_ring_id :
5172                                                 INVALID_HW_RING_ID);
5173                         ring->fw_ring_id = INVALID_HW_RING_ID;
5174                         bp->grp_info[grp_idx].agg_fw_ring_id =
5175                                 INVALID_HW_RING_ID;
5176                 }
5177         }
5178
5179         /* The completion rings are about to be freed.  After that the
5180          * IRQ doorbell will not work anymore.  So we need to disable
5181          * IRQ here.
5182          */
5183         bnxt_disable_int_sync(bp);
5184
5185         if (bp->flags & BNXT_FLAG_CHIP_P5)
5186                 type = RING_FREE_REQ_RING_TYPE_NQ;
5187         else
5188                 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
5189         for (i = 0; i < bp->cp_nr_rings; i++) {
5190                 struct bnxt_napi *bnapi = bp->bnapi[i];
5191                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5192                 struct bnxt_ring_struct *ring;
5193                 int j;
5194
5195                 for (j = 0; j < 2; j++) {
5196                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5197
5198                         if (cpr2) {
5199                                 ring = &cpr2->cp_ring_struct;
5200                                 if (ring->fw_ring_id == INVALID_HW_RING_ID)
5201                                         continue;
5202                                 hwrm_ring_free_send_msg(bp, ring,
5203                                         RING_FREE_REQ_RING_TYPE_L2_CMPL,
5204                                         INVALID_HW_RING_ID);
5205                                 ring->fw_ring_id = INVALID_HW_RING_ID;
5206                         }
5207                 }
5208                 ring = &cpr->cp_ring_struct;
5209                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5210                         hwrm_ring_free_send_msg(bp, ring, type,
5211                                                 INVALID_HW_RING_ID);
5212                         ring->fw_ring_id = INVALID_HW_RING_ID;
5213                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
5214                 }
5215         }
5216 }
5217
5218 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5219                            bool shared);
5220
5221 static int bnxt_hwrm_get_rings(struct bnxt *bp)
5222 {
5223         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5224         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5225         struct hwrm_func_qcfg_input req = {0};
5226         int rc;
5227
5228         if (bp->hwrm_spec_code < 0x10601)
5229                 return 0;
5230
5231         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5232         req.fid = cpu_to_le16(0xffff);
5233         mutex_lock(&bp->hwrm_cmd_lock);
5234         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5235         if (rc) {
5236                 mutex_unlock(&bp->hwrm_cmd_lock);
5237                 return -EIO;
5238         }
5239
5240         hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5241         if (BNXT_NEW_RM(bp)) {
5242                 u16 cp, stats;
5243
5244                 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
5245                 hw_resc->resv_hw_ring_grps =
5246                         le32_to_cpu(resp->alloc_hw_ring_grps);
5247                 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
5248                 cp = le16_to_cpu(resp->alloc_cmpl_rings);
5249                 stats = le16_to_cpu(resp->alloc_stat_ctx);
5250                 hw_resc->resv_irqs = cp;
5251                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5252                         int rx = hw_resc->resv_rx_rings;
5253                         int tx = hw_resc->resv_tx_rings;
5254
5255                         if (bp->flags & BNXT_FLAG_AGG_RINGS)
5256                                 rx >>= 1;
5257                         if (cp < (rx + tx)) {
5258                                 bnxt_trim_rings(bp, &rx, &tx, cp, false);
5259                                 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5260                                         rx <<= 1;
5261                                 hw_resc->resv_rx_rings = rx;
5262                                 hw_resc->resv_tx_rings = tx;
5263                         }
5264                         hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
5265                         hw_resc->resv_hw_ring_grps = rx;
5266                 }
5267                 hw_resc->resv_cp_rings = cp;
5268                 hw_resc->resv_stat_ctxs = stats;
5269         }
5270         mutex_unlock(&bp->hwrm_cmd_lock);
5271         return 0;
5272 }
5273
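/*
 * Illustrative sketch (not compiled, not part of the driver): on P5 chips
 * every RX and every TX ring needs its own completion ring, and with
 * aggregation rings enabled the firmware-reserved RX count includes one agg
 * ring per RX ring.  The halve/trim/double sequence above just makes the
 * (rx, tx) pair fit within the reserved completion rings; bnxt_trim_rings()
 * is the driver's real helper, and the crude trim below is only a
 * simplified stand-in for it.
 */
#if 0
static void fit_rings_example(int *rx, int *tx, int cp, bool agg_rings)
{
	if (agg_rings)
		*rx >>= 1;		/* count each RX/agg pair once */
	while (*rx + *tx > cp) {	/* shrink the larger side until it fits */
		if (*rx >= *tx && *rx > 1)
			(*rx)--;
		else if (*tx > 1)
			(*tx)--;
		else
			break;
	}
	if (agg_rings)
		*rx <<= 1;		/* restore the RX + agg ring count */
}
#endif
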
5274 /* Caller must hold bp->hwrm_cmd_lock */
5275 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
5276 {
5277         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5278         struct hwrm_func_qcfg_input req = {0};
5279         int rc;
5280
5281         if (bp->hwrm_spec_code < 0x10601)
5282                 return 0;
5283
5284         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5285         req.fid = cpu_to_le16(fid);
5286         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5287         if (!rc)
5288                 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5289
5290         return rc;
5291 }
5292
5293 static bool bnxt_rfs_supported(struct bnxt *bp);
5294
5295 static void
5296 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
5297                              int tx_rings, int rx_rings, int ring_grps,
5298                              int cp_rings, int stats, int vnics)
5299 {
5300         u32 enables = 0;
5301
5302         bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
5303         req->fid = cpu_to_le16(0xffff);
5304         enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
5305         req->num_tx_rings = cpu_to_le16(tx_rings);
5306         if (BNXT_NEW_RM(bp)) {
5307                 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
5308                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5309                         enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
5310                         enables |= tx_rings + ring_grps ?
5311                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
5312                                    FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5313                         enables |= rx_rings ?
5314                                 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5315                 } else {
5316                         enables |= cp_rings ?
5317                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
5318                                    FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5319                         enables |= ring_grps ?
5320                                    FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
5321                                    FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5322                 }
5323                 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
5324
5325                 req->num_rx_rings = cpu_to_le16(rx_rings);
5326                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5327                         req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5328                         req->num_msix = cpu_to_le16(cp_rings);
5329                         req->num_rsscos_ctxs =
5330                                 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5331                 } else {
5332                         req->num_cmpl_rings = cpu_to_le16(cp_rings);
5333                         req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5334                         req->num_rsscos_ctxs = cpu_to_le16(1);
5335                         if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
5336                             bnxt_rfs_supported(bp))
5337                                 req->num_rsscos_ctxs =
5338                                         cpu_to_le16(ring_grps + 1);
5339                 }
5340                 req->num_stat_ctxs = cpu_to_le16(stats);
5341                 req->num_vnics = cpu_to_le16(vnics);
5342         }
5343         req->enables = cpu_to_le32(enables);
5344 }
5345
5346 static void
5347 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
5348                              struct hwrm_func_vf_cfg_input *req, int tx_rings,
5349                              int rx_rings, int ring_grps, int cp_rings,
5350                              int stats, int vnics)
5351 {
5352         u32 enables = 0;
5353
5354         bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
5355         enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
5356         enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
5357                               FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5358         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5359                 enables |= tx_rings + ring_grps ?
5360                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
5361                            FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5362         } else {
5363                 enables |= cp_rings ?
5364                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
5365                            FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5366                 enables |= ring_grps ?
5367                            FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
5368         }
5369         enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
5370         enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
5371
5372         req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
5373         req->num_tx_rings = cpu_to_le16(tx_rings);
5374         req->num_rx_rings = cpu_to_le16(rx_rings);
5375         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5376                 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5377                 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5378         } else {
5379                 req->num_cmpl_rings = cpu_to_le16(cp_rings);
5380                 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5381                 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
5382         }
5383         req->num_stat_ctxs = cpu_to_le16(stats);
5384         req->num_vnics = cpu_to_le16(vnics);
5385
5386         req->enables = cpu_to_le32(enables);
5387 }
5388
5389 static int
5390 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5391                            int ring_grps, int cp_rings, int stats, int vnics)
5392 {
5393         struct hwrm_func_cfg_input req = {0};
5394         int rc;
5395
5396         __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5397                                      cp_rings, stats, vnics);
5398         if (!req.enables)
5399                 return 0;
5400
5401         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5402         if (rc)
5403                 return -ENOMEM;
5404
5405         if (bp->hwrm_spec_code < 0x10601)
5406                 bp->hw_resc.resv_tx_rings = tx_rings;
5407
5408         rc = bnxt_hwrm_get_rings(bp);
5409         return rc;
5410 }
5411
5412 static int
5413 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5414                            int ring_grps, int cp_rings, int stats, int vnics)
5415 {
5416         struct hwrm_func_vf_cfg_input req = {0};
5417         int rc;
5418
5419         if (!BNXT_NEW_RM(bp)) {
5420                 bp->hw_resc.resv_tx_rings = tx_rings;
5421                 return 0;
5422         }
5423
5424         __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5425                                      cp_rings, stats, vnics);
5426         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5427         if (rc)
5428                 return -ENOMEM;
5429
5430         rc = bnxt_hwrm_get_rings(bp);
5431         return rc;
5432 }
5433
5434 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
5435                                    int cp, int stat, int vnic)
5436 {
5437         if (BNXT_PF(bp))
5438                 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
5439                                                   vnic);
5440         else
5441                 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
5442                                                   vnic);
5443 }
5444
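/* Number of NQs (notification queues / MSI-X vectors) needed, including
 * any vectors set aside for the ULP (RDMA) driver.
 */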
5445 int bnxt_nq_rings_in_use(struct bnxt *bp)
5446 {
5447         int cp = bp->cp_nr_rings;
5448         int ulp_msix, ulp_base;
5449
5450         ulp_msix = bnxt_get_ulp_msix_num(bp);
5451         if (ulp_msix) {
5452                 ulp_base = bnxt_get_ulp_msix_base(bp);
5453                 cp += ulp_msix;
5454                 if ((ulp_base + ulp_msix) > cp)
5455                         cp = ulp_base + ulp_msix;
5456         }
5457         return cp;
5458 }
5459
5460 static int bnxt_cp_rings_in_use(struct bnxt *bp)
5461 {
5462         int cp;
5463
5464         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5465                 return bnxt_nq_rings_in_use(bp);
5466
5467         cp = bp->tx_nr_rings + bp->rx_nr_rings;
5468         return cp;
5469 }
5470
5471 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
5472 {
5473         return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
5474 }
5475
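/* Return true if the rings and related resources currently needed by the
 * driver differ from what has been reserved with the firmware.
 */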
5476 static bool bnxt_need_reserve_rings(struct bnxt *bp)
5477 {
5478         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5479         int cp = bnxt_cp_rings_in_use(bp);
5480         int nq = bnxt_nq_rings_in_use(bp);
5481         int rx = bp->rx_nr_rings, stat;
5482         int vnic = 1, grp = rx;
5483
5484         if (bp->hwrm_spec_code < 0x10601)
5485                 return false;
5486
5487         if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
5488                 return true;
5489
5490         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
5491                 vnic = rx + 1;
5492         if (bp->flags & BNXT_FLAG_AGG_RINGS)
5493                 rx <<= 1;
5494         stat = bnxt_get_func_stat_ctxs(bp);
5495         if (BNXT_NEW_RM(bp) &&
5496             (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
5497              hw_resc->resv_irqs < nq || hw_resc->resv_vnics != vnic ||
5498              hw_resc->resv_stat_ctxs != stat ||
5499              (hw_resc->resv_hw_ring_grps != grp &&
5500               !(bp->flags & BNXT_FLAG_CHIP_P5))))
5501                 return true;
5502         return false;
5503 }
5504
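/* Reserve rings and related resources with the firmware, then trim the
 * driver's ring counts down to what was actually granted.  Returns -ENOMEM
 * if any required resource ends up at zero.
 */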
5505 static int __bnxt_reserve_rings(struct bnxt *bp)
5506 {
5507         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5508         int cp = bnxt_nq_rings_in_use(bp);
5509         int tx = bp->tx_nr_rings;
5510         int rx = bp->rx_nr_rings;
5511         int grp, rx_rings, rc;
5512         int vnic = 1, stat;
5513         bool sh = false;
5514
5515         if (!bnxt_need_reserve_rings(bp))
5516                 return 0;
5517
5518         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5519                 sh = true;
5520         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
5521                 vnic = rx + 1;
5522         if (bp->flags & BNXT_FLAG_AGG_RINGS)
5523                 rx <<= 1;
5524         grp = bp->rx_nr_rings;
5525         stat = bnxt_get_func_stat_ctxs(bp);
5526
5527         rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
5528         if (rc)
5529                 return rc;
5530
5531         tx = hw_resc->resv_tx_rings;
5532         if (BNXT_NEW_RM(bp)) {
5533                 rx = hw_resc->resv_rx_rings;
5534                 cp = hw_resc->resv_irqs;
5535                 grp = hw_resc->resv_hw_ring_grps;
5536                 vnic = hw_resc->resv_vnics;
5537                 stat = hw_resc->resv_stat_ctxs;
5538         }
5539
5540         rx_rings = rx;
5541         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
5542                 if (rx >= 2) {
5543                         rx_rings = rx >> 1;
5544                 } else {
5545                         if (netif_running(bp->dev))
5546                                 return -ENOMEM;
5547
5548                         bp->flags &= ~BNXT_FLAG_AGG_RINGS;
5549                         bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
5550                         bp->dev->hw_features &= ~NETIF_F_LRO;
5551                         bp->dev->features &= ~NETIF_F_LRO;
5552                         bnxt_set_ring_params(bp);
5553                 }
5554         }
5555         rx_rings = min_t(int, rx_rings, grp);
5556         cp = min_t(int, cp, bp->cp_nr_rings);
5557         if (stat > bnxt_get_ulp_stat_ctxs(bp))
5558                 stat -= bnxt_get_ulp_stat_ctxs(bp);
5559         cp = min_t(int, cp, stat);
5560         rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
5561         if (bp->flags & BNXT_FLAG_AGG_RINGS)
5562                 rx = rx_rings << 1;
5563         cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
5564         bp->tx_nr_rings = tx;
5565         bp->rx_nr_rings = rx_rings;
5566         bp->cp_nr_rings = cp;
5567
5568         if (!tx || !rx || !cp || !grp || !vnic || !stat)
5569                 return -ENOMEM;
5570
5571         return rc;
5572 }
5573
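/* Ask the firmware whether the requested VF resources could be allocated,
 * using the *_ASSETS_TEST flags so that nothing is actually reserved.
 */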
5574 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5575                                     int ring_grps, int cp_rings, int stats,
5576                                     int vnics)
5577 {
5578         struct hwrm_func_vf_cfg_input req = {0};
5579         u32 flags;
5580         int rc;
5581
5582         if (!BNXT_NEW_RM(bp))
5583                 return 0;
5584
5585         __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5586                                      cp_rings, stats, vnics);
5587         flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
5588                 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
5589                 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
5590                 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
5591                 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
5592                 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
5593         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5594                 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
5595
5596         req.flags = cpu_to_le32(flags);
5597         rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5598         if (rc)
5599                 return -ENOMEM;
5600         return 0;
5601 }
5602
5603 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5604                                     int ring_grps, int cp_rings, int stats,
5605                                     int vnics)
5606 {
5607         struct hwrm_func_cfg_input req = {0};
5608         u32 flags;
5609         int rc;
5610
5611         __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5612                                      cp_rings, stats, vnics);
5613         flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
5614         if (BNXT_NEW_RM(bp)) {
5615                 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
5616                          FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
5617                          FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
5618                          FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
5619                 if (bp->flags & BNXT_FLAG_CHIP_P5)
5620                         flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
5621                                  FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
5622                 else
5623                         flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
5624         }
5625
5626         req.flags = cpu_to_le32(flags);
5627         rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5628         if (rc)
5629                 return -ENOMEM;
5630         return 0;
5631 }
5632
5633 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5634                                  int ring_grps, int cp_rings, int stats,
5635                                  int vnics)
5636 {
5637         if (bp->hwrm_spec_code < 0x10801)
5638                 return 0;
5639
5640         if (BNXT_PF(bp))
5641                 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
5642                                                 ring_grps, cp_rings, stats,
5643                                                 vnics);
5644
5645         return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
5646                                         cp_rings, stats, vnics);
5647 }
5648
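/* Query the firmware's interrupt coalescing capabilities.  Legacy defaults
 * are filled in first and only overwritten if the HWRM spec is new enough
 * (1.9.2+) and the RING_AGGINT_QCAPS command succeeds.
 */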
5649 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
5650 {
5651         struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5652         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5653         struct hwrm_ring_aggint_qcaps_input req = {0};
5654         int rc;
5655
5656         coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
5657         coal_cap->num_cmpl_dma_aggr_max = 63;
5658         coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
5659         coal_cap->cmpl_aggr_dma_tmr_max = 65535;
5660         coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
5661         coal_cap->int_lat_tmr_min_max = 65535;
5662         coal_cap->int_lat_tmr_max_max = 65535;
5663         coal_cap->num_cmpl_aggr_int_max = 65535;
5664         coal_cap->timer_units = 80;
5665
5666         if (bp->hwrm_spec_code < 0x10902)
5667                 return;
5668
5669         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
5670         mutex_lock(&bp->hwrm_cmd_lock);
5671         rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5672         if (!rc) {
5673                 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
5674                 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
5675                 coal_cap->num_cmpl_dma_aggr_max =
5676                         le16_to_cpu(resp->num_cmpl_dma_aggr_max);
5677                 coal_cap->num_cmpl_dma_aggr_during_int_max =
5678                         le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
5679                 coal_cap->cmpl_aggr_dma_tmr_max =
5680                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
5681                 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
5682                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
5683                 coal_cap->int_lat_tmr_min_max =
5684                         le16_to_cpu(resp->int_lat_tmr_min_max);
5685                 coal_cap->int_lat_tmr_max_max =
5686                         le16_to_cpu(resp->int_lat_tmr_max_max);
5687                 coal_cap->num_cmpl_aggr_int_max =
5688                         le16_to_cpu(resp->num_cmpl_aggr_int_max);
5689                 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
5690         }
5691         mutex_unlock(&bp->hwrm_cmd_lock);
5692 }
5693
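/* Convert a coalescing time in microseconds to firmware timer units
 * (timer_units is in nanoseconds per unit, 80 by default).
 */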
5694 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
5695 {
5696         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5697
5698         return usec * 1000 / coal_cap->timer_units;
5699 }
5700
5701 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
5702         struct bnxt_coal *hw_coal,
5703         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
5704 {
5705         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5706         u32 cmpl_params = coal_cap->cmpl_params;
5707         u16 val, tmr, max, flags = 0;
5708
5709         max = hw_coal->bufs_per_record * 128;
5710         if (hw_coal->budget)
5711                 max = hw_coal->bufs_per_record * hw_coal->budget;
5712         max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
5713
5714         val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
5715         req->num_cmpl_aggr_int = cpu_to_le16(val);
5716
5717         val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
5718         req->num_cmpl_dma_aggr = cpu_to_le16(val);
5719
5720         val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
5721                       coal_cap->num_cmpl_dma_aggr_during_int_max);
5722         req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
5723
5724         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
5725         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
5726         req->int_lat_tmr_max = cpu_to_le16(tmr);
5727
5728         /* min timer set to 1/2 of interrupt timer */
5729         if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
5730                 val = tmr / 2;
5731                 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
5732                 req->int_lat_tmr_min = cpu_to_le16(val);
5733                 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
5734         }
5735
5736         /* buf timer set to 1/4 of interrupt timer */
5737         val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
5738         req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
5739
5740         if (cmpl_params &
5741             RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
5742                 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
5743                 val = clamp_t(u16, tmr, 1,
5744                               coal_cap->cmpl_aggr_dma_tmr_during_int_max);
5745                 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
5746                 req->enables |=
5747                         cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
5748         }
5749
5750         if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
5751                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
5752         if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
5753             hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
5754                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
5755         req->flags = cpu_to_le16(flags);
5756         req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
5757 }
5758
5759 /* Caller holds bp->hwrm_cmd_lock */
5760 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
5761                                    struct bnxt_coal *hw_coal)
5762 {
5763         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
5764         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5765         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
5766         u32 nq_params = coal_cap->nq_params;
5767         u16 tmr;
5768
5769         if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
5770                 return 0;
5771
5772         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
5773                                -1, -1);
5774         req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
5775         req.flags =
5776                 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
5777
5778         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
5779         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
5780         req.int_lat_tmr_min = cpu_to_le16(tmr);
5781         req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
5782         return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5783 }
5784
5785 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
5786 {
5787         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
5788         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5789         struct bnxt_coal coal;
5790
5791         /* Tick values in microseconds.
5792          * 1 coal_buf x bufs_per_record = 1 completion record.
5793          */
5794         memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
5795
5796         coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
5797         coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
5798
5799         if (!bnapi->rx_ring)
5800                 return -ENODEV;
5801
5802         bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
5803                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
5804
5805         bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
5806
5807         req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
5808
5809         return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
5810                                  HWRM_CMD_TIMEOUT);
5811 }
5812
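/* Program interrupt coalescing for every completion ring, using the RX or
 * TX settings depending on the ring type.  On P5 chips, rings that carry
 * both RX and TX traffic get a second message for the TX completion ring,
 * and the NQ coalescing is updated as well.
 */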
5813 int bnxt_hwrm_set_coal(struct bnxt *bp)
5814 {
5815         int i, rc = 0;
5816         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
5817                                                            req_tx = {0}, *req;
5818
5819         bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
5820                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
5821         bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
5822                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
5823
5824         bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
5825         bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
5826
5827         mutex_lock(&bp->hwrm_cmd_lock);
5828         for (i = 0; i < bp->cp_nr_rings; i++) {
5829                 struct bnxt_napi *bnapi = bp->bnapi[i];
5830                 struct bnxt_coal *hw_coal;
5831                 u16 ring_id;
5832
5833                 req = &req_rx;
5834                 if (!bnapi->rx_ring) {
5835                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
5836                         req = &req_tx;
5837                 } else {
5838                         ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
5839                 }
5840                 req->ring_id = cpu_to_le16(ring_id);
5841
5842                 rc = _hwrm_send_message(bp, req, sizeof(*req),
5843                                         HWRM_CMD_TIMEOUT);
5844                 if (rc)
5845                         break;
5846
5847                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5848                         continue;
5849
5850                 if (bnapi->rx_ring && bnapi->tx_ring) {
5851                         req = &req_tx;
5852                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
5853                         req->ring_id = cpu_to_le16(ring_id);
5854                         rc = _hwrm_send_message(bp, req, sizeof(*req),
5855                                                 HWRM_CMD_TIMEOUT);
5856                         if (rc)
5857                                 break;
5858                 }
5859                 if (bnapi->rx_ring)
5860                         hw_coal = &bp->rx_coal;
5861                 else
5862                         hw_coal = &bp->tx_coal;
5863                 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
5864         }
5865         mutex_unlock(&bp->hwrm_cmd_lock);
5866         return rc;
5867 }
5868
5869 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
5870 {
5871         int rc = 0, i;
5872         struct hwrm_stat_ctx_free_input req = {0};
5873
5874         if (!bp->bnapi)
5875                 return 0;
5876
5877         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5878                 return 0;
5879
5880         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
5881
5882         mutex_lock(&bp->hwrm_cmd_lock);
5883         for (i = 0; i < bp->cp_nr_rings; i++) {
5884                 struct bnxt_napi *bnapi = bp->bnapi[i];
5885                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5886
5887                 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
5888                         req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
5889
5890                         rc = _hwrm_send_message(bp, &req, sizeof(req),
5891                                                 HWRM_CMD_TIMEOUT);
5892                         if (rc)
5893                                 break;
5894
5895                         cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
5896                 }
5897         }
5898         mutex_unlock(&bp->hwrm_cmd_lock);
5899         return rc;
5900 }
5901
5902 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
5903 {
5904         int rc = 0, i;
5905         struct hwrm_stat_ctx_alloc_input req = {0};
5906         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5907
5908         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5909                 return 0;
5910
5911         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
5912
5913         req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
5914
5915         mutex_lock(&bp->hwrm_cmd_lock);
5916         for (i = 0; i < bp->cp_nr_rings; i++) {
5917                 struct bnxt_napi *bnapi = bp->bnapi[i];
5918                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5919
5920                 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
5921
5922                 rc = _hwrm_send_message(bp, &req, sizeof(req),
5923                                         HWRM_CMD_TIMEOUT);
5924                 if (rc)
5925                         break;
5926
5927                 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
5928
5929                 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
5930         }
5931         mutex_unlock(&bp->hwrm_cmd_lock);
5932         return rc;
5933 }
5934
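/* Query the current function configuration: VF VLAN, DCBX/LLDP agent
 * capabilities, multi-host mode, NPAR partition type, bridge (EVB) mode
 * and the maximum configured MTU.
 */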
5935 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
5936 {
5937         struct hwrm_func_qcfg_input req = {0};
5938         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5939         u16 flags;
5940         int rc;
5941
5942         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5943         req.fid = cpu_to_le16(0xffff);
5944         mutex_lock(&bp->hwrm_cmd_lock);
5945         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5946         if (rc)
5947                 goto func_qcfg_exit;
5948
5949 #ifdef CONFIG_BNXT_SRIOV
5950         if (BNXT_VF(bp)) {
5951                 struct bnxt_vf_info *vf = &bp->vf;
5952
5953                 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
5954         }
5955 #endif
5956         flags = le16_to_cpu(resp->flags);
5957         if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
5958                      FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
5959                 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
5960                 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
5961                         bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
5962         }
5963         if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
5964                 bp->flags |= BNXT_FLAG_MULTI_HOST;
5965
5966         switch (resp->port_partition_type) {
5967         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
5968         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
5969         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
5970                 bp->port_partition_type = resp->port_partition_type;
5971                 break;
5972         }
5973         if (bp->hwrm_spec_code < 0x10707 ||
5974             resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
5975                 bp->br_mode = BRIDGE_MODE_VEB;
5976         else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
5977                 bp->br_mode = BRIDGE_MODE_VEPA;
5978         else
5979                 bp->br_mode = BRIDGE_MODE_UNDEF;
5980
5981         bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
5982         if (!bp->max_mtu)
5983                 bp->max_mtu = BNXT_MAX_MTU;
5984
5985 func_qcfg_exit:
5986         mutex_unlock(&bp->hwrm_cmd_lock);
5987         return rc;
5988 }
5989
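/* Query the sizes of the context (backing store) memory regions that the
 * host must allocate on chips that need it, and cache them in bp->ctx.
 * A failure from older firmware is treated as "no backing store needed".
 */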
5990 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
5991 {
5992         struct hwrm_func_backing_store_qcaps_input req = {0};
5993         struct hwrm_func_backing_store_qcaps_output *resp =
5994                 bp->hwrm_cmd_resp_addr;
5995         int rc;
5996
5997         if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
5998                 return 0;
5999
6000         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
6001         mutex_lock(&bp->hwrm_cmd_lock);
6002         rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6003         if (!rc) {
6004                 struct bnxt_ctx_pg_info *ctx_pg;
6005                 struct bnxt_ctx_mem_info *ctx;
6006                 int i;
6007
6008                 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6009                 if (!ctx) {
6010                         rc = -ENOMEM;
6011                         goto ctx_err;
6012                 }
6013                 ctx_pg = kzalloc(sizeof(*ctx_pg) * (bp->max_q + 1), GFP_KERNEL);
6014                 if (!ctx_pg) {
6015                         kfree(ctx);
6016                         rc = -ENOMEM;
6017                         goto ctx_err;
6018                 }
6019                 for (i = 0; i < bp->max_q + 1; i++, ctx_pg++)
6020                         ctx->tqm_mem[i] = ctx_pg;
6021
6022                 bp->ctx = ctx;
6023                 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6024                 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6025                 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6026                 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6027                 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6028                 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6029                 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6030                 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6031                 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6032                 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6033                 ctx->vnic_max_vnic_entries =
6034                         le16_to_cpu(resp->vnic_max_vnic_entries);
6035                 ctx->vnic_max_ring_table_entries =
6036                         le16_to_cpu(resp->vnic_max_ring_table_entries);
6037                 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6038                 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6039                 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6040                 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6041                 ctx->tqm_min_entries_per_ring =
6042                         le32_to_cpu(resp->tqm_min_entries_per_ring);
6043                 ctx->tqm_max_entries_per_ring =
6044                         le32_to_cpu(resp->tqm_max_entries_per_ring);
6045                 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6046                 if (!ctx->tqm_entries_multiple)
6047                         ctx->tqm_entries_multiple = 1;
6048                 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6049                 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
6050                 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6051                 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
6052         } else {
6053                 rc = 0;
6054         }
6055 ctx_err:
6056         mutex_unlock(&bp->hwrm_cmd_lock);
6057         return rc;
6058 }
6059
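/* Encode the page size and page-table depth of a context memory region
 * into the pg_size/lvl attribute byte and point pg_dir at either the page
 * table or the single data page.
 */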
6060 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6061                                   __le64 *pg_dir)
6062 {
6063         u8 pg_size = 0;
6064
6065         if (BNXT_PAGE_SHIFT == 13)
6066                 pg_size = 1 << 4;
6067         else if (BNXT_PAGE_SHIFT == 16)
6068                 pg_size = 2 << 4;
6069
6070         *pg_attr = pg_size;
6071         if (rmem->depth >= 1) {
6072                 if (rmem->depth == 2)
6073                         *pg_attr |= 2;
6074                 else
6075                         *pg_attr |= 1;
6076                 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6077         } else {
6078                 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6079         }
6080 }
6081
6082 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES                 \
6083         (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |                \
6084          FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |               \
6085          FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |                \
6086          FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |              \
6087          FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
6088
6089 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6090 {
6091         struct hwrm_func_backing_store_cfg_input req = {0};
6092         struct bnxt_ctx_mem_info *ctx = bp->ctx;
6093         struct bnxt_ctx_pg_info *ctx_pg;
6094         __le32 *num_entries;
6095         __le64 *pg_dir;
6096         u8 *pg_attr;
6097         int i, rc;
6098         u32 ena;
6099
6100         if (!ctx)
6101                 return 0;
6102
6103         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
6104         req.enables = cpu_to_le32(enables);
6105
6106         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
6107                 ctx_pg = &ctx->qp_mem;
6108                 req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
6109                 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
6110                 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
6111                 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
6112                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6113                                       &req.qpc_pg_size_qpc_lvl,
6114                                       &req.qpc_page_dir);
6115         }
6116         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
6117                 ctx_pg = &ctx->srq_mem;
6118                 req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
6119                 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
6120                 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
6121                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6122                                       &req.srq_pg_size_srq_lvl,
6123                                       &req.srq_page_dir);
6124         }
6125         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
6126                 ctx_pg = &ctx->cq_mem;
6127                 req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
6128                 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
6129                 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
6130                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
6131                                       &req.cq_page_dir);
6132         }
6133         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
6134                 ctx_pg = &ctx->vnic_mem;
6135                 req.vnic_num_vnic_entries =
6136                         cpu_to_le16(ctx->vnic_max_vnic_entries);
6137                 req.vnic_num_ring_table_entries =
6138                         cpu_to_le16(ctx->vnic_max_ring_table_entries);
6139                 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
6140                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6141                                       &req.vnic_pg_size_vnic_lvl,
6142                                       &req.vnic_page_dir);
6143         }
6144         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
6145                 ctx_pg = &ctx->stat_mem;
6146                 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
6147                 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
6148                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6149                                       &req.stat_pg_size_stat_lvl,
6150                                       &req.stat_page_dir);
6151         }
6152         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
6153                 ctx_pg = &ctx->mrav_mem;
6154                 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
6155                 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
6156                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6157                                       &req.mrav_pg_size_mrav_lvl,
6158                                       &req.mrav_page_dir);
6159         }
6160         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
6161                 ctx_pg = &ctx->tim_mem;
6162                 req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
6163                 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
6164                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6165                                       &req.tim_pg_size_tim_lvl,
6166                                       &req.tim_page_dir);
6167         }
6168         for (i = 0, num_entries = &req.tqm_sp_num_entries,
6169              pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
6170              pg_dir = &req.tqm_sp_page_dir,
6171              ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
6172              i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
6173                 if (!(enables & ena))
6174                         continue;
6175
6176                 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
6177                 ctx_pg = ctx->tqm_mem[i];
6178                 *num_entries = cpu_to_le32(ctx_pg->entries);
6179                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
6180         }
6181         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6182         if (rc)
6183                 rc = -EIO;
6184         return rc;
6185 }
6186
6187 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
6188                                   struct bnxt_ctx_pg_info *ctx_pg)
6189 {
6190         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6191
6192         rmem->page_size = BNXT_PAGE_SIZE;
6193         rmem->pg_arr = ctx_pg->ctx_pg_arr;
6194         rmem->dma_arr = ctx_pg->ctx_dma_arr;
6195         rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
6196         if (rmem->depth >= 1)
6197                 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
6198         return bnxt_alloc_ring(bp, rmem);
6199 }
6200
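/* Allocate a context memory region of mem_size bytes.  Regions larger than
 * MAX_CTX_PAGES pages (or when depth > 1 is requested) use two levels of
 * page tables.
 */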
6201 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
6202                                   struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
6203                                   u8 depth)
6204 {
6205         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6206         int rc;
6207
6208         if (!mem_size)
6209                 return 0;
6210
6211         ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6212         if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
6213                 ctx_pg->nr_pages = 0;
6214                 return -EINVAL;
6215         }
6216         if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
6217                 int nr_tbls, i;
6218
6219                 rmem->depth = 2;
6220                 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
6221                                              GFP_KERNEL);
6222                 if (!ctx_pg->ctx_pg_tbl)
6223                         return -ENOMEM;
6224                 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
6225                 rmem->nr_pages = nr_tbls;
6226                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6227                 if (rc)
6228                         return rc;
6229                 for (i = 0; i < nr_tbls; i++) {
6230                         struct bnxt_ctx_pg_info *pg_tbl;
6231
6232                         pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
6233                         if (!pg_tbl)
6234                                 return -ENOMEM;
6235                         ctx_pg->ctx_pg_tbl[i] = pg_tbl;
6236                         rmem = &pg_tbl->ring_mem;
6237                         rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
6238                         rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
6239                         rmem->depth = 1;
6240                         rmem->nr_pages = MAX_CTX_PAGES;
6241                         if (i == (nr_tbls - 1)) {
6242                                 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
6243
6244                                 if (rem)
6245                                         rmem->nr_pages = rem;
6246                         }
6247                         rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
6248                         if (rc)
6249                                 break;
6250                 }
6251         } else {
6252                 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6253                 if (rmem->nr_pages > 1 || depth)
6254                         rmem->depth = 1;
6255                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6256         }
6257         return rc;
6258 }
6259
6260 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
6261                                   struct bnxt_ctx_pg_info *ctx_pg)
6262 {
6263         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6264
6265         if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
6266             ctx_pg->ctx_pg_tbl) {
6267                 int i, nr_tbls = rmem->nr_pages;
6268
6269                 for (i = 0; i < nr_tbls; i++) {
6270                         struct bnxt_ctx_pg_info *pg_tbl;
6271                         struct bnxt_ring_mem_info *rmem2;
6272
6273                         pg_tbl = ctx_pg->ctx_pg_tbl[i];
6274                         if (!pg_tbl)
6275                                 continue;
6276                         rmem2 = &pg_tbl->ring_mem;
6277                         bnxt_free_ring(bp, rmem2);
6278                         ctx_pg->ctx_pg_arr[i] = NULL;
6279                         kfree(pg_tbl);
6280                         ctx_pg->ctx_pg_tbl[i] = NULL;
6281                 }
6282                 kfree(ctx_pg->ctx_pg_tbl);
6283                 ctx_pg->ctx_pg_tbl = NULL;
6284         }
6285         bnxt_free_ring(bp, rmem);
6286         ctx_pg->nr_pages = 0;
6287 }
6288
6289 static void bnxt_free_ctx_mem(struct bnxt *bp)
6290 {
6291         struct bnxt_ctx_mem_info *ctx = bp->ctx;
6292         int i;
6293
6294         if (!ctx)
6295                 return;
6296
6297         if (ctx->tqm_mem[0]) {
6298                 for (i = 0; i < bp->max_q + 1; i++)
6299                         bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
6300                 kfree(ctx->tqm_mem[0]);
6301                 ctx->tqm_mem[0] = NULL;
6302         }
6303
6304         bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
6305         bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
6306         bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
6307         bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
6308         bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
6309         bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
6310         bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
6311         ctx->flags &= ~BNXT_CTX_FLAG_INITED;
6312 }
6313
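/* Size and allocate all backing store regions (QP, SRQ, CQ, VNIC, stats,
 * TQM and, if RoCE is supported, MR/AV and timer), then pass them to the
 * firmware with FUNC_BACKING_STORE_CFG.
 */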
6314 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
6315 {
6316         struct bnxt_ctx_pg_info *ctx_pg;
6317         struct bnxt_ctx_mem_info *ctx;
6318         u32 mem_size, ena, entries;
6319         u32 extra_srqs = 0;
6320         u32 extra_qps = 0;
6321         u8 pg_lvl = 1;
6322         int i, rc;
6323
6324         rc = bnxt_hwrm_func_backing_store_qcaps(bp);
6325         if (rc) {
6326                 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
6327                            rc);
6328                 return rc;
6329         }
6330         ctx = bp->ctx;
6331         if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
6332                 return 0;
6333
6334         if (bp->flags & BNXT_FLAG_ROCE_CAP) {
6335                 pg_lvl = 2;
6336                 extra_qps = 65536;
6337                 extra_srqs = 8192;
6338         }
6339
6340         ctx_pg = &ctx->qp_mem;
6341         ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
6342                           extra_qps;
6343         mem_size = ctx->qp_entry_size * ctx_pg->entries;
6344         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
6345         if (rc)
6346                 return rc;
6347
6348         ctx_pg = &ctx->srq_mem;
6349         ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
6350         mem_size = ctx->srq_entry_size * ctx_pg->entries;
6351         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
6352         if (rc)
6353                 return rc;
6354
6355         ctx_pg = &ctx->cq_mem;
6356         ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
6357         mem_size = ctx->cq_entry_size * ctx_pg->entries;
6358         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
6359         if (rc)
6360                 return rc;
6361
6362         ctx_pg = &ctx->vnic_mem;
6363         ctx_pg->entries = ctx->vnic_max_vnic_entries +
6364                           ctx->vnic_max_ring_table_entries;
6365         mem_size = ctx->vnic_entry_size * ctx_pg->entries;
6366         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6367         if (rc)
6368                 return rc;
6369
6370         ctx_pg = &ctx->stat_mem;
6371         ctx_pg->entries = ctx->stat_max_entries;
6372         mem_size = ctx->stat_entry_size * ctx_pg->entries;
6373         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6374         if (rc)
6375                 return rc;
6376
6377         ena = 0;
6378         if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
6379                 goto skip_rdma;
6380
6381         ctx_pg = &ctx->mrav_mem;
6382         ctx_pg->entries = extra_qps * 4;
6383         mem_size = ctx->mrav_entry_size * ctx_pg->entries;
6384         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2);
6385         if (rc)
6386                 return rc;
6387         ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
6388
6389         ctx_pg = &ctx->tim_mem;
6390         ctx_pg->entries = ctx->qp_mem.entries;
6391         mem_size = ctx->tim_entry_size * ctx_pg->entries;
6392         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6393         if (rc)
6394                 return rc;
6395         ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
6396
6397 skip_rdma:
6398         entries = ctx->qp_max_l2_entries + extra_qps;
6399         entries = roundup(entries, ctx->tqm_entries_multiple);
6400         entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
6401                           ctx->tqm_max_entries_per_ring);
6402         for (i = 0; i < bp->max_q + 1; i++) {
6403                 ctx_pg = ctx->tqm_mem[i];
6404                 ctx_pg->entries = entries;
6405                 mem_size = ctx->tqm_entry_size * entries;
6406                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6407                 if (rc)
6408                         return rc;
6409                 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
6410         }
6411         ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
6412         rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
6413         if (rc)
6414                 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
6415                            rc);
6416         else
6417                 ctx->flags |= BNXT_CTX_FLAG_INITED;
6418
6419         return 0;
6420 }
6421
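/* Query the min/max resource ranges (rings, stat contexts, VNICs, etc.)
 * available to this function.  With @all set, everything is cached in
 * bp->hw_resc; otherwise only max_tx_sch_inputs is refreshed.
 */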
6422 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
6423 {
6424         struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6425         struct hwrm_func_resource_qcaps_input req = {0};
6426         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6427         int rc;
6428
6429         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
6430         req.fid = cpu_to_le16(0xffff);
6431
6432         mutex_lock(&bp->hwrm_cmd_lock);
6433         rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
6434                                        HWRM_CMD_TIMEOUT);
6435         if (rc) {
6436                 rc = -EIO;
6437                 goto hwrm_func_resc_qcaps_exit;
6438         }
6439
6440         hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
6441         if (!all)
6442                 goto hwrm_func_resc_qcaps_exit;
6443
6444         hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
6445         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6446         hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
6447         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6448         hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
6449         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6450         hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
6451         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6452         hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
6453         hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
6454         hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
6455         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6456         hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
6457         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6458         hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
6459         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6460
6461         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6462                 u16 max_msix = le16_to_cpu(resp->max_msix);
6463
6464                 hw_resc->max_nqs = max_msix;
6465                 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
6466         }
6467
6468         if (BNXT_PF(bp)) {
6469                 struct bnxt_pf_info *pf = &bp->pf;
6470
6471                 pf->vf_resv_strategy =
6472                         le16_to_cpu(resp->vf_reservation_strategy);
6473                 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
6474                         pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
6475         }
6476 hwrm_func_resc_qcaps_exit:
6477         mutex_unlock(&bp->hwrm_cmd_lock);
6478         return rc;
6479 }
6480
6481 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
6482 {
6483         int rc = 0;
6484         struct hwrm_func_qcaps_input req = {0};
6485         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6486         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6487         u32 flags;
6488
6489         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
6490         req.fid = cpu_to_le16(0xffff);
6491
6492         mutex_lock(&bp->hwrm_cmd_lock);
6493         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6494         if (rc)
6495                 goto hwrm_func_qcaps_exit;
6496
6497         flags = le32_to_cpu(resp->flags);
6498         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
6499                 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
6500         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
6501                 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
6502
6503         bp->tx_push_thresh = 0;
6504         if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
6505                 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
6506
6507         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6508         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6509         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6510         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6511         hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
6512         if (!hw_resc->max_hw_ring_grps)
6513                 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
6514         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6515         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6516         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6517
6518         if (BNXT_PF(bp)) {
6519                 struct bnxt_pf_info *pf = &bp->pf;
6520
6521                 pf->fw_fid = le16_to_cpu(resp->fid);
6522                 pf->port_id = le16_to_cpu(resp->port_id);
6523                 bp->dev->dev_port = pf->port_id;
6524                 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
6525                 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
6526                 pf->max_vfs = le16_to_cpu(resp->max_vfs);
6527                 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
6528                 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
6529                 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
6530                 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
6531                 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
6532                 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
6533                 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
6534                         bp->flags |= BNXT_FLAG_WOL_CAP;
6535         } else {
6536 #ifdef CONFIG_BNXT_SRIOV
6537                 struct bnxt_vf_info *vf = &bp->vf;
6538
6539                 vf->fw_fid = le16_to_cpu(resp->fid);
6540                 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
6541 #endif
6542         }
6543
6544 hwrm_func_qcaps_exit:
6545         mutex_unlock(&bp->hwrm_cmd_lock);
6546         return rc;
6547 }
6548
6549 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
6550
6551 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
6552 {
6553         int rc;
6554
6555         rc = __bnxt_hwrm_func_qcaps(bp);
6556         if (rc)
6557                 return rc;
6558         rc = bnxt_hwrm_queue_qportcfg(bp);
6559         if (rc) {
6560                 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
6561                 return rc;
6562         }
6563         if (bp->hwrm_spec_code >= 0x10803) {
6564                 rc = bnxt_alloc_ctx_mem(bp);
6565                 if (rc)
6566                         return rc;
6567                 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
6568                 if (!rc)
6569                         bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
6570         }
6571         return 0;
6572 }
6573
6574 static int bnxt_hwrm_func_reset(struct bnxt *bp)
6575 {
6576         struct hwrm_func_reset_input req = {0};
6577
6578         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
6579         req.enables = 0;
6580
6581         return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
6582 }
6583
6584 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
6585 {
6586         int rc = 0;
6587         struct hwrm_queue_qportcfg_input req = {0};
6588         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
6589         u8 i, j, *qptr;
6590         bool no_rdma;
6591
6592         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
6593
6594         mutex_lock(&bp->hwrm_cmd_lock);
6595         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6596         if (rc)
6597                 goto qportcfg_exit;
6598
6599         if (!resp->max_configurable_queues) {
6600                 rc = -EINVAL;
6601                 goto qportcfg_exit;
6602         }
6603         bp->max_tc = resp->max_configurable_queues;
6604         bp->max_lltc = resp->max_configurable_lossless_queues;
6605         if (bp->max_tc > BNXT_MAX_QUEUE)
6606                 bp->max_tc = BNXT_MAX_QUEUE;
6607
6608         no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
6609         qptr = &resp->queue_id0;
6610         for (i = 0, j = 0; i < bp->max_tc; i++) {
6611                 bp->q_info[j].queue_id = *qptr;
6612                 bp->q_ids[i] = *qptr++;
6613                 bp->q_info[j].queue_profile = *qptr++;
6614                 bp->tc_to_qidx[j] = j;
6615                 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
6616                     (no_rdma && BNXT_PF(bp)))
6617                         j++;
6618         }
6619         bp->max_q = bp->max_tc;
6620         bp->max_tc = max_t(u8, j, 1);
6621
6622         if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
6623                 bp->max_tc = 1;
6624
6625         if (bp->max_lltc > bp->max_tc)
6626                 bp->max_lltc = bp->max_tc;
6627
6628 qportcfg_exit:
6629         mutex_unlock(&bp->hwrm_cmd_lock);
6630         return rc;
6631 }
6632
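/* Query the HWRM interface and firmware versions, pick up the maximum
 * request lengths and default timeout, and record device capability flags
 * such as short command and Kong mailbox channel support.
 */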
6633 static int bnxt_hwrm_ver_get(struct bnxt *bp)
6634 {
6635         int rc;
6636         struct hwrm_ver_get_input req = {0};
6637         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
6638         u32 dev_caps_cfg;
6639
6640         bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
6641         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
6642         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
6643         req.hwrm_intf_min = HWRM_VERSION_MINOR;
6644         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
6645         mutex_lock(&bp->hwrm_cmd_lock);
6646         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6647         if (rc)
6648                 goto hwrm_ver_get_exit;
6649
6650         memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
6651
6652         bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
6653                              resp->hwrm_intf_min_8b << 8 |
6654                              resp->hwrm_intf_upd_8b;
6655         if (resp->hwrm_intf_maj_8b < 1) {
6656                 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
6657                             resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
6658                             resp->hwrm_intf_upd_8b);
6659                 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
6660         }
6661         snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
6662                  resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
6663                  resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
6664
6665         bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
6666         if (!bp->hwrm_cmd_timeout)
6667                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
6668
6669         if (resp->hwrm_intf_maj_8b >= 1) {
6670                 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
6671                 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
6672         }
6673         if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
6674                 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
6675
6676         bp->chip_num = le16_to_cpu(resp->chip_num);
6677         if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
6678             !resp->chip_metal)
6679                 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
6680
6681         dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
6682         if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
6683             (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
6684                 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
6685
6686         if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
6687                 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
6688
6689         if (dev_caps_cfg &
6690             VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
6691                 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
6692
6693         if (dev_caps_cfg &
6694             VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
6695                 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
6696
6697 hwrm_ver_get_exit:
6698         mutex_unlock(&bp->hwrm_cmd_lock);
6699         return rc;
6700 }
6701
6702 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
6703 {
6704         struct hwrm_fw_set_time_input req = {0};
6705         struct tm tm;
6706         time64_t now = ktime_get_real_seconds();
6707
6708         if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
6709             bp->hwrm_spec_code < 0x10400)
6710                 return -EOPNOTSUPP;
6711
6712         time64_to_tm(now, 0, &tm);
6713         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
6714         req.year = cpu_to_le16(1900 + tm.tm_year);
6715         req.month = 1 + tm.tm_mon;
6716         req.day = tm.tm_mday;
6717         req.hour = tm.tm_hour;
6718         req.minute = tm.tm_min;
6719         req.second = tm.tm_sec;
6720         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6721 }
6722
6723 static int bnxt_hwrm_port_qstats(struct bnxt *bp)
6724 {
6725         int rc;
6726         struct bnxt_pf_info *pf = &bp->pf;
6727         struct hwrm_port_qstats_input req = {0};
6728
6729         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
6730                 return 0;
6731
6732         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
6733         req.port_id = cpu_to_le16(pf->port_id);
6734         req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
6735         req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
6736         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6737         return rc;
6738 }
6739
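/* Collect extended port statistics.  If the firmware returns enough TX
 * stats to cover the PFC duration counters, also query and cache the
 * priority-to-CoS queue mapping.
 */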
6740 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
6741 {
6742         struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
6743         struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
6744         struct hwrm_port_qstats_ext_input req = {0};
6745         struct bnxt_pf_info *pf = &bp->pf;
6746         int rc;
6747
6748         if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
6749                 return 0;
6750
6751         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
6752         req.port_id = cpu_to_le16(pf->port_id);
6753         req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
6754         req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
6755         req.tx_stat_size = cpu_to_le16(sizeof(struct tx_port_stats_ext));
6756         req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
6757         mutex_lock(&bp->hwrm_cmd_lock);
6758         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6759         if (!rc) {
6760                 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
6761                 bp->fw_tx_stats_ext_size = le16_to_cpu(resp->tx_stat_size) / 8;
6762         } else {
6763                 bp->fw_rx_stats_ext_size = 0;
6764                 bp->fw_tx_stats_ext_size = 0;
6765         }
6766         if (bp->fw_tx_stats_ext_size <=
6767             offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
6768                 mutex_unlock(&bp->hwrm_cmd_lock);
6769                 bp->pri2cos_valid = 0;
6770                 return rc;
6771         }
6772
6773         bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
6774         req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
6775
6776         rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
6777         if (!rc) {
6778                 struct hwrm_queue_pri2cos_qcfg_output *resp2;
6779                 u8 *pri2cos;
6780                 int i, j;
6781
6782                 resp2 = bp->hwrm_cmd_resp_addr;
6783                 pri2cos = &resp2->pri0_cos_queue_id;
6784                 for (i = 0; i < 8; i++) {
6785                         u8 queue_id = pri2cos[i];
6786
6787                         for (j = 0; j < bp->max_q; j++) {
6788                                 if (bp->q_ids[j] == queue_id)
6789                                         bp->pri2cos[i] = j;
6790                         }
6791                 }
6792                 bp->pri2cos_valid = 1;
6793         }
6794         mutex_unlock(&bp->hwrm_cmd_lock);
6795         return rc;
6796 }
6797
6798 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
6799 {
6800         if (bp->vxlan_port_cnt) {
6801                 bnxt_hwrm_tunnel_dst_port_free(
6802                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
6803         }
6804         bp->vxlan_port_cnt = 0;
6805         if (bp->nge_port_cnt) {
6806                 bnxt_hwrm_tunnel_dst_port_free(
6807                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
6808         }
6809         bp->nge_port_cnt = 0;
6810 }
6811
6812 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
6813 {
6814         int rc, i;
6815         u32 tpa_flags = 0;
6816
6817         if (set_tpa)
6818                 tpa_flags = bp->flags & BNXT_FLAG_TPA;
6819         for (i = 0; i < bp->nr_vnics; i++) {
6820                 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
6821                 if (rc) {
6822                         netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
6823                                    i, rc);
6824                         return rc;
6825                 }
6826         }
6827         return 0;
6828 }
6829
6830 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
6831 {
6832         int i;
6833
6834         for (i = 0; i < bp->nr_vnics; i++)
6835                 bnxt_hwrm_vnic_set_rss(bp, i, false);
6836 }
6837
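/* Tear down firmware resources in reverse order of allocation: VNIC
 * filters, RSS contexts, TPA settings and VNICs first, then rings and
 * ring groups, and finally stat contexts and tunnel ports when the
 * interrupts are being re-initialized.
 */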
6838 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
6839                                     bool irq_re_init)
6840 {
6841         if (bp->vnic_info) {
6842                 bnxt_hwrm_clear_vnic_filter(bp);
6843                 /* clear all RSS settings before freeing the vnic ctx */
6844                 bnxt_hwrm_clear_vnic_rss(bp);
6845                 bnxt_hwrm_vnic_ctx_free(bp);
6846                 /* undo the vnic tpa settings before freeing the vnic */
6847                 if (bp->flags & BNXT_FLAG_TPA)
6848                         bnxt_set_tpa(bp, false);
6849                 bnxt_hwrm_vnic_free(bp);
6850         }
6851         bnxt_hwrm_ring_free(bp, close_path);
6852         bnxt_hwrm_ring_grp_free(bp);
6853         if (irq_re_init) {
6854                 bnxt_hwrm_stat_ctx_free(bp);
6855                 bnxt_hwrm_free_tunnel_ports(bp);
6856         }
6857 }
6858
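/* Set the embedded bridge (EVB) mode, VEB or VEPA, via HWRM_FUNC_CFG. */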
6859 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
6860 {
6861         struct hwrm_func_cfg_input req = {0};
6862         int rc;
6863
6864         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
6865         req.fid = cpu_to_le16(0xffff);
6866         req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
6867         if (br_mode == BRIDGE_MODE_VEB)
6868                 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
6869         else if (br_mode == BRIDGE_MODE_VEPA)
6870                 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
6871         else
6872                 return -EINVAL;
6873         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6874         if (rc)
6875                 rc = -EIO;
6876         return rc;
6877 }
6878
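/* Configure the device's cache line size option (64 or 128 bytes) via
 * HWRM_FUNC_CFG.  PF only, HWRM spec 1.8.3+.
 */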
6879 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
6880 {
6881         struct hwrm_func_cfg_input req = {0};
6882         int rc;
6883
6884         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
6885                 return 0;
6886
6887         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
6888         req.fid = cpu_to_le16(0xffff);
6889         req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
6890         req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
6891         if (size == 128)
6892                 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
6893
6894         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6895         if (rc)
6896                 rc = -EIO;
6897         return rc;
6898 }
6899
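/* Set up one VNIC on pre-P5 chips: allocate the RSS context (plus a
 * second context on Nitro A0), configure the VNIC and its ring group,
 * enable RSS, and enable header-data split when aggregation rings are
 * in use.
 */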
6900 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
6901 {
6902         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
6903         int rc;
6904
6905         if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
6906                 goto skip_rss_ctx;
6907
6908         /* allocate context for vnic */
6909         rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
6910         if (rc) {
6911                 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
6912                            vnic_id, rc);
6913                 goto vnic_setup_err;
6914         }
6915         bp->rsscos_nr_ctxs++;
6916
6917         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
6918                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
6919                 if (rc) {
6920                         netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
6921                                    vnic_id, rc);
6922                         goto vnic_setup_err;
6923                 }
6924                 bp->rsscos_nr_ctxs++;
6925         }
6926
6927 skip_rss_ctx:
6928         /* configure default vnic, ring grp */
6929         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
6930         if (rc) {
6931                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
6932                            vnic_id, rc);
6933                 goto vnic_setup_err;
6934         }
6935
6936         /* Enable RSS hashing on vnic */
6937         rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
6938         if (rc) {
6939                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
6940                            vnic_id, rc);
6941                 goto vnic_setup_err;
6942         }
6943
6944         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6945                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
6946                 if (rc) {
6947                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
6948                                    vnic_id, rc);
6949                 }
6950         }
6951
6952 vnic_setup_err:
6953         return rc;
6954 }
6955
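/* P5 chips use one RSS context per 64 RX rings; allocate them all
 * before enabling RSS and configuring the VNIC.
 */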
6956 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
6957 {
6958         int rc, i, nr_ctxs;
6959
6960         nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
6961         for (i = 0; i < nr_ctxs; i++) {
6962                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
6963                 if (rc) {
6964                         netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
6965                                    vnic_id, i, rc);
6966                         break;
6967                 }
6968                 bp->rsscos_nr_ctxs++;
6969         }
6970         if (i < nr_ctxs)
6971                 return -ENOMEM;
6972
6973         rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
6974         if (rc) {
6975                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
6976                            vnic_id, rc);
6977                 return rc;
6978         }
6979         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
6980         if (rc) {
6981                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
6982                            vnic_id, rc);
6983                 return rc;
6984         }
6985         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6986                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
6987                 if (rc) {
6988                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
6989                                    vnic_id, rc);
6990                 }
6991         }
6992         return rc;
6993 }
6994
6995 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
6996 {
6997         if (bp->flags & BNXT_FLAG_CHIP_P5)
6998                 return __bnxt_setup_vnic_p5(bp, vnic_id);
6999         else
7000                 return __bnxt_setup_vnic(bp, vnic_id);
7001 }
7002
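/* For aRFS, allocate one additional VNIC per RX ring (VNIC 0 remains
 * the default VNIC) and set each one up.
 */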
7003 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
7004 {
7005 #ifdef CONFIG_RFS_ACCEL
7006         int i, rc = 0;
7007
7008         for (i = 0; i < bp->rx_nr_rings; i++) {
7009                 struct bnxt_vnic_info *vnic;
7010                 u16 vnic_id = i + 1;
7011                 u16 ring_id = i;
7012
7013                 if (vnic_id >= bp->nr_vnics)
7014                         break;
7015
7016                 vnic = &bp->vnic_info[vnic_id];
7017                 vnic->flags |= BNXT_VNIC_RFS_FLAG;
7018                 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
7019                         vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
7020                 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
7021                 if (rc) {
7022                         netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
7023                                    vnic_id, rc);
7024                         break;
7025                 }
7026                 rc = bnxt_setup_vnic(bp, vnic_id);
7027                 if (rc)
7028                         break;
7029         }
7030         return rc;
7031 #else
7032         return 0;
7033 #endif
7034 }
7035
7036 /* Allow PF and VF with default VLAN to be in promiscuous mode */
7037 static bool bnxt_promisc_ok(struct bnxt *bp)
7038 {
7039 #ifdef CONFIG_BNXT_SRIOV
7040         if (BNXT_VF(bp) && !bp->vf.vlan)
7041                 return false;
7042 #endif
7043         return true;
7044 }
7045
7046 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
7047 {
7048         int rc = 0;
7049
7050         rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
7051         if (rc) {
7052                 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
7053                            rc);
7054                 return rc;
7055         }
7056
7057         rc = bnxt_hwrm_vnic_cfg(bp, 1);
7058         if (rc) {
7059                 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
7060                            rc);
7061                 return rc;
7062         }
7063         return rc;
7064 }
7065
7066 static int bnxt_cfg_rx_mode(struct bnxt *);
7067 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
7068
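/* Bring up the data path in firmware: allocate stat contexts, rings,
 * ring groups and the default VNIC, program the unicast MAC filter and
 * RX mask, and apply interrupt coalescing settings.
 */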
7069 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
7070 {
7071         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
7072         int rc = 0;
7073         unsigned int rx_nr_rings = bp->rx_nr_rings;
7074
7075         if (irq_re_init) {
7076                 rc = bnxt_hwrm_stat_ctx_alloc(bp);
7077                 if (rc) {
7078                         netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
7079                                    rc);
7080                         goto err_out;
7081                 }
7082         }
7083
7084         rc = bnxt_hwrm_ring_alloc(bp);
7085         if (rc) {
7086                 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
7087                 goto err_out;
7088         }
7089
7090         rc = bnxt_hwrm_ring_grp_alloc(bp);
7091         if (rc) {
7092                 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
7093                 goto err_out;
7094         }
7095
7096         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7097                 rx_nr_rings--;
7098
7099         /* default vnic 0 */
7100         rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
7101         if (rc) {
7102                 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
7103                 goto err_out;
7104         }
7105
7106         rc = bnxt_setup_vnic(bp, 0);
7107         if (rc)
7108                 goto err_out;
7109
7110         if (bp->flags & BNXT_FLAG_RFS) {
7111                 rc = bnxt_alloc_rfs_vnics(bp);
7112                 if (rc)
7113                         goto err_out;
7114         }
7115
7116         if (bp->flags & BNXT_FLAG_TPA) {
7117                 rc = bnxt_set_tpa(bp, true);
7118                 if (rc)
7119                         goto err_out;
7120         }
7121
7122         if (BNXT_VF(bp))
7123                 bnxt_update_vf_mac(bp);
7124
7125         /* Filter for default vnic 0 */
7126         rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
7127         if (rc) {
7128                 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
7129                 goto err_out;
7130         }
7131         vnic->uc_filter_count = 1;
7132
7133         vnic->rx_mask = 0;
7134         if (bp->dev->flags & IFF_BROADCAST)
7135                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
7136
7137         if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
7138                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
7139
7140         if (bp->dev->flags & IFF_ALLMULTI) {
7141                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
7142                 vnic->mc_list_count = 0;
7143         } else {
7144                 u32 mask = 0;
7145
7146                 bnxt_mc_list_updated(bp, &mask);
7147                 vnic->rx_mask |= mask;
7148         }
7149
7150         rc = bnxt_cfg_rx_mode(bp);
7151         if (rc)
7152                 goto err_out;
7153
7154         rc = bnxt_hwrm_set_coal(bp);
7155         if (rc)
7156                 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
7157                                 rc);
7158
7159         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7160                 rc = bnxt_setup_nitroa0_vnic(bp);
7161                 if (rc)
7162                         netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
7163                                    rc);
7164         }
7165
7166         if (BNXT_VF(bp)) {
7167                 bnxt_hwrm_func_qcfg(bp);
7168                 netdev_update_features(bp->dev);
7169         }
7170
7171         return 0;
7172
7173 err_out:
7174         bnxt_hwrm_resource_free(bp, 0, true);
7175
7176         return rc;
7177 }
7178
7179 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
7180 {
7181         bnxt_hwrm_resource_free(bp, 1, irq_re_init);
7182         return 0;
7183 }
7184
7185 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
7186 {
7187         bnxt_init_cp_rings(bp);
7188         bnxt_init_rx_rings(bp);
7189         bnxt_init_tx_rings(bp);
7190         bnxt_init_ring_grps(bp, irq_re_init);
7191         bnxt_init_vnics(bp);
7192
7193         return bnxt_init_chip(bp, irq_re_init);
7194 }
7195
7196 static int bnxt_set_real_num_queues(struct bnxt *bp)
7197 {
7198         int rc;
7199         struct net_device *dev = bp->dev;
7200
7201         rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
7202                                           bp->tx_nr_rings_xdp);
7203         if (rc)
7204                 return rc;
7205
7206         rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
7207         if (rc)
7208                 return rc;
7209
7210 #ifdef CONFIG_RFS_ACCEL
7211         if (bp->flags & BNXT_FLAG_RFS)
7212                 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
7213 #endif
7214
7215         return rc;
7216 }
7217
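/* Fit the requested RX/TX ring counts into "max" completion rings.
 * With shared rings each count is simply capped at max; otherwise the
 * larger count is reduced until rx + tx <= max.
 */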
7218 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7219                            bool shared)
7220 {
7221         int _rx = *rx, _tx = *tx;
7222
7223         if (shared) {
7224                 *rx = min_t(int, _rx, max);
7225                 *tx = min_t(int, _tx, max);
7226         } else {
7227                 if (max < 2)
7228                         return -ENOMEM;
7229
7230                 while (_rx + _tx > max) {
7231                         if (_rx > _tx && _rx > 1)
7232                                 _rx--;
7233                         else if (_tx > 1)
7234                                 _tx--;
7235                 }
7236                 *rx = _rx;
7237                 *tx = _tx;
7238         }
7239         return 0;
7240 }
7241
7242 static void bnxt_setup_msix(struct bnxt *bp)
7243 {
7244         const int len = sizeof(bp->irq_tbl[0].name);
7245         struct net_device *dev = bp->dev;
7246         int tcs, i;
7247
7248         tcs = netdev_get_num_tc(dev);
7249         if (tcs > 1) {
7250                 int i, off, count;
7251
7252                 for (i = 0; i < tcs; i++) {
7253                         count = bp->tx_nr_rings_per_tc;
7254                         off = i * count;
7255                         netdev_set_tc_queue(dev, i, count, off);
7256                 }
7257         }
7258
7259         for (i = 0; i < bp->cp_nr_rings; i++) {
7260                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7261                 char *attr;
7262
7263                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7264                         attr = "TxRx";
7265                 else if (i < bp->rx_nr_rings)
7266                         attr = "rx";
7267                 else
7268                         attr = "tx";
7269
7270                 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
7271                          attr, i);
7272                 bp->irq_tbl[map_idx].handler = bnxt_msix;
7273         }
7274 }
7275
7276 static void bnxt_setup_inta(struct bnxt *bp)
7277 {
7278         const int len = sizeof(bp->irq_tbl[0].name);
7279
7280         if (netdev_get_num_tc(bp->dev))
7281                 netdev_reset_tc(bp->dev);
7282
7283         snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
7284                  0);
7285         bp->irq_tbl[0].handler = bnxt_inta;
7286 }
7287
7288 static int bnxt_setup_int_mode(struct bnxt *bp)
7289 {
7290         int rc;
7291
7292         if (bp->flags & BNXT_FLAG_USING_MSIX)
7293                 bnxt_setup_msix(bp);
7294         else
7295                 bnxt_setup_inta(bp);
7296
7297         rc = bnxt_set_real_num_queues(bp);
7298         return rc;
7299 }
7300
7301 #ifdef CONFIG_RFS_ACCEL
7302 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
7303 {
7304         return bp->hw_resc.max_rsscos_ctxs;
7305 }
7306
7307 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
7308 {
7309         return bp->hw_resc.max_vnics;
7310 }
7311 #endif
7312
7313 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
7314 {
7315         return bp->hw_resc.max_stat_ctxs;
7316 }
7317
7318 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
7319 {
7320         return bp->hw_resc.max_cp_rings;
7321 }
7322
7323 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
7324 {
7325         unsigned int cp = bp->hw_resc.max_cp_rings;
7326
7327         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
7328                 cp -= bnxt_get_ulp_msix_num(bp);
7329
7330         return cp;
7331 }
7332
7333 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
7334 {
7335         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7336
7337         if (bp->flags & BNXT_FLAG_CHIP_P5)
7338                 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
7339
7340         return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
7341 }
7342
7343 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
7344 {
7345         bp->hw_resc.max_irqs = max_irqs;
7346 }
7347
7348 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
7349 {
7350         unsigned int cp;
7351
7352         cp = bnxt_get_max_func_cp_rings_for_en(bp);
7353         if (bp->flags & BNXT_FLAG_CHIP_P5)
7354                 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
7355         else
7356                 return cp - bp->cp_nr_rings;
7357 }
7358
7359 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
7360 {
7361         unsigned int stat;
7362
7363         stat = bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_ulp_stat_ctxs(bp);
7364         stat -= bp->cp_nr_rings;
7365         return stat;
7366 }
7367
7368 int bnxt_get_avail_msix(struct bnxt *bp, int num)
7369 {
7370         int max_cp = bnxt_get_max_func_cp_rings(bp);
7371         int max_irq = bnxt_get_max_func_irqs(bp);
7372         int total_req = bp->cp_nr_rings + num;
7373         int max_idx, avail_msix;
7374
7375         max_idx = bp->total_irqs;
7376         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
7377                 max_idx = min_t(int, bp->total_irqs, max_cp);
7378         avail_msix = max_idx - bp->cp_nr_rings;
7379         if (!BNXT_NEW_RM(bp) || avail_msix >= num)
7380                 return avail_msix;
7381
7382         if (max_irq < total_req) {
7383                 num = max_irq - bp->cp_nr_rings;
7384                 if (num <= 0)
7385                         return 0;
7386         }
7387         return num;
7388 }
7389
7390 static int bnxt_get_num_msix(struct bnxt *bp)
7391 {
7392         if (!BNXT_NEW_RM(bp))
7393                 return bnxt_get_max_func_irqs(bp);
7394
7395         return bnxt_nq_rings_in_use(bp);
7396 }
7397
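/* Enable MSI-X: request up to the supported number of vectors, then
 * trim the RX/TX ring counts to fit the vectors actually granted,
 * reserving any vectors claimed by the ULP driver.
 */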
7398 static int bnxt_init_msix(struct bnxt *bp)
7399 {
7400         int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
7401         struct msix_entry *msix_ent;
7402
7403         total_vecs = bnxt_get_num_msix(bp);
7404         max = bnxt_get_max_func_irqs(bp);
7405         if (total_vecs > max)
7406                 total_vecs = max;
7407
7408         if (!total_vecs)
7409                 return 0;
7410
7411         msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
7412         if (!msix_ent)
7413                 return -ENOMEM;
7414
7415         for (i = 0; i < total_vecs; i++) {
7416                 msix_ent[i].entry = i;
7417                 msix_ent[i].vector = 0;
7418         }
7419
7420         if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
7421                 min = 2;
7422
7423         total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
7424         ulp_msix = bnxt_get_ulp_msix_num(bp);
7425         if (total_vecs < 0 || total_vecs < ulp_msix) {
7426                 rc = -ENODEV;
7427                 goto msix_setup_exit;
7428         }
7429
7430         bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
7431         if (bp->irq_tbl) {
7432                 for (i = 0; i < total_vecs; i++)
7433                         bp->irq_tbl[i].vector = msix_ent[i].vector;
7434
7435                 bp->total_irqs = total_vecs;
7436                 /* Trim rings based upon num of vectors allocated */
7437                 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
7438                                      total_vecs - ulp_msix, min == 1);
7439                 if (rc)
7440                         goto msix_setup_exit;
7441
7442                 bp->cp_nr_rings = (min == 1) ?
7443                                   max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
7444                                   bp->tx_nr_rings + bp->rx_nr_rings;
7445
7446         } else {
7447                 rc = -ENOMEM;
7448                 goto msix_setup_exit;
7449         }
7450         bp->flags |= BNXT_FLAG_USING_MSIX;
7451         kfree(msix_ent);
7452         return 0;
7453
7454 msix_setup_exit:
7455         netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
7456         kfree(bp->irq_tbl);
7457         bp->irq_tbl = NULL;
7458         pci_disable_msix(bp->pdev);
7459         kfree(msix_ent);
7460         return rc;
7461 }
7462
7463 static int bnxt_init_inta(struct bnxt *bp)
7464 {
7465         bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
7466         if (!bp->irq_tbl)
7467                 return -ENOMEM;
7468
7469         bp->total_irqs = 1;
7470         bp->rx_nr_rings = 1;
7471         bp->tx_nr_rings = 1;
7472         bp->cp_nr_rings = 1;
7473         bp->flags |= BNXT_FLAG_SHARED_RINGS;
7474         bp->irq_tbl[0].vector = bp->pdev->irq;
7475         return 0;
7476 }
7477
7478 static int bnxt_init_int_mode(struct bnxt *bp)
7479 {
7480         int rc = 0;
7481
7482         if (bp->flags & BNXT_FLAG_MSIX_CAP)
7483                 rc = bnxt_init_msix(bp);
7484
7485         if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
7486                 /* fallback to INTA */
7487                 rc = bnxt_init_inta(bp);
7488         }
7489         return rc;
7490 }
7491
7492 static void bnxt_clear_int_mode(struct bnxt *bp)
7493 {
7494         if (bp->flags & BNXT_FLAG_USING_MSIX)
7495                 pci_disable_msix(bp->pdev);
7496
7497         kfree(bp->irq_tbl);
7498         bp->irq_tbl = NULL;
7499         bp->flags &= ~BNXT_FLAG_USING_MSIX;
7500 }
7501
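/* Reserve rings with the firmware.  If the required MSI-X vector count
 * has changed under the new resource manager, tear down and
 * re-initialize the interrupt mode around the reservation.
 */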
7502 int bnxt_reserve_rings(struct bnxt *bp)
7503 {
7504         int tcs = netdev_get_num_tc(bp->dev);
7505         bool reinit_irq = false;
7506         int rc;
7507
7508         if (!bnxt_need_reserve_rings(bp))
7509                 return 0;
7510
7511         if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) {
7512                 bnxt_ulp_irq_stop(bp);
7513                 bnxt_clear_int_mode(bp);
7514                 reinit_irq = true;
7515         }
7516         rc = __bnxt_reserve_rings(bp);
7517         if (reinit_irq) {
7518                 if (!rc)
7519                         rc = bnxt_init_int_mode(bp);
7520                 bnxt_ulp_irq_restart(bp, rc);
7521         }
7522         if (rc) {
7523                 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
7524                 return rc;
7525         }
7526         if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
7527                 netdev_err(bp->dev, "tx ring reservation failure\n");
7528                 netdev_reset_tc(bp->dev);
7529                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
7530                 return -ENOMEM;
7531         }
7532         return 0;
7533 }
7534
7535 static void bnxt_free_irq(struct bnxt *bp)
7536 {
7537         struct bnxt_irq *irq;
7538         int i;
7539
7540 #ifdef CONFIG_RFS_ACCEL
7541         free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
7542         bp->dev->rx_cpu_rmap = NULL;
7543 #endif
7544         if (!bp->irq_tbl || !bp->bnapi)
7545                 return;
7546
7547         for (i = 0; i < bp->cp_nr_rings; i++) {
7548                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7549
7550                 irq = &bp->irq_tbl[map_idx];
7551                 if (irq->requested) {
7552                         if (irq->have_cpumask) {
7553                                 irq_set_affinity_hint(irq->vector, NULL);
7554                                 free_cpumask_var(irq->cpu_mask);
7555                                 irq->have_cpumask = 0;
7556                         }
7557                         free_irq(irq->vector, bp->bnapi[i]);
7558                 }
7559
7560                 irq->requested = 0;
7561         }
7562 }
7563
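/* Request one IRQ per completion ring (or a single shared INTx vector),
 * add RX vectors to the aRFS CPU rmap, and set affinity hints spread
 * across the CPUs of the device's NUMA node.
 */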
7564 static int bnxt_request_irq(struct bnxt *bp)
7565 {
7566         int i, j, rc = 0;
7567         unsigned long flags = 0;
7568 #ifdef CONFIG_RFS_ACCEL
7569         struct cpu_rmap *rmap;
7570 #endif
7571
7572         rc = bnxt_setup_int_mode(bp);
7573         if (rc) {
7574                 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
7575                            rc);
7576                 return rc;
7577         }
7578 #ifdef CONFIG_RFS_ACCEL
7579         rmap = bp->dev->rx_cpu_rmap;
7580 #endif
7581         if (!(bp->flags & BNXT_FLAG_USING_MSIX))
7582                 flags = IRQF_SHARED;
7583
7584         for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
7585                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7586                 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
7587
7588 #ifdef CONFIG_RFS_ACCEL
7589                 if (rmap && bp->bnapi[i]->rx_ring) {
7590                         rc = irq_cpu_rmap_add(rmap, irq->vector);
7591                         if (rc)
7592                                 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
7593                                             j);
7594                         j++;
7595                 }
7596 #endif
7597                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
7598                                  bp->bnapi[i]);
7599                 if (rc)
7600                         break;
7601
7602                 irq->requested = 1;
7603
7604                 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
7605                         int numa_node = dev_to_node(&bp->pdev->dev);
7606
7607                         irq->have_cpumask = 1;
7608                         cpumask_set_cpu(cpumask_local_spread(i, numa_node),
7609                                         irq->cpu_mask);
7610                         rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
7611                         if (rc) {
7612                                 netdev_warn(bp->dev,
7613                                             "Set affinity failed, IRQ = %d\n",
7614                                             irq->vector);
7615                                 break;
7616                         }
7617                 }
7618         }
7619         return rc;
7620 }
7621
7622 static void bnxt_del_napi(struct bnxt *bp)
7623 {
7624         int i;
7625
7626         if (!bp->bnapi)
7627                 return;
7628
7629         for (i = 0; i < bp->cp_nr_rings; i++) {
7630                 struct bnxt_napi *bnapi = bp->bnapi[i];
7631
7632                 napi_hash_del(&bnapi->napi);
7633                 netif_napi_del(&bnapi->napi);
7634         }
7635         /* Since we called napi_hash_del() before netif_napi_del(), we need
7636          * to respect an RCU grace period before freeing napi structures.
7637          */
7638         synchronize_net();
7639 }
7640
7641 static void bnxt_init_napi(struct bnxt *bp)
7642 {
7643         int i;
7644         unsigned int cp_nr_rings = bp->cp_nr_rings;
7645         struct bnxt_napi *bnapi;
7646
7647         if (bp->flags & BNXT_FLAG_USING_MSIX) {
7648                 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
7649
7650                 if (bp->flags & BNXT_FLAG_CHIP_P5)
7651                         poll_fn = bnxt_poll_p5;
7652                 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7653                         cp_nr_rings--;
7654                 for (i = 0; i < cp_nr_rings; i++) {
7655                         bnapi = bp->bnapi[i];
7656                         netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
7657                 }
7658                 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7659                         bnapi = bp->bnapi[cp_nr_rings];
7660                         netif_napi_add(bp->dev, &bnapi->napi,
7661                                        bnxt_poll_nitroa0, 64);
7662                 }
7663         } else {
7664                 bnapi = bp->bnapi[0];
7665                 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
7666         }
7667 }
7668
7669 static void bnxt_disable_napi(struct bnxt *bp)
7670 {
7671         int i;
7672
7673         if (!bp->bnapi)
7674                 return;
7675
7676         for (i = 0; i < bp->cp_nr_rings; i++) {
7677                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
7678
7679                 if (bp->bnapi[i]->rx_ring)
7680                         cancel_work_sync(&cpr->dim.work);
7681
7682                 napi_disable(&bp->bnapi[i]->napi);
7683         }
7684 }
7685
7686 static void bnxt_enable_napi(struct bnxt *bp)
7687 {
7688         int i;
7689
7690         for (i = 0; i < bp->cp_nr_rings; i++) {
7691                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
7692                 bp->bnapi[i]->in_reset = false;
7693
7694                 if (bp->bnapi[i]->rx_ring) {
7695                         INIT_WORK(&cpr->dim.work, bnxt_dim_work);
7696                         cpr->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
7697                 }
7698                 napi_enable(&bp->bnapi[i]->napi);
7699         }
7700 }
7701
7702 void bnxt_tx_disable(struct bnxt *bp)
7703 {
7704         int i;
7705         struct bnxt_tx_ring_info *txr;
7706
7707         if (bp->tx_ring) {
7708                 for (i = 0; i < bp->tx_nr_rings; i++) {
7709                         txr = &bp->tx_ring[i];
7710                         txr->dev_state = BNXT_DEV_STATE_CLOSING;
7711                 }
7712         }
7713         /* Stop all TX queues */
7714         netif_tx_disable(bp->dev);
7715         netif_carrier_off(bp->dev);
7716 }
7717
7718 void bnxt_tx_enable(struct bnxt *bp)
7719 {
7720         int i;
7721         struct bnxt_tx_ring_info *txr;
7722
7723         for (i = 0; i < bp->tx_nr_rings; i++) {
7724                 txr = &bp->tx_ring[i];
7725                 txr->dev_state = 0;
7726         }
7727         netif_tx_wake_all_queues(bp->dev);
7728         if (bp->link_info.link_up)
7729                 netif_carrier_on(bp->dev);
7730 }
7731
7732 static void bnxt_report_link(struct bnxt *bp)
7733 {
7734         if (bp->link_info.link_up) {
7735                 const char *duplex;
7736                 const char *flow_ctrl;
7737                 u32 speed;
7738                 u16 fec;
7739
7740                 netif_carrier_on(bp->dev);
7741                 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
7742                         duplex = "full";
7743                 else
7744                         duplex = "half";
7745                 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
7746                         flow_ctrl = "ON - receive & transmit";
7747                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
7748                         flow_ctrl = "ON - transmit";
7749                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
7750                         flow_ctrl = "ON - receive";
7751                 else
7752                         flow_ctrl = "none";
7753                 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
7754                 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
7755                             speed, duplex, flow_ctrl);
7756                 if (bp->flags & BNXT_FLAG_EEE_CAP)
7757                         netdev_info(bp->dev, "EEE is %s\n",
7758                                     bp->eee.eee_active ? "active" :
7759                                                          "not active");
7760                 fec = bp->link_info.fec_cfg;
7761                 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
7762                         netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
7763                                     (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
7764                                     (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
7765                                      (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
7766         } else {
7767                 netif_carrier_off(bp->dev);
7768                 netdev_err(bp->dev, "NIC Link is Down\n");
7769         }
7770 }
7771
7772 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
7773 {
7774         int rc = 0;
7775         struct hwrm_port_phy_qcaps_input req = {0};
7776         struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7777         struct bnxt_link_info *link_info = &bp->link_info;
7778
7779         if (bp->hwrm_spec_code < 0x10201)
7780                 return 0;
7781
7782         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
7783
7784         mutex_lock(&bp->hwrm_cmd_lock);
7785         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7786         if (rc)
7787                 goto hwrm_phy_qcaps_exit;
7788
7789         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
7790                 struct ethtool_eee *eee = &bp->eee;
7791                 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
7792
7793                 bp->flags |= BNXT_FLAG_EEE_CAP;
7794                 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
7795                 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
7796                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
7797                 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
7798                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
7799         }
7800         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
7801                 if (bp->test_info)
7802                         bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
7803         }
7804         if (resp->supported_speeds_auto_mode)
7805                 link_info->support_auto_speeds =
7806                         le16_to_cpu(resp->supported_speeds_auto_mode);
7807
7808         bp->port_count = resp->port_cnt;
7809
7810 hwrm_phy_qcaps_exit:
7811         mutex_unlock(&bp->hwrm_cmd_lock);
7812         return rc;
7813 }
7814
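/* Refresh the cached link state from HWRM_PORT_PHY_QCFG.  When
 * chng_link_state is set, update link_up and report transitions;
 * otherwise the link is always reported as down.
 */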
7815 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
7816 {
7817         int rc = 0;
7818         struct bnxt_link_info *link_info = &bp->link_info;
7819         struct hwrm_port_phy_qcfg_input req = {0};
7820         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7821         u8 link_up = link_info->link_up;
7822         u16 diff;
7823
7824         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
7825
7826         mutex_lock(&bp->hwrm_cmd_lock);
7827         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7828         if (rc) {
7829                 mutex_unlock(&bp->hwrm_cmd_lock);
7830                 return rc;
7831         }
7832
7833         memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
7834         link_info->phy_link_status = resp->link;
7835         link_info->duplex = resp->duplex_cfg;
7836         if (bp->hwrm_spec_code >= 0x10800)
7837                 link_info->duplex = resp->duplex_state;
7838         link_info->pause = resp->pause;
7839         link_info->auto_mode = resp->auto_mode;
7840         link_info->auto_pause_setting = resp->auto_pause;
7841         link_info->lp_pause = resp->link_partner_adv_pause;
7842         link_info->force_pause_setting = resp->force_pause;
7843         link_info->duplex_setting = resp->duplex_cfg;
7844         if (link_info->phy_link_status == BNXT_LINK_LINK)
7845                 link_info->link_speed = le16_to_cpu(resp->link_speed);
7846         else
7847                 link_info->link_speed = 0;
7848         link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
7849         link_info->support_speeds = le16_to_cpu(resp->support_speeds);
7850         link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
7851         link_info->lp_auto_link_speeds =
7852                 le16_to_cpu(resp->link_partner_adv_speeds);
7853         link_info->preemphasis = le32_to_cpu(resp->preemphasis);
7854         link_info->phy_ver[0] = resp->phy_maj;
7855         link_info->phy_ver[1] = resp->phy_min;
7856         link_info->phy_ver[2] = resp->phy_bld;
7857         link_info->media_type = resp->media_type;
7858         link_info->phy_type = resp->phy_type;
7859         link_info->transceiver = resp->xcvr_pkg_type;
7860         link_info->phy_addr = resp->eee_config_phy_addr &
7861                               PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
7862         link_info->module_status = resp->module_status;
7863
7864         if (bp->flags & BNXT_FLAG_EEE_CAP) {
7865                 struct ethtool_eee *eee = &bp->eee;
7866                 u16 fw_speeds;
7867
7868                 eee->eee_active = 0;
7869                 if (resp->eee_config_phy_addr &
7870                     PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
7871                         eee->eee_active = 1;
7872                         fw_speeds = le16_to_cpu(
7873                                 resp->link_partner_adv_eee_link_speed_mask);
7874                         eee->lp_advertised =
7875                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
7876                 }
7877
7878                 /* Pull initial EEE config */
7879                 if (!chng_link_state) {
7880                         if (resp->eee_config_phy_addr &
7881                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
7882                                 eee->eee_enabled = 1;
7883
7884                         fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
7885                         eee->advertised =
7886                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
7887
7888                         if (resp->eee_config_phy_addr &
7889                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
7890                                 __le32 tmr;
7891
7892                                 eee->tx_lpi_enabled = 1;
7893                                 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
7894                                 eee->tx_lpi_timer = le32_to_cpu(tmr) &
7895                                         PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
7896                         }
7897                 }
7898         }
7899
7900         link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
7901         if (bp->hwrm_spec_code >= 0x10504)
7902                 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
7903
7904         /* TODO: need to add more logic to report VF link */
7905         if (chng_link_state) {
7906                 if (link_info->phy_link_status == BNXT_LINK_LINK)
7907                         link_info->link_up = 1;
7908                 else
7909                         link_info->link_up = 0;
7910                 if (link_up != link_info->link_up)
7911                         bnxt_report_link(bp);
7912         } else {
7913                 /* always report link down if not required to update the link state */
7914                 link_info->link_up = 0;
7915         }
7916         mutex_unlock(&bp->hwrm_cmd_lock);
7917
7918         if (!BNXT_SINGLE_PF(bp))
7919                 return 0;
7920
7921         diff = link_info->support_auto_speeds ^ link_info->advertising;
7922         if ((link_info->support_auto_speeds | diff) !=
7923             link_info->support_auto_speeds) {
7924                 /* An advertised speed is no longer supported, so we need to
7925                  * update the advertisement settings.  Caller holds RTNL
7926                  * so we can modify link settings.
7927                  */
7928                 link_info->advertising = link_info->support_auto_speeds;
7929                 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
7930                         bnxt_hwrm_set_link_setting(bp, true, false);
7931         }
7932         return 0;
7933 }
7934
7935 static void bnxt_get_port_module_status(struct bnxt *bp)
7936 {
7937         struct bnxt_link_info *link_info = &bp->link_info;
7938         struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
7939         u8 module_status;
7940
7941         if (bnxt_update_link(bp, true))
7942                 return;
7943
7944         module_status = link_info->module_status;
7945         switch (module_status) {
7946         case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
7947         case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
7948         case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
7949                 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
7950                             bp->pf.port_id);
7951                 if (bp->hwrm_spec_code >= 0x10201) {
7952                         netdev_warn(bp->dev, "Module part number %s\n",
7953                                     resp->phy_vendor_partnumber);
7954                 }
7955                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
7956                         netdev_warn(bp->dev, "TX is disabled\n");
7957                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
7958                         netdev_warn(bp->dev, "SFP+ module is shutdown\n");
7959         }
7960 }
7961
7962 static void
7963 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
7964 {
7965         if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
7966                 if (bp->hwrm_spec_code >= 0x10201)
7967                         req->auto_pause =
7968                                 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
7969                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
7970                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
7971                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
7972                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
7973                 req->enables |=
7974                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
7975         } else {
7976                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
7977                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
7978                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
7979                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
7980                 req->enables |=
7981                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
7982                 if (bp->hwrm_spec_code >= 0x10201) {
7983                         req->auto_pause = req->force_pause;
7984                         req->enables |= cpu_to_le32(
7985                                 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
7986                 }
7987         }
7988 }
7989
7990 static void bnxt_hwrm_set_link_common(struct bnxt *bp,
7991                                       struct hwrm_port_phy_cfg_input *req)
7992 {
7993         u8 autoneg = bp->link_info.autoneg;
7994         u16 fw_link_speed = bp->link_info.req_link_speed;
7995         u16 advertising = bp->link_info.advertising;
7996
7997         if (autoneg & BNXT_AUTONEG_SPEED) {
7998                 req->auto_mode |=
7999                         PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
8000
8001                 req->enables |= cpu_to_le32(
8002                         PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
8003                 req->auto_link_speed_mask = cpu_to_le16(advertising);
8004
8005                 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
8006                 req->flags |=
8007                         cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
8008         } else {
8009                 req->force_link_speed = cpu_to_le16(fw_link_speed);
8010                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
8011         }
8012
8013         /* tell chimp that the setting takes effect immediately */
8014         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
8015 }
8016
8017 int bnxt_hwrm_set_pause(struct bnxt *bp)
8018 {
8019         struct hwrm_port_phy_cfg_input req = {0};
8020         int rc;
8021
8022         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8023         bnxt_hwrm_set_pause_common(bp, &req);
8024
8025         if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
8026             bp->link_info.force_link_chng)
8027                 bnxt_hwrm_set_link_common(bp, &req);
8028
8029         mutex_lock(&bp->hwrm_cmd_lock);
8030         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8031         if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
8032                 /* Since changing the pause setting doesn't trigger any link
8033                  * change event, the driver needs to update the current pause
8034                  * result upon successful return of the phy_cfg command
8035                  */
8036                 bp->link_info.pause =
8037                 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
8038                 bp->link_info.auto_pause_setting = 0;
8039                 if (!bp->link_info.force_link_chng)
8040                         bnxt_report_link(bp);
8041         }
8042         bp->link_info.force_link_chng = false;
8043         mutex_unlock(&bp->hwrm_cmd_lock);
8044         return rc;
8045 }
8046
8047 static void bnxt_hwrm_set_eee(struct bnxt *bp,
8048                               struct hwrm_port_phy_cfg_input *req)
8049 {
8050         struct ethtool_eee *eee = &bp->eee;
8051
8052         if (eee->eee_enabled) {
8053                 u16 eee_speeds;
8054                 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
8055
8056                 if (eee->tx_lpi_enabled)
8057                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
8058                 else
8059                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
8060
8061                 req->flags |= cpu_to_le32(flags);
8062                 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
8063                 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
8064                 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
8065         } else {
8066                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
8067         }
8068 }
8069
8070 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
8071 {
8072         struct hwrm_port_phy_cfg_input req = {0};
8073
8074         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8075         if (set_pause)
8076                 bnxt_hwrm_set_pause_common(bp, &req);
8077
8078         bnxt_hwrm_set_link_common(bp, &req);
8079
8080         if (set_eee)
8081                 bnxt_hwrm_set_eee(bp, &req);
8082         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8083 }
8084
8085 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
8086 {
8087         struct hwrm_port_phy_cfg_input req = {0};
8088
8089         if (!BNXT_SINGLE_PF(bp))
8090                 return 0;
8091
8092         if (pci_num_vf(bp->pdev))
8093                 return 0;
8094
8095         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8096         req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
8097         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8098 }
8099
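/* Notify firmware that the interface is going up or down.  If firmware
 * reports that resources changed while the interface was down, requery
 * the resource caps and clear the cached reservations so they are
 * re-reserved on the next open.
 */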
8100 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
8101 {
8102         struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
8103         struct hwrm_func_drv_if_change_input req = {0};
8104         bool resc_reinit = false;
8105         int rc;
8106
8107         if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
8108                 return 0;
8109
8110         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
8111         if (up)
8112                 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
8113         mutex_lock(&bp->hwrm_cmd_lock);
8114         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8115         if (!rc && (resp->flags &
8116                     cpu_to_le32(FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)))
8117                 resc_reinit = true;
8118         mutex_unlock(&bp->hwrm_cmd_lock);
8119
8120         if (up && resc_reinit && BNXT_NEW_RM(bp)) {
8121                 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8122
8123                 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
8124                 hw_resc->resv_cp_rings = 0;
8125                 hw_resc->resv_stat_ctxs = 0;
8126                 hw_resc->resv_irqs = 0;
8127                 hw_resc->resv_tx_rings = 0;
8128                 hw_resc->resv_rx_rings = 0;
8129                 hw_resc->resv_hw_ring_grps = 0;
8130                 hw_resc->resv_vnics = 0;
8131                 bp->tx_nr_rings = 0;
8132                 bp->rx_nr_rings = 0;
8133         }
8134         return rc;
8135 }
8136
8137 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
8138 {
8139         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
8140         struct hwrm_port_led_qcaps_input req = {0};
8141         struct bnxt_pf_info *pf = &bp->pf;
8142         int rc;
8143
8144         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
8145                 return 0;
8146
8147         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
8148         req.port_id = cpu_to_le16(pf->port_id);
8149         mutex_lock(&bp->hwrm_cmd_lock);
8150         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8151         if (rc) {
8152                 mutex_unlock(&bp->hwrm_cmd_lock);
8153                 return rc;
8154         }
8155         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
8156                 int i;
8157
8158                 bp->num_leds = resp->num_leds;
8159                 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
8160                                                  bp->num_leds);
8161                 for (i = 0; i < bp->num_leds; i++) {
8162                         struct bnxt_led_info *led = &bp->leds[i];
8163                         __le16 caps = led->led_state_caps;
8164
8165                         if (!led->led_group_id ||
8166                             !BNXT_LED_ALT_BLINK_CAP(caps)) {
8167                                 bp->num_leds = 0;
8168                                 break;
8169                         }
8170                 }
8171         }
8172         mutex_unlock(&bp->hwrm_cmd_lock);
8173         return 0;
8174 }
8175
8176 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
8177 {
8178         struct hwrm_wol_filter_alloc_input req = {0};
8179         struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
8180         int rc;
8181
8182         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
8183         req.port_id = cpu_to_le16(bp->pf.port_id);
8184         req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
8185         req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
8186         memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
8187         mutex_lock(&bp->hwrm_cmd_lock);
8188         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8189         if (!rc)
8190                 bp->wol_filter_id = resp->wol_filter_id;
8191         mutex_unlock(&bp->hwrm_cmd_lock);
8192         return rc;
8193 }
8194
8195 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
8196 {
8197         struct hwrm_wol_filter_free_input req = {0};
8198         int rc;
8199
8200         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
8201         req.port_id = cpu_to_le16(bp->pf.port_id);
8202         req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
8203         req.wol_filter_id = bp->wol_filter_id;
8204         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8205         return rc;
8206 }
8207
8208 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
8209 {
8210         struct hwrm_wol_filter_qcfg_input req = {0};
8211         struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
8212         u16 next_handle = 0;
8213         int rc;
8214
8215         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
8216         req.port_id = cpu_to_le16(bp->pf.port_id);
8217         req.handle = cpu_to_le16(handle);
8218         mutex_lock(&bp->hwrm_cmd_lock);
8219         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8220         if (!rc) {
8221                 next_handle = le16_to_cpu(resp->next_handle);
8222                 if (next_handle != 0) {
8223                         if (resp->wol_type ==
8224                             WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
8225                                 bp->wol = 1;
8226                                 bp->wol_filter_id = resp->wol_filter_id;
8227                         }
8228                 }
8229         }
8230         mutex_unlock(&bp->hwrm_cmd_lock);
8231         return next_handle;
8232 }
8233
8234 static void bnxt_get_wol_settings(struct bnxt *bp)
8235 {
8236         u16 handle = 0;
8237
8238         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
8239                 return;
8240
8241         do {
8242                 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
8243         } while (handle && handle != 0xffff);
8244 }
8245
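/* Optional hwmon support: expose the chip temperature sensor reported by
 * firmware (HWRM_TEMP_MONITOR_QUERY), converted to millidegrees Celsius as
 * expected by the hwmon sysfs interface.
 */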
8246 #ifdef CONFIG_BNXT_HWMON
8247 static ssize_t bnxt_show_temp(struct device *dev,
8248                               struct device_attribute *devattr, char *buf)
8249 {
8250         struct hwrm_temp_monitor_query_input req = {0};
8251         struct hwrm_temp_monitor_query_output *resp;
8252         struct bnxt *bp = dev_get_drvdata(dev);
8253         u32 temp = 0;
8254
8255         resp = bp->hwrm_cmd_resp_addr;
8256         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
8257         mutex_lock(&bp->hwrm_cmd_lock);
8258         if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
8259                 temp = resp->temp * 1000; /* display millidegree */
8260         mutex_unlock(&bp->hwrm_cmd_lock);
8261
8262         return sprintf(buf, "%u\n", temp);
8263 }
8264 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
8265
8266 static struct attribute *bnxt_attrs[] = {
8267         &sensor_dev_attr_temp1_input.dev_attr.attr,
8268         NULL
8269 };
8270 ATTRIBUTE_GROUPS(bnxt);
8271
8272 static void bnxt_hwmon_close(struct bnxt *bp)
8273 {
8274         if (bp->hwmon_dev) {
8275                 hwmon_device_unregister(bp->hwmon_dev);
8276                 bp->hwmon_dev = NULL;
8277         }
8278 }
8279
8280 static void bnxt_hwmon_open(struct bnxt *bp)
8281 {
8282         struct pci_dev *pdev = bp->pdev;
8283
8284         bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
8285                                                           DRV_MODULE_NAME, bp,
8286                                                           bnxt_groups);
8287         if (IS_ERR(bp->hwmon_dev)) {
8288                 bp->hwmon_dev = NULL;
8289                 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
8290         }
8291 }
8292 #else
8293 static void bnxt_hwmon_close(struct bnxt *bp)
8294 {
8295 }
8296
8297 static void bnxt_hwmon_open(struct bnxt *bp)
8298 {
8299 }
8300 #endif
8301
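/* Check that the EEE configuration is consistent with the link settings:
 * EEE requires autonegotiated speed, and the EEE advertisement must be a
 * subset of the advertised link speeds.  Returns false if the EEE settings
 * had to be adjusted.
 */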
8302 static bool bnxt_eee_config_ok(struct bnxt *bp)
8303 {
8304         struct ethtool_eee *eee = &bp->eee;
8305         struct bnxt_link_info *link_info = &bp->link_info;
8306
8307         if (!(bp->flags & BNXT_FLAG_EEE_CAP))
8308                 return true;
8309
8310         if (eee->eee_enabled) {
8311                 u32 advertising =
8312                         _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
8313
8314                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
8315                         eee->eee_enabled = 0;
8316                         return false;
8317                 }
8318                 if (eee->advertised & ~advertising) {
8319                         eee->advertised = advertising & eee->supported;
8320                         return false;
8321                 }
8322         }
8323         return true;
8324 }
8325
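/* Compare the requested PHY settings (speed, duplex, flow control, EEE)
 * with the current link state and issue the HWRM link or pause update only
 * when something actually needs to change.
 */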
8326 static int bnxt_update_phy_setting(struct bnxt *bp)
8327 {
8328         int rc;
8329         bool update_link = false;
8330         bool update_pause = false;
8331         bool update_eee = false;
8332         struct bnxt_link_info *link_info = &bp->link_info;
8333
8334         rc = bnxt_update_link(bp, true);
8335         if (rc) {
8336                 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
8337                            rc);
8338                 return rc;
8339         }
8340         if (!BNXT_SINGLE_PF(bp))
8341                 return 0;
8342
8343         if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
8344             (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
8345             link_info->req_flow_ctrl)
8346                 update_pause = true;
8347         if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
8348             link_info->force_pause_setting != link_info->req_flow_ctrl)
8349                 update_pause = true;
8350         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
8351                 if (BNXT_AUTO_MODE(link_info->auto_mode))
8352                         update_link = true;
8353                 if (link_info->req_link_speed != link_info->force_link_speed)
8354                         update_link = true;
8355                 if (link_info->req_duplex != link_info->duplex_setting)
8356                         update_link = true;
8357         } else {
8358                 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
8359                         update_link = true;
8360                 if (link_info->advertising != link_info->auto_link_speeds)
8361                         update_link = true;
8362         }
8363
8364         /* The last close may have shut down the link, so we need to call
8365          * PHY_CFG to bring it back up.
8366          */
8367         if (!netif_carrier_ok(bp->dev))
8368                 update_link = true;
8369
8370         if (!bnxt_eee_config_ok(bp))
8371                 update_eee = true;
8372
8373         if (update_link)
8374                 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
8375         else if (update_pause)
8376                 rc = bnxt_hwrm_set_pause(bp);
8377         if (rc) {
8378                 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
8379                            rc);
8380                 return rc;
8381         }
8382
8383         return rc;
8384 }
8385
8386 /* Common routine to pre-map certain register blocks into different GRC windows.
8387  * A PF has 16 4K windows and a VF has 4 4K windows.  However, only 15 windows
8388  * in the PF and 3 windows in the VF can be customized to map different
8389  * register blocks.
8390  */
8391 static void bnxt_preset_reg_win(struct bnxt *bp)
8392 {
8393         if (BNXT_PF(bp)) {
8394                 /* CAG registers map to GRC window #4 */
8395                 writel(BNXT_CAG_REG_BASE,
8396                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
8397         }
8398 }
8399
8400 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
8401
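/* Core open sequence: reserve rings, allocate ring memory, set up NAPI and
 * IRQs (when irq_re_init), initialize the NIC, optionally update the PHY
 * settings, then enable interrupts and the TX queues.
 */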
8402 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8403 {
8404         int rc = 0;
8405
8406         bnxt_preset_reg_win(bp);
8407         netif_carrier_off(bp->dev);
8408         if (irq_re_init) {
8409                 /* Reserve rings now if none were reserved at driver probe. */
8410                 rc = bnxt_init_dflt_ring_mode(bp);
8411                 if (rc) {
8412                         netdev_err(bp->dev, "Failed to reserve default rings at open\n");
8413                         return rc;
8414                 }
8415         }
8416         rc = bnxt_reserve_rings(bp);
8417         if (rc)
8418                 return rc;
8419         if ((bp->flags & BNXT_FLAG_RFS) &&
8420             !(bp->flags & BNXT_FLAG_USING_MSIX)) {
8421                 /* disable RFS if falling back to INTA */
8422                 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
8423                 bp->flags &= ~BNXT_FLAG_RFS;
8424         }
8425
8426         rc = bnxt_alloc_mem(bp, irq_re_init);
8427         if (rc) {
8428                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
8429                 goto open_err_free_mem;
8430         }
8431
8432         if (irq_re_init) {
8433                 bnxt_init_napi(bp);
8434                 rc = bnxt_request_irq(bp);
8435                 if (rc) {
8436                         netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
8437                         goto open_err_irq;
8438                 }
8439         }
8440
8441         bnxt_enable_napi(bp);
8442         bnxt_debug_dev_init(bp);
8443
8444         rc = bnxt_init_nic(bp, irq_re_init);
8445         if (rc) {
8446                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
8447                 goto open_err;
8448         }
8449
8450         if (link_re_init) {
8451                 mutex_lock(&bp->link_lock);
8452                 rc = bnxt_update_phy_setting(bp);
8453                 mutex_unlock(&bp->link_lock);
8454                 if (rc) {
8455                         netdev_warn(bp->dev, "failed to update phy settings\n");
8456                         if (BNXT_SINGLE_PF(bp)) {
8457                                 bp->link_info.phy_retry = true;
8458                                 bp->link_info.phy_retry_expires =
8459                                         jiffies + 5 * HZ;
8460                         }
8461                 }
8462         }
8463
8464         if (irq_re_init)
8465                 udp_tunnel_get_rx_info(bp->dev);
8466
8467         set_bit(BNXT_STATE_OPEN, &bp->state);
8468         bnxt_enable_int(bp);
8469         /* Enable TX queues */
8470         bnxt_tx_enable(bp);
8471         mod_timer(&bp->timer, jiffies + bp->current_interval);
8472         /* Poll link status and check for SFP+ module status */
8473         bnxt_get_port_module_status(bp);
8474
8475         /* VF-reps may need to be re-opened after the PF is re-opened */
8476         if (BNXT_PF(bp))
8477                 bnxt_vf_reps_open(bp);
8478         return 0;
8479
8480 open_err:
8481         bnxt_debug_dev_exit(bp);
8482         bnxt_disable_napi(bp);
8483
8484 open_err_irq:
8485         bnxt_del_napi(bp);
8486
8487 open_err_free_mem:
8488         bnxt_free_skbs(bp);
8489         bnxt_free_irq(bp);
8490         bnxt_free_mem(bp, true);
8491         return rc;
8492 }
8493
8494 /* rtnl_lock held */
8495 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8496 {
8497         int rc = 0;
8498
8499         rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
8500         if (rc) {
8501                 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
8502                 dev_close(bp->dev);
8503         }
8504         return rc;
8505 }
8506
8507 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
8508  * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
8509  * self-tests.
8510  */
8511 int bnxt_half_open_nic(struct bnxt *bp)
8512 {
8513         int rc = 0;
8514
8515         rc = bnxt_alloc_mem(bp, false);
8516         if (rc) {
8517                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
8518                 goto half_open_err;
8519         }
8520         rc = bnxt_init_nic(bp, false);
8521         if (rc) {
8522                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
8523                 goto half_open_err;
8524         }
8525         return 0;
8526
8527 half_open_err:
8528         bnxt_free_skbs(bp);
8529         bnxt_free_mem(bp, false);
8530         dev_close(bp->dev);
8531         return rc;
8532 }
8533
8534 /* rtnl_lock held, this call can only be made after a previous successful
8535  * call to bnxt_half_open_nic().
8536  */
8537 void bnxt_half_close_nic(struct bnxt *bp)
8538 {
8539         bnxt_hwrm_resource_free(bp, false, false);
8540         bnxt_free_skbs(bp);
8541         bnxt_free_mem(bp, false);
8542 }
8543
8544 static int bnxt_open(struct net_device *dev)
8545 {
8546         struct bnxt *bp = netdev_priv(dev);
8547         int rc;
8548
8549         bnxt_hwrm_if_change(bp, true);
8550         rc = __bnxt_open_nic(bp, true, true);
8551         if (rc)
8552                 bnxt_hwrm_if_change(bp, false);
8553
8554         bnxt_hwmon_open(bp);
8555
8556         return rc;
8557 }
8558
8559 static bool bnxt_drv_busy(struct bnxt *bp)
8560 {
8561         return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
8562                 test_bit(BNXT_STATE_READ_STATS, &bp->state));
8563 }
8564
8565 static void bnxt_get_ring_stats(struct bnxt *bp,
8566                                 struct rtnl_link_stats64 *stats);
8567
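/* Core close sequence: stop TX, wait for any in-flight stats reads or
 * sp_task work to finish, shut down the NIC, save the ring counters into
 * net_stats_prev, then free IRQs, NAPI and ring memory as requested.
 */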
8568 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
8569                              bool link_re_init)
8570 {
8571         /* Close the VF-reps before closing PF */
8572         if (BNXT_PF(bp))
8573                 bnxt_vf_reps_close(bp);
8574
8575         /* Change device state to avoid TX queue wake-ups */
8576         bnxt_tx_disable(bp);
8577
8578         clear_bit(BNXT_STATE_OPEN, &bp->state);
8579         smp_mb__after_atomic();
8580         while (bnxt_drv_busy(bp))
8581                 msleep(20);
8582
8583         /* Flush rings and disable interrupts */
8584         bnxt_shutdown_nic(bp, irq_re_init);
8585
8586         /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
8587
8588         bnxt_debug_dev_exit(bp);
8589         bnxt_disable_napi(bp);
8590         del_timer_sync(&bp->timer);
8591         bnxt_free_skbs(bp);
8592
8593         /* Save ring stats before shutdown */
8594         if (bp->bnapi)
8595                 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
8596         if (irq_re_init) {
8597                 bnxt_free_irq(bp);
8598                 bnxt_del_napi(bp);
8599         }
8600         bnxt_free_mem(bp, irq_re_init);
8601 }
8602
8603 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8604 {
8605         int rc = 0;
8606
8607 #ifdef CONFIG_BNXT_SRIOV
8608         if (bp->sriov_cfg) {
8609                 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
8610                                                       !bp->sriov_cfg,
8611                                                       BNXT_SRIOV_CFG_WAIT_TMO);
8612                 if (rc)
8613                         netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
8614         }
8615 #endif
8616         __bnxt_close_nic(bp, irq_re_init, link_re_init);
8617         return rc;
8618 }
8619
8620 static int bnxt_close(struct net_device *dev)
8621 {
8622         struct bnxt *bp = netdev_priv(dev);
8623
8624         bnxt_hwmon_close(bp);
8625         bnxt_close_nic(bp, true, true);
8626         bnxt_hwrm_shutdown_link(bp);
8627         bnxt_hwrm_if_change(bp, false);
8628         return 0;
8629 }
8630
8631 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
8632                                    u16 *val)
8633 {
8634         struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
8635         struct hwrm_port_phy_mdio_read_input req = {0};
8636         int rc;
8637
8638         if (bp->hwrm_spec_code < 0x10a00)
8639                 return -EOPNOTSUPP;
8640
8641         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
8642         req.port_id = cpu_to_le16(bp->pf.port_id);
8643         req.phy_addr = phy_addr;
8644         req.reg_addr = cpu_to_le16(reg & 0x1f);
8645         if (bp->link_info.support_speeds & BNXT_LINK_SPEED_MSK_10GB) {
8646                 req.cl45_mdio = 1;
8647                 req.phy_addr = mdio_phy_id_prtad(phy_addr);
8648                 req.dev_addr = mdio_phy_id_devad(phy_addr);
8649                 req.reg_addr = cpu_to_le16(reg);
8650         }
8651
8652         mutex_lock(&bp->hwrm_cmd_lock);
8653         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8654         if (!rc)
8655                 *val = le16_to_cpu(resp->reg_data);
8656         mutex_unlock(&bp->hwrm_cmd_lock);
8657         return rc;
8658 }
8659
8660 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
8661                                     u16 val)
8662 {
8663         struct hwrm_port_phy_mdio_write_input req = {0};
8664
8665         if (bp->hwrm_spec_code < 0x10a00)
8666                 return -EOPNOTSUPP;
8667
8668         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
8669         req.port_id = cpu_to_le16(bp->pf.port_id);
8670         req.phy_addr = phy_addr;
8671         req.reg_addr = cpu_to_le16(reg & 0x1f);
8672         if (bp->link_info.support_speeds & BNXT_LINK_SPEED_MSK_10GB) {
8673                 req.cl45_mdio = 1;
8674                 req.phy_addr = mdio_phy_id_prtad(phy_addr);
8675                 req.dev_addr = mdio_phy_id_devad(phy_addr);
8676                 req.reg_addr = cpu_to_le16(reg);
8677         }
8678         req.reg_data = cpu_to_le16(val);
8679
8680         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8681 }
8682
8683 /* rtnl_lock held */
8684 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8685 {
8686         struct mii_ioctl_data *mdio = if_mii(ifr);
8687         struct bnxt *bp = netdev_priv(dev);
8688         int rc;
8689
8690         switch (cmd) {
8691         case SIOCGMIIPHY:
8692                 mdio->phy_id = bp->link_info.phy_addr;
8693
8694                 /* fallthru */
8695         case SIOCGMIIREG: {
8696                 u16 mii_regval = 0;
8697
8698                 if (!netif_running(dev))
8699                         return -EAGAIN;
8700
8701                 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
8702                                              &mii_regval);
8703                 mdio->val_out = mii_regval;
8704                 return rc;
8705         }
8706
8707         case SIOCSMIIREG:
8708                 if (!netif_running(dev))
8709                         return -EAGAIN;
8710
8711                 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
8712                                                 mdio->val_in);
8713
8714         default:
8715                 /* do nothing */
8716                 break;
8717         }
8718         return -EOPNOTSUPP;
8719 }
8720
8721 static void bnxt_get_ring_stats(struct bnxt *bp,
8722                                 struct rtnl_link_stats64 *stats)
8723 {
8724         int i;
8725
8726
8727         for (i = 0; i < bp->cp_nr_rings; i++) {
8728                 struct bnxt_napi *bnapi = bp->bnapi[i];
8729                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8730                 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
8731
8732                 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
8733                 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
8734                 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
8735
8736                 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
8737                 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
8738                 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
8739
8740                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
8741                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
8742                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
8743
8744                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
8745                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
8746                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
8747
8748                 stats->rx_missed_errors +=
8749                         le64_to_cpu(hw_stats->rx_discard_pkts);
8750
8751                 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
8752
8753                 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
8754         }
8755 }
8756
8757 static void bnxt_add_prev_stats(struct bnxt *bp,
8758                                 struct rtnl_link_stats64 *stats)
8759 {
8760         struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
8761
8762         stats->rx_packets += prev_stats->rx_packets;
8763         stats->tx_packets += prev_stats->tx_packets;
8764         stats->rx_bytes += prev_stats->rx_bytes;
8765         stats->tx_bytes += prev_stats->tx_bytes;
8766         stats->rx_missed_errors += prev_stats->rx_missed_errors;
8767         stats->multicast += prev_stats->multicast;
8768         stats->tx_dropped += prev_stats->tx_dropped;
8769 }
8770
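/* The per-ring hardware counters are lost when the rings are freed on close,
 * so the totals saved at the last close (net_stats_prev) are added to the
 * live ring counters reported here.
 */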
8771 static void
8772 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
8773 {
8774         struct bnxt *bp = netdev_priv(dev);
8775
8776         set_bit(BNXT_STATE_READ_STATS, &bp->state);
8777         /* Make sure bnxt_close_nic() sees that we are reading stats before
8778          * we check the BNXT_STATE_OPEN flag.
8779          */
8780         smp_mb__after_atomic();
8781         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
8782                 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
8783                 *stats = bp->net_stats_prev;
8784                 return;
8785         }
8786
8787         bnxt_get_ring_stats(bp, stats);
8788         bnxt_add_prev_stats(bp, stats);
8789
8790         if (bp->flags & BNXT_FLAG_PORT_STATS) {
8791                 struct rx_port_stats *rx = bp->hw_rx_port_stats;
8792                 struct tx_port_stats *tx = bp->hw_tx_port_stats;
8793
8794                 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
8795                 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
8796                 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
8797                                           le64_to_cpu(rx->rx_ovrsz_frames) +
8798                                           le64_to_cpu(rx->rx_runt_frames);
8799                 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
8800                                    le64_to_cpu(rx->rx_jbr_frames);
8801                 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
8802                 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
8803                 stats->tx_errors = le64_to_cpu(tx->tx_err);
8804         }
8805         clear_bit(BNXT_STATE_READ_STATS, &bp->state);
8806 }
8807
8808 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
8809 {
8810         struct net_device *dev = bp->dev;
8811         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8812         struct netdev_hw_addr *ha;
8813         u8 *haddr;
8814         int mc_count = 0;
8815         bool update = false;
8816         int off = 0;
8817
8818         netdev_for_each_mc_addr(ha, dev) {
8819                 if (mc_count >= BNXT_MAX_MC_ADDRS) {
8820                         *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8821                         vnic->mc_list_count = 0;
8822                         return false;
8823                 }
8824                 haddr = ha->addr;
8825                 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
8826                         memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
8827                         update = true;
8828                 }
8829                 off += ETH_ALEN;
8830                 mc_count++;
8831         }
8832         if (mc_count)
8833                 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
8834
8835         if (mc_count != vnic->mc_list_count) {
8836                 vnic->mc_list_count = mc_count;
8837                 update = true;
8838         }
8839         return update;
8840 }
8841
8842 static bool bnxt_uc_list_updated(struct bnxt *bp)
8843 {
8844         struct net_device *dev = bp->dev;
8845         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8846         struct netdev_hw_addr *ha;
8847         int off = 0;
8848
8849         if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
8850                 return true;
8851
8852         netdev_for_each_uc_addr(ha, dev) {
8853                 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
8854                         return true;
8855
8856                 off += ETH_ALEN;
8857         }
8858         return false;
8859 }
8860
8861 static void bnxt_set_rx_mode(struct net_device *dev)
8862 {
8863         struct bnxt *bp = netdev_priv(dev);
8864         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8865         u32 mask = vnic->rx_mask;
8866         bool mc_update = false;
8867         bool uc_update;
8868
8869         if (!netif_running(dev))
8870                 return;
8871
8872         mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
8873                   CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
8874                   CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
8875                   CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
8876
8877         if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
8878                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8879
8880         uc_update = bnxt_uc_list_updated(bp);
8881
8882         if (dev->flags & IFF_BROADCAST)
8883                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
8884         if (dev->flags & IFF_ALLMULTI) {
8885                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8886                 vnic->mc_list_count = 0;
8887         } else {
8888                 mc_update = bnxt_mc_list_updated(bp, &mask);
8889         }
8890
8891         if (mask != vnic->rx_mask || uc_update || mc_update) {
8892                 vnic->rx_mask = mask;
8893
8894                 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
8895                 bnxt_queue_sp_work(bp);
8896         }
8897 }
8898
8899 static int bnxt_cfg_rx_mode(struct bnxt *bp)
8900 {
8901         struct net_device *dev = bp->dev;
8902         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8903         struct netdev_hw_addr *ha;
8904         int i, off = 0, rc;
8905         bool uc_update;
8906
8907         netif_addr_lock_bh(dev);
8908         uc_update = bnxt_uc_list_updated(bp);
8909         netif_addr_unlock_bh(dev);
8910
8911         if (!uc_update)
8912                 goto skip_uc;
8913
8914         mutex_lock(&bp->hwrm_cmd_lock);
8915         for (i = 1; i < vnic->uc_filter_count; i++) {
8916                 struct hwrm_cfa_l2_filter_free_input req = {0};
8917
8918                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
8919                                        -1);
8920
8921                 req.l2_filter_id = vnic->fw_l2_filter_id[i];
8922
8923                 rc = _hwrm_send_message(bp, &req, sizeof(req),
8924                                         HWRM_CMD_TIMEOUT);
8925         }
8926         mutex_unlock(&bp->hwrm_cmd_lock);
8927
8928         vnic->uc_filter_count = 1;
8929
8930         netif_addr_lock_bh(dev);
8931         if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
8932                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8933         } else {
8934                 netdev_for_each_uc_addr(ha, dev) {
8935                         memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
8936                         off += ETH_ALEN;
8937                         vnic->uc_filter_count++;
8938                 }
8939         }
8940         netif_addr_unlock_bh(dev);
8941
8942         for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
8943                 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
8944                 if (rc) {
8945                         netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
8946                                    rc);
8947                         vnic->uc_filter_count = i;
8948                         return rc;
8949                 }
8950         }
8951
8952 skip_uc:
8953         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
8954         if (rc)
8955                 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
8956                            rc);
8957
8958         return rc;
8959 }
8960
8961 static bool bnxt_can_reserve_rings(struct bnxt *bp)
8962 {
8963 #ifdef CONFIG_BNXT_SRIOV
8964         if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
8965                 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8966
8967                 /* If no minimum rings were provisioned by the PF, don't
8968                  * reserve rings by default when the device is down.
8969                  */
8970                 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
8971                         return true;
8972
8973                 if (!netif_running(bp->dev))
8974                         return false;
8975         }
8976 #endif
8977         return true;
8978 }
8979
8980 /* If the chip and firmware support RFS */
8981 static bool bnxt_rfs_supported(struct bnxt *bp)
8982 {
8983         if (bp->flags & BNXT_FLAG_CHIP_P5)
8984                 return false;
8985         if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
8986                 return true;
8987         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8988                 return true;
8989         return false;
8990 }
8991
8992 /* If runtime conditions support RFS */
8993 static bool bnxt_rfs_capable(struct bnxt *bp)
8994 {
8995 #ifdef CONFIG_RFS_ACCEL
8996         int vnics, max_vnics, max_rss_ctxs;
8997
8998         if (bp->flags & BNXT_FLAG_CHIP_P5)
8999                 return false;
9000         if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
9001                 return false;
9002
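        /* aRFS needs one VNIC per RX ring in addition to the default VNIC,
         * and a matching number of RSS contexts unless the NEW_RSS_CAP
         * firmware capability is present.
         */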
9003         vnics = 1 + bp->rx_nr_rings;
9004         max_vnics = bnxt_get_max_func_vnics(bp);
9005         max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
9006
9007         /* RSS contexts not a limiting factor */
9008         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
9009                 max_rss_ctxs = max_vnics;
9010         if (vnics > max_vnics || vnics > max_rss_ctxs) {
9011                 if (bp->rx_nr_rings > 1)
9012                         netdev_warn(bp->dev,
9013                                     "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
9014                                     min(max_rss_ctxs - 1, max_vnics - 1));
9015                 return false;
9016         }
9017
9018         if (!BNXT_NEW_RM(bp))
9019                 return true;
9020
9021         if (vnics == bp->hw_resc.resv_vnics)
9022                 return true;
9023
9024         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
9025         if (vnics <= bp->hw_resc.resv_vnics)
9026                 return true;
9027
9028         netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
9029         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
9030         return false;
9031 #else
9032         return false;
9033 #endif
9034 }
9035
9036 static netdev_features_t bnxt_fix_features(struct net_device *dev,
9037                                            netdev_features_t features)
9038 {
9039         struct bnxt *bp = netdev_priv(dev);
9040
9041         if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
9042                 features &= ~NETIF_F_NTUPLE;
9043
9044         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
9045                 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
9046
9047         if (!(features & NETIF_F_GRO))
9048                 features &= ~NETIF_F_GRO_HW;
9049
9050         if (features & NETIF_F_GRO_HW)
9051                 features &= ~NETIF_F_LRO;
9052
9053         /* Both CTAG and STAG VLAN acceleration on the RX side have to be
9054          * turned on or off together.
9055          */
9056         if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
9057             (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
9058                 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
9059                         features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
9060                                       NETIF_F_HW_VLAN_STAG_RX);
9061                 else
9062                         features |= NETIF_F_HW_VLAN_CTAG_RX |
9063                                     NETIF_F_HW_VLAN_STAG_RX;
9064         }
9065 #ifdef CONFIG_BNXT_SRIOV
9066         if (BNXT_VF(bp)) {
9067                 if (bp->vf.vlan) {
9068                         features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
9069                                       NETIF_F_HW_VLAN_STAG_RX);
9070                 }
9071         }
9072 #endif
9073         return features;
9074 }
9075
9076 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
9077 {
9078         struct bnxt *bp = netdev_priv(dev);
9079         u32 flags = bp->flags;
9080         u32 changes;
9081         int rc = 0;
9082         bool re_init = false;
9083         bool update_tpa = false;
9084
9085         flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
9086         if (features & NETIF_F_GRO_HW)
9087                 flags |= BNXT_FLAG_GRO;
9088         else if (features & NETIF_F_LRO)
9089                 flags |= BNXT_FLAG_LRO;
9090
9091         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
9092                 flags &= ~BNXT_FLAG_TPA;
9093
9094         if (features & NETIF_F_HW_VLAN_CTAG_RX)
9095                 flags |= BNXT_FLAG_STRIP_VLAN;
9096
9097         if (features & NETIF_F_NTUPLE)
9098                 flags |= BNXT_FLAG_RFS;
9099
9100         changes = flags ^ bp->flags;
9101         if (changes & BNXT_FLAG_TPA) {
9102                 update_tpa = true;
9103                 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
9104                     (flags & BNXT_FLAG_TPA) == 0)
9105                         re_init = true;
9106         }
9107
9108         if (changes & ~BNXT_FLAG_TPA)
9109                 re_init = true;
9110
9111         if (flags != bp->flags) {
9112                 u32 old_flags = bp->flags;
9113
9114                 bp->flags = flags;
9115
9116                 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9117                         if (update_tpa)
9118                                 bnxt_set_ring_params(bp);
9119                         return rc;
9120                 }
9121
9122                 if (re_init) {
9123                         bnxt_close_nic(bp, false, false);
9124                         if (update_tpa)
9125                                 bnxt_set_ring_params(bp);
9126
9127                         return bnxt_open_nic(bp, false, false);
9128                 }
9129                 if (update_tpa) {
9130                         rc = bnxt_set_tpa(bp,
9131                                           (flags & BNXT_FLAG_TPA) ?
9132                                           true : false);
9133                         if (rc)
9134                                 bp->flags = old_flags;
9135                 }
9136         }
9137         return rc;
9138 }
9139
9140 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
9141                                        u32 ring_id, u32 *prod, u32 *cons)
9142 {
9143         struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
9144         struct hwrm_dbg_ring_info_get_input req = {0};
9145         int rc;
9146
9147         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
9148         req.ring_type = ring_type;
9149         req.fw_ring_id = cpu_to_le32(ring_id);
9150         mutex_lock(&bp->hwrm_cmd_lock);
9151         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9152         if (!rc) {
9153                 *prod = le32_to_cpu(resp->producer_index);
9154                 *cons = le32_to_cpu(resp->consumer_index);
9155         }
9156         mutex_unlock(&bp->hwrm_cmd_lock);
9157         return rc;
9158 }
9159
9160 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
9161 {
9162         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
9163         int i = bnapi->index;
9164
9165         if (!txr)
9166                 return;
9167
9168         netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
9169                     i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
9170                     txr->tx_cons);
9171 }
9172
9173 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
9174 {
9175         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
9176         int i = bnapi->index;
9177
9178         if (!rxr)
9179                 return;
9180
9181         netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
9182                     i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
9183                     rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
9184                     rxr->rx_sw_agg_prod);
9185 }
9186
9187 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
9188 {
9189         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
9190         int i = bnapi->index;
9191
9192         netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
9193                     i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
9194 }
9195
9196 static void bnxt_dbg_dump_states(struct bnxt *bp)
9197 {
9198         int i;
9199         struct bnxt_napi *bnapi;
9200
9201         for (i = 0; i < bp->cp_nr_rings; i++) {
9202                 bnapi = bp->bnapi[i];
9203                 if (netif_msg_drv(bp)) {
9204                         bnxt_dump_tx_sw_state(bnapi);
9205                         bnxt_dump_rx_sw_state(bnapi);
9206                         bnxt_dump_cp_sw_state(bnapi);
9207                 }
9208         }
9209 }
9210
9211 static void bnxt_reset_task(struct bnxt *bp, bool silent)
9212 {
9213         if (!silent)
9214                 bnxt_dbg_dump_states(bp);
9215         if (netif_running(bp->dev)) {
9216                 int rc;
9217
9218                 if (!silent)
9219                         bnxt_ulp_stop(bp);
9220                 bnxt_close_nic(bp, false, false);
9221                 rc = bnxt_open_nic(bp, false, false);
9222                 if (!silent && !rc)
9223                         bnxt_ulp_start(bp);
9224         }
9225 }
9226
9227 static void bnxt_tx_timeout(struct net_device *dev)
9228 {
9229         struct bnxt *bp = netdev_priv(dev);
9230
9231         netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
9232         set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
9233         bnxt_queue_sp_work(bp);
9234 }
9235
9236 static void bnxt_timer(struct timer_list *t)
9237 {
9238         struct bnxt *bp = from_timer(bp, t, timer);
9239         struct net_device *dev = bp->dev;
9240
9241         if (!netif_running(dev))
9242                 return;
9243
9244         if (atomic_read(&bp->intr_sem) != 0)
9245                 goto bnxt_restart_timer;
9246
9247         if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
9248             bp->stats_coal_ticks) {
9249                 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
9250                 bnxt_queue_sp_work(bp);
9251         }
9252
9253         if (bnxt_tc_flower_enabled(bp)) {
9254                 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
9255                 bnxt_queue_sp_work(bp);
9256         }
9257
9258         if (bp->link_info.phy_retry) {
9259                 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
9260                         bp->link_info.phy_retry = 0;
9261                         netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
9262                 } else {
9263                         set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
9264                         bnxt_queue_sp_work(bp);
9265                 }
9266         }
9267
9268         if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) {
9269                 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
9270                 bnxt_queue_sp_work(bp);
9271         }
9272 bnxt_restart_timer:
9273         mod_timer(&bp->timer, jiffies + bp->current_interval);
9274 }
9275
9276 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
9277 {
9278         /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
9279          * set.  If the device is being closed, bnxt_close() may be holding
9280          * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
9281          * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
9282          */
9283         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9284         rtnl_lock();
9285 }
9286
9287 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
9288 {
9289         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9290         rtnl_unlock();
9291 }
9292
9293 /* Only called from bnxt_sp_task() */
9294 static void bnxt_reset(struct bnxt *bp, bool silent)
9295 {
9296         bnxt_rtnl_lock_sp(bp);
9297         if (test_bit(BNXT_STATE_OPEN, &bp->state))
9298                 bnxt_reset_task(bp, silent);
9299         bnxt_rtnl_unlock_sp(bp);
9300 }
9301
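/* On P5 chips, look for completion rings that have pending work but whose
 * consumer index has not advanced since the last check; this suggests a
 * missed interrupt, so query the ring state from firmware and count it.
 */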
9302 static void bnxt_chk_missed_irq(struct bnxt *bp)
9303 {
9304         int i;
9305
9306         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
9307                 return;
9308
9309         for (i = 0; i < bp->cp_nr_rings; i++) {
9310                 struct bnxt_napi *bnapi = bp->bnapi[i];
9311                 struct bnxt_cp_ring_info *cpr;
9312                 u32 fw_ring_id;
9313                 int j;
9314
9315                 if (!bnapi)
9316                         continue;
9317
9318                 cpr = &bnapi->cp_ring;
9319                 for (j = 0; j < 2; j++) {
9320                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
9321                         u32 val[2];
9322
9323                         if (!cpr2 || cpr2->has_more_work ||
9324                             !bnxt_has_work(bp, cpr2))
9325                                 continue;
9326
9327                         if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
9328                                 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
9329                                 continue;
9330                         }
9331                         fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
9332                         bnxt_dbg_hwrm_ring_info_get(bp,
9333                                 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
9334                                 fw_ring_id, &val[0], &val[1]);
9335                         cpr->missed_irqs++;
9336                 }
9337         }
9338 }
9339
9340 static void bnxt_cfg_ntp_filters(struct bnxt *);
9341
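/* Deferred work handler: services the slow-path events queued on
 * bp->sp_event (rx mode updates, tunnel port changes, stats, link changes,
 * resets).  BNXT_STATE_IN_SP_TASK is held for the duration so that close
 * can wait for it to finish.
 */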
9342 static void bnxt_sp_task(struct work_struct *work)
9343 {
9344         struct bnxt *bp = container_of(work, struct bnxt, sp_task);
9345
9346         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9347         smp_mb__after_atomic();
9348         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9349                 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9350                 return;
9351         }
9352
9353         if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
9354                 bnxt_cfg_rx_mode(bp);
9355
9356         if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
9357                 bnxt_cfg_ntp_filters(bp);
9358         if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
9359                 bnxt_hwrm_exec_fwd_req(bp);
9360         if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
9361                 bnxt_hwrm_tunnel_dst_port_alloc(
9362                         bp, bp->vxlan_port,
9363                         TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
9364         }
9365         if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
9366                 bnxt_hwrm_tunnel_dst_port_free(
9367                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
9368         }
9369         if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
9370                 bnxt_hwrm_tunnel_dst_port_alloc(
9371                         bp, bp->nge_port,
9372                         TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
9373         }
9374         if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
9375                 bnxt_hwrm_tunnel_dst_port_free(
9376                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
9377         }
9378         if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
9379                 bnxt_hwrm_port_qstats(bp);
9380                 bnxt_hwrm_port_qstats_ext(bp);
9381         }
9382
9383         if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
9384                 int rc;
9385
9386                 mutex_lock(&bp->link_lock);
9387                 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
9388                                        &bp->sp_event))
9389                         bnxt_hwrm_phy_qcaps(bp);
9390
9391                 rc = bnxt_update_link(bp, true);
9392                 mutex_unlock(&bp->link_lock);
9393                 if (rc)
9394                         netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
9395                                    rc);
9396         }
9397         if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
9398                 int rc;
9399
9400                 mutex_lock(&bp->link_lock);
9401                 rc = bnxt_update_phy_setting(bp);
9402                 mutex_unlock(&bp->link_lock);
9403                 if (rc) {
9404                         netdev_warn(bp->dev, "update phy settings retry failed\n");
9405                 } else {
9406                         bp->link_info.phy_retry = false;
9407                         netdev_info(bp->dev, "update phy settings retry succeeded\n");
9408                 }
9409         }
9410         if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
9411                 mutex_lock(&bp->link_lock);
9412                 bnxt_get_port_module_status(bp);
9413                 mutex_unlock(&bp->link_lock);
9414         }
9415
9416         if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
9417                 bnxt_tc_flow_stats_work(bp);
9418
9419         if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
9420                 bnxt_chk_missed_irq(bp);
9421
9422         /* The functions below will clear BNXT_STATE_IN_SP_TASK.  They
9423          * must be the last functions to be called before exiting.
9424          */
9425         if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
9426                 bnxt_reset(bp, false);
9427
9428         if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
9429                 bnxt_reset(bp, true);
9430
9431         smp_mb__before_atomic();
9432         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9433 }
9434
9435 /* Under rtnl_lock */
9436 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
9437                      int tx_xdp)
9438 {
9439         int max_rx, max_tx, tx_sets = 1;
9440         int tx_rings_needed, stats;
9441         int rx_rings = rx;
9442         int cp, vnics, rc;
9443
9444         if (tcs)
9445                 tx_sets = tcs;
9446
9447         rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
9448         if (rc)
9449                 return rc;
9450
9451         if (max_rx < rx)
9452                 return -ENOMEM;
9453
9454         tx_rings_needed = tx * tx_sets + tx_xdp;
9455         if (max_tx < tx_rings_needed)
9456                 return -ENOMEM;
9457
9458         vnics = 1;
9459         if (bp->flags & BNXT_FLAG_RFS)
9460                 vnics += rx_rings;
9461
9462         if (bp->flags & BNXT_FLAG_AGG_RINGS)
9463                 rx_rings <<= 1;
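        /* With shared rings, one completion ring serves a TX and an RX ring,
         * so only max(tx, rx) completion rings are needed; otherwise each TX
         * and RX ring needs its own.
         */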
9464         cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
9465         stats = cp;
9466         if (BNXT_NEW_RM(bp)) {
9467                 cp += bnxt_get_ulp_msix_num(bp);
9468                 stats += bnxt_get_ulp_stat_ctxs(bp);
9469         }
9470         return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
9471                                      stats, vnics);
9472 }
9473
9474 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
9475 {
9476         if (bp->bar2) {
9477                 pci_iounmap(pdev, bp->bar2);
9478                 bp->bar2 = NULL;
9479         }
9480
9481         if (bp->bar1) {
9482                 pci_iounmap(pdev, bp->bar1);
9483                 bp->bar1 = NULL;
9484         }
9485
9486         if (bp->bar0) {
9487                 pci_iounmap(pdev, bp->bar0);
9488                 bp->bar0 = NULL;
9489         }
9490 }
9491
9492 static void bnxt_cleanup_pci(struct bnxt *bp)
9493 {
9494         bnxt_unmap_bars(bp, bp->pdev);
9495         pci_release_regions(bp->pdev);
9496         pci_disable_device(bp->pdev);
9497 }
9498
9499 static void bnxt_init_dflt_coal(struct bnxt *bp)
9500 {
9501         struct bnxt_coal *coal;
9502
9503         /* Tick values in microseconds.
9504          * 1 coal_buf x bufs_per_record = 1 completion record.
9505          */
9506         coal = &bp->rx_coal;
9507         coal->coal_ticks = 10;
9508         coal->coal_bufs = 30;
9509         coal->coal_ticks_irq = 1;
9510         coal->coal_bufs_irq = 2;
9511         coal->idle_thresh = 50;
9512         coal->bufs_per_record = 2;
9513         coal->budget = 64;              /* NAPI budget */
9514
9515         coal = &bp->tx_coal;
9516         coal->coal_ticks = 28;
9517         coal->coal_bufs = 30;
9518         coal->coal_ticks_irq = 2;
9519         coal->coal_bufs_irq = 2;
9520         coal->bufs_per_record = 1;
9521
9522         bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
9523 }
9524
9525 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
9526 {
9527         int rc;
9528         struct bnxt *bp = netdev_priv(dev);
9529
9530         SET_NETDEV_DEV(dev, &pdev->dev);
9531
9532         /* enable device (incl. PCI PM wakeup), and bus-mastering */
9533         rc = pci_enable_device(pdev);
9534         if (rc) {
9535                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
9536                 goto init_err;
9537         }
9538
9539         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9540                 dev_err(&pdev->dev,
9541                         "Cannot find PCI device base address, aborting\n");
9542                 rc = -ENODEV;
9543                 goto init_err_disable;
9544         }
9545
9546         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9547         if (rc) {
9548                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
9549                 goto init_err_disable;
9550         }
9551
9552         if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
9553             dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
9554                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
                rc = -EIO;
9555                 goto init_err_disable;
9556         }
9557
9558         pci_set_master(pdev);
9559
9560         bp->dev = dev;
9561         bp->pdev = pdev;
9562
9563         bp->bar0 = pci_ioremap_bar(pdev, 0);
9564         if (!bp->bar0) {
9565                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
9566                 rc = -ENOMEM;
9567                 goto init_err_release;
9568         }
9569
9570         bp->bar1 = pci_ioremap_bar(pdev, 2);
9571         if (!bp->bar1) {
9572                 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
9573                 rc = -ENOMEM;
9574                 goto init_err_release;
9575         }
9576
9577         bp->bar2 = pci_ioremap_bar(pdev, 4);
9578         if (!bp->bar2) {
9579                 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
9580                 rc = -ENOMEM;
9581                 goto init_err_release;
9582         }
9583
9584         pci_enable_pcie_error_reporting(pdev);
9585
9586         INIT_WORK(&bp->sp_task, bnxt_sp_task);
9587
9588         spin_lock_init(&bp->ntp_fltr_lock);
9589 #if BITS_PER_LONG == 32
9590         spin_lock_init(&bp->db_lock);
9591 #endif
9592
9593         bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
9594         bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
9595
9596         bnxt_init_dflt_coal(bp);
9597
9598         timer_setup(&bp->timer, bnxt_timer, 0);
9599         bp->current_interval = BNXT_TIMER_INTERVAL;
9600
9601         clear_bit(BNXT_STATE_OPEN, &bp->state);
9602         return 0;
9603
9604 init_err_release:
9605         bnxt_unmap_bars(bp, pdev);
9606         pci_release_regions(pdev);
9607
9608 init_err_disable:
9609         pci_disable_device(pdev);
9610
9611 init_err:
9612         return rc;
9613 }
9614
9615 /* rtnl_lock held */
9616 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
9617 {
9618         struct sockaddr *addr = p;
9619         struct bnxt *bp = netdev_priv(dev);
9620         int rc = 0;
9621
9622         if (!is_valid_ether_addr(addr->sa_data))
9623                 return -EADDRNOTAVAIL;
9624
9625         if (ether_addr_equal(addr->sa_data, dev->dev_addr))
9626                 return 0;
9627
9628         rc = bnxt_approve_mac(bp, addr->sa_data, true);
9629         if (rc)
9630                 return rc;
9631
9632         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9633         if (netif_running(dev)) {
9634                 bnxt_close_nic(bp, false, false);
9635                 rc = bnxt_open_nic(bp, false, false);
9636         }
9637
9638         return rc;
9639 }
9640
9641 /* rtnl_lock held */
9642 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
9643 {
9644         struct bnxt *bp = netdev_priv(dev);
9645
9646         if (netif_running(dev))
9647                 bnxt_close_nic(bp, false, false);
9648
9649         dev->mtu = new_mtu;
9650         bnxt_set_ring_params(bp);
9651
9652         if (netif_running(dev))
9653                 return bnxt_open_nic(bp, false, false);
9654
9655         return 0;
9656 }
9657
9658 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
9659 {
9660         struct bnxt *bp = netdev_priv(dev);
9661         bool sh = false;
9662         int rc;
9663
9664         if (tc > bp->max_tc) {
9665                 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
9666                            tc, bp->max_tc);
9667                 return -EINVAL;
9668         }
9669
9670         if (netdev_get_num_tc(dev) == tc)
9671                 return 0;
9672
9673         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
9674                 sh = true;
9675
9676         rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
9677                               sh, tc, bp->tx_nr_rings_xdp);
9678         if (rc)
9679                 return rc;
9680
9681         /* Need to close the device and re-allocate hw resources */
9682         if (netif_running(bp->dev))
9683                 bnxt_close_nic(bp, true, false);
9684
9685         if (tc) {
9686                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
9687                 netdev_set_num_tc(dev, tc);
9688         } else {
9689                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
9690                 netdev_reset_tc(dev);
9691         }
9692         bp->tx_nr_rings += bp->tx_nr_rings_xdp;
9693         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
9694                                bp->tx_nr_rings + bp->rx_nr_rings;
9695
9696         if (netif_running(bp->dev))
9697                 return bnxt_open_nic(bp, true, false);
9698
9699         return 0;
9700 }
9701
9702 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
9703                                   void *cb_priv)
9704 {
9705         struct bnxt *bp = cb_priv;
9706
9707         if (!bnxt_tc_flower_enabled(bp) ||
9708             !tc_cls_can_offload_and_chain0(bp->dev, type_data))
9709                 return -EOPNOTSUPP;
9710
9711         switch (type) {
9712         case TC_SETUP_CLSFLOWER:
9713                 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
9714         default:
9715                 return -EOPNOTSUPP;
9716         }
9717 }
9718
9719 static int bnxt_setup_tc_block(struct net_device *dev,
9720                                struct tc_block_offload *f)
9721 {
9722         struct bnxt *bp = netdev_priv(dev);
9723
9724         if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
9725                 return -EOPNOTSUPP;
9726
9727         switch (f->command) {
9728         case TC_BLOCK_BIND:
9729                 return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb,
9730                                              bp, bp, f->extack);
9731         case TC_BLOCK_UNBIND:
9732                 tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp);
9733                 return 0;
9734         default:
9735                 return -EOPNOTSUPP;
9736         }
9737 }
9738
9739 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
9740                          void *type_data)
9741 {
9742         switch (type) {
9743         case TC_SETUP_BLOCK:
9744                 return bnxt_setup_tc_block(dev, type_data);
9745         case TC_SETUP_QDISC_MQPRIO: {
9746                 struct tc_mqprio_qopt *mqprio = type_data;
9747
9748                 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
9749
9750                 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
9751         }
9752         default:
9753                 return -EOPNOTSUPP;
9754         }
9755 }
9756
9757 #ifdef CONFIG_RFS_ACCEL
9758 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
9759                             struct bnxt_ntuple_filter *f2)
9760 {
9761         struct flow_keys *keys1 = &f1->fkeys;
9762         struct flow_keys *keys2 = &f2->fkeys;
9763
9764         if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
9765             keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
9766             keys1->ports.ports == keys2->ports.ports &&
9767             keys1->basic.ip_proto == keys2->basic.ip_proto &&
9768             keys1->basic.n_proto == keys2->basic.n_proto &&
9769             keys1->control.flags == keys2->control.flags &&
9770             ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
9771             ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
9772                 return true;
9773
9774         return false;
9775 }
9776
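/* ndo_rx_flow_steer handler for accelerated RFS.  Builds an ntuple filter
 * from the flow keys of @skb, checks the filter hash table for a duplicate,
 * and queues the new filter for hardware programming via the slow-path
 * work item.  Returns the new filter's sw_id, 0 if an identical filter
 * already exists, or a negative error code.
 */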
9777 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
9778                               u16 rxq_index, u32 flow_id)
9779 {
9780         struct bnxt *bp = netdev_priv(dev);
9781         struct bnxt_ntuple_filter *fltr, *new_fltr;
9782         struct flow_keys *fkeys;
9783         struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
9784         int rc = 0, idx, bit_id, l2_idx = 0;
9785         struct hlist_head *head;
9786
9787         if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
9788                 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9789                 int off = 0, j;
9790
9791                 netif_addr_lock_bh(dev);
9792                 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
9793                         if (ether_addr_equal(eth->h_dest,
9794                                              vnic->uc_list + off)) {
9795                                 l2_idx = j + 1;
9796                                 break;
9797                         }
9798                 }
9799                 netif_addr_unlock_bh(dev);
9800                 if (!l2_idx)
9801                         return -EINVAL;
9802         }
9803         new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
9804         if (!new_fltr)
9805                 return -ENOMEM;
9806
9807         fkeys = &new_fltr->fkeys;
9808         if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
9809                 rc = -EPROTONOSUPPORT;
9810                 goto err_free;
9811         }
9812
9813         if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
9814              fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
9815             ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
9816              (fkeys->basic.ip_proto != IPPROTO_UDP))) {
9817                 rc = -EPROTONOSUPPORT;
9818                 goto err_free;
9819         }
9820         if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
9821             bp->hwrm_spec_code < 0x10601) {
9822                 rc = -EPROTONOSUPPORT;
9823                 goto err_free;
9824         }
9825         if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
9826             bp->hwrm_spec_code < 0x10601) {
9827                 rc = -EPROTONOSUPPORT;
9828                 goto err_free;
9829         }
9830
9831         memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
9832         memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
9833
9834         idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
9835         head = &bp->ntp_fltr_hash_tbl[idx];
9836         rcu_read_lock();
9837         hlist_for_each_entry_rcu(fltr, head, hash) {
9838                 if (bnxt_fltr_match(fltr, new_fltr)) {
9839                         rcu_read_unlock();
9840                         rc = 0;
9841                         goto err_free;
9842                 }
9843         }
9844         rcu_read_unlock();
9845
9846         spin_lock_bh(&bp->ntp_fltr_lock);
9847         bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
9848                                          BNXT_NTP_FLTR_MAX_FLTR, 0);
9849         if (bit_id < 0) {
9850                 spin_unlock_bh(&bp->ntp_fltr_lock);
9851                 rc = -ENOMEM;
9852                 goto err_free;
9853         }
9854
9855         new_fltr->sw_id = (u16)bit_id;
9856         new_fltr->flow_id = flow_id;
9857         new_fltr->l2_fltr_idx = l2_idx;
9858         new_fltr->rxq = rxq_index;
9859         hlist_add_head_rcu(&new_fltr->hash, head);
9860         bp->ntp_fltr_count++;
9861         spin_unlock_bh(&bp->ntp_fltr_lock);
9862
9863         set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
9864         bnxt_queue_sp_work(bp);
9865
9866         return new_fltr->sw_id;
9867
9868 err_free:
9869         kfree(new_fltr);
9870         return rc;
9871 }
9872
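/* Slow-path worker for ntuple filters: program newly added filters in
 * hardware and free filters that RPS reports as expired.
 */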
9873 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
9874 {
9875         int i;
9876
9877         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
9878                 struct hlist_head *head;
9879                 struct hlist_node *tmp;
9880                 struct bnxt_ntuple_filter *fltr;
9881                 int rc;
9882
9883                 head = &bp->ntp_fltr_hash_tbl[i];
9884                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
9885                         bool del = false;
9886
9887                         if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
9888                                 if (rps_may_expire_flow(bp->dev, fltr->rxq,
9889                                                         fltr->flow_id,
9890                                                         fltr->sw_id)) {
9891                                         bnxt_hwrm_cfa_ntuple_filter_free(bp,
9892                                                                          fltr);
9893                                         del = true;
9894                                 }
9895                         } else {
9896                                 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
9897                                                                        fltr);
9898                                 if (rc)
9899                                         del = true;
9900                                 else
9901                                         set_bit(BNXT_FLTR_VALID, &fltr->state);
9902                         }
9903
9904                         if (del) {
9905                                 spin_lock_bh(&bp->ntp_fltr_lock);
9906                                 hlist_del_rcu(&fltr->hash);
9907                                 bp->ntp_fltr_count--;
9908                                 spin_unlock_bh(&bp->ntp_fltr_lock);
9909                                 synchronize_rcu();
9910                                 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
9911                                 kfree(fltr);
9912                         }
9913                 }
9914         }
9915         if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
9916                 netdev_info(bp->dev, "Received PF driver unload event!\n");
9917 }
9918
9919 #else
9920
9921 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
9922 {
9923 }
9924
9925 #endif /* CONFIG_RFS_ACCEL */
9926
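/* ndo_udp_tunnel_add handler.  Records the VXLAN or GENEVE destination
 * port and schedules slow-path work to program it in the firmware.
 */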
9927 static void bnxt_udp_tunnel_add(struct net_device *dev,
9928                                 struct udp_tunnel_info *ti)
9929 {
9930         struct bnxt *bp = netdev_priv(dev);
9931
9932         if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
9933                 return;
9934
9935         if (!netif_running(dev))
9936                 return;
9937
9938         switch (ti->type) {
9939         case UDP_TUNNEL_TYPE_VXLAN:
9940                 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
9941                         return;
9942
9943                 bp->vxlan_port_cnt++;
9944                 if (bp->vxlan_port_cnt == 1) {
9945                         bp->vxlan_port = ti->port;
9946                         set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
9947                         bnxt_queue_sp_work(bp);
9948                 }
9949                 break;
9950         case UDP_TUNNEL_TYPE_GENEVE:
9951                 if (bp->nge_port_cnt && bp->nge_port != ti->port)
9952                         return;
9953
9954                 bp->nge_port_cnt++;
9955                 if (bp->nge_port_cnt == 1) {
9956                         bp->nge_port = ti->port;
9957                         set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
9958                 }
9959                 break;
9960         default:
9961                 return;
9962         }
9963
9964         bnxt_queue_sp_work(bp);
9965 }
9966
9967 static void bnxt_udp_tunnel_del(struct net_device *dev,
9968                                 struct udp_tunnel_info *ti)
9969 {
9970         struct bnxt *bp = netdev_priv(dev);
9971
9972         if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
9973                 return;
9974
9975         if (!netif_running(dev))
9976                 return;
9977
9978         switch (ti->type) {
9979         case UDP_TUNNEL_TYPE_VXLAN:
9980                 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
9981                         return;
9982                 bp->vxlan_port_cnt--;
9983
9984                 if (bp->vxlan_port_cnt != 0)
9985                         return;
9986
9987                 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
9988                 break;
9989         case UDP_TUNNEL_TYPE_GENEVE:
9990                 if (!bp->nge_port_cnt || bp->nge_port != ti->port)
9991                         return;
9992                 bp->nge_port_cnt--;
9993
9994                 if (bp->nge_port_cnt != 0)
9995                         return;
9996
9997                 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
9998                 break;
9999         default:
10000                 return;
10001         }
10002
10003         bnxt_queue_sp_work(bp);
10004 }
10005
10006 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
10007                                struct net_device *dev, u32 filter_mask,
10008                                int nlflags)
10009 {
10010         struct bnxt *bp = netdev_priv(dev);
10011
10012         return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
10013                                        nlflags, filter_mask, NULL);
10014 }
10015
10016 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
10017                                u16 flags, struct netlink_ext_ack *extack)
10018 {
10019         struct bnxt *bp = netdev_priv(dev);
10020         struct nlattr *attr, *br_spec;
10021         int rem, rc = 0;
10022
10023         if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
10024                 return -EOPNOTSUPP;
10025
10026         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
10027         if (!br_spec)
10028                 return -EINVAL;
10029
10030         nla_for_each_nested(attr, br_spec, rem) {
10031                 u16 mode;
10032
10033                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
10034                         continue;
10035
10036                 if (nla_len(attr) < sizeof(mode))
10037                         return -EINVAL;
10038
10039                 mode = nla_get_u16(attr);
10040                 if (mode == bp->br_mode)
10041                         break;
10042
10043                 rc = bnxt_hwrm_set_br_mode(bp, mode);
10044                 if (!rc)
10045                         bp->br_mode = mode;
10046                 break;
10047         }
10048         return rc;
10049 }
10050
10051 static int bnxt_get_phys_port_name(struct net_device *dev, char *buf,
10052                                    size_t len)
10053 {
10054         struct bnxt *bp = netdev_priv(dev);
10055         int rc;
10056
10057         /* The PF and its VF-reps only support the switchdev framework */
10058         if (!BNXT_PF(bp))
10059                 return -EOPNOTSUPP;
10060
10061         rc = snprintf(buf, len, "p%d", bp->pf.port_id);
10062
10063         if (rc >= len)
10064                 return -EOPNOTSUPP;
10065         return 0;
10066 }
10067
10068 int bnxt_get_port_parent_id(struct net_device *dev,
10069                             struct netdev_phys_item_id *ppid)
10070 {
10071         struct bnxt *bp = netdev_priv(dev);
10072
10073         if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
10074                 return -EOPNOTSUPP;
10075
10076         /* The PF and its VF-reps only support the switchdev framework */
10077         if (!BNXT_PF(bp))
10078                 return -EOPNOTSUPP;
10079
10080         ppid->id_len = sizeof(bp->switch_id);
10081         memcpy(ppid->id, bp->switch_id, ppid->id_len);
10082
10083         return 0;
10084 }
10085
10086 static const struct net_device_ops bnxt_netdev_ops = {
10087         .ndo_open               = bnxt_open,
10088         .ndo_start_xmit         = bnxt_start_xmit,
10089         .ndo_stop               = bnxt_close,
10090         .ndo_get_stats64        = bnxt_get_stats64,
10091         .ndo_set_rx_mode        = bnxt_set_rx_mode,
10092         .ndo_do_ioctl           = bnxt_ioctl,
10093         .ndo_validate_addr      = eth_validate_addr,
10094         .ndo_set_mac_address    = bnxt_change_mac_addr,
10095         .ndo_change_mtu         = bnxt_change_mtu,
10096         .ndo_fix_features       = bnxt_fix_features,
10097         .ndo_set_features       = bnxt_set_features,
10098         .ndo_tx_timeout         = bnxt_tx_timeout,
10099 #ifdef CONFIG_BNXT_SRIOV
10100         .ndo_get_vf_config      = bnxt_get_vf_config,
10101         .ndo_set_vf_mac         = bnxt_set_vf_mac,
10102         .ndo_set_vf_vlan        = bnxt_set_vf_vlan,
10103         .ndo_set_vf_rate        = bnxt_set_vf_bw,
10104         .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
10105         .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
10106         .ndo_set_vf_trust       = bnxt_set_vf_trust,
10107 #endif
10108         .ndo_setup_tc           = bnxt_setup_tc,
10109 #ifdef CONFIG_RFS_ACCEL
10110         .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
10111 #endif
10112         .ndo_udp_tunnel_add     = bnxt_udp_tunnel_add,
10113         .ndo_udp_tunnel_del     = bnxt_udp_tunnel_del,
10114         .ndo_bpf                = bnxt_xdp,
10115         .ndo_bridge_getlink     = bnxt_bridge_getlink,
10116         .ndo_bridge_setlink     = bnxt_bridge_setlink,
10117         .ndo_get_port_parent_id = bnxt_get_port_parent_id,
10118         .ndo_get_phys_port_name = bnxt_get_phys_port_name
10119 };
10120
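/* PCI remove callback.  Unregisters the netdev (and devlink instance for
 * the PF), cancels slow-path work, frees HWRM and context memory, and
 * releases the PCI resources acquired at probe time.
 */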
10121 static void bnxt_remove_one(struct pci_dev *pdev)
10122 {
10123         struct net_device *dev = pci_get_drvdata(pdev);
10124         struct bnxt *bp = netdev_priv(dev);
10125
10126         if (BNXT_PF(bp)) {
10127                 bnxt_sriov_disable(bp);
10128                 bnxt_dl_unregister(bp);
10129         }
10130
10131         pci_disable_pcie_error_reporting(pdev);
10132         unregister_netdev(dev);
10133         bnxt_shutdown_tc(bp);
10134         bnxt_cancel_sp_work(bp);
10135         bp->sp_event = 0;
10136
10137         bnxt_clear_int_mode(bp);
10138         bnxt_hwrm_func_drv_unrgtr(bp);
10139         bnxt_free_hwrm_resources(bp);
10140         bnxt_free_hwrm_short_cmd_req(bp);
10141         bnxt_ethtool_free(bp);
10142         bnxt_dcb_free(bp);
10143         kfree(bp->edev);
10144         bp->edev = NULL;
10145         bnxt_free_ctx_mem(bp);
10146         kfree(bp->ctx);
10147         bp->ctx = NULL;
10148         bnxt_cleanup_pci(bp);
10149         bnxt_free_port_stats(bp);
10150         free_netdev(dev);
10151 }
10152
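/* Query PHY capabilities and the current link state from firmware, then
 * seed the driver's ethtool link settings (autoneg, speed, flow control)
 * from the NVM-configured defaults.
 */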
10153 static int bnxt_probe_phy(struct bnxt *bp)
10154 {
10155         int rc = 0;
10156         struct bnxt_link_info *link_info = &bp->link_info;
10157
10158         rc = bnxt_hwrm_phy_qcaps(bp);
10159         if (rc) {
10160                 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
10161                            rc);
10162                 return rc;
10163         }
10164         mutex_init(&bp->link_lock);
10165
10166         rc = bnxt_update_link(bp, false);
10167         if (rc) {
10168                 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
10169                            rc);
10170                 return rc;
10171         }
10172
10173         /* Older firmware does not have supported_auto_speeds, so assume
10174          * that all supported speeds can be autonegotiated.
10175          */
10176         if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
10177                 link_info->support_auto_speeds = link_info->support_speeds;
10178
10179         /* Initialize the ethtool setting copy with NVM settings */
10180         if (BNXT_AUTO_MODE(link_info->auto_mode)) {
10181                 link_info->autoneg = BNXT_AUTONEG_SPEED;
10182                 if (bp->hwrm_spec_code >= 0x10201) {
10183                         if (link_info->auto_pause_setting &
10184                             PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
10185                                 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10186                 } else {
10187                         link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10188                 }
10189                 link_info->advertising = link_info->auto_link_speeds;
10190         } else {
10191                 link_info->req_link_speed = link_info->force_link_speed;
10192                 link_info->req_duplex = link_info->duplex_setting;
10193         }
10194         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
10195                 link_info->req_flow_ctrl =
10196                         link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
10197         else
10198                 link_info->req_flow_ctrl = link_info->force_pause_setting;
10199         return rc;
10200 }
10201
10202 static int bnxt_get_max_irq(struct pci_dev *pdev)
10203 {
10204         u16 ctrl;
10205
10206         if (!pdev->msix_cap)
10207                 return 1;
10208
10209         pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
10210         return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
10211 }
10212
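/* Compute the maximum usable RX, TX and completion rings for this
 * function from the firmware-reported hardware resources, IRQs and
 * ring groups, accounting for aggregation rings and P5 NQs.
 */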
10213 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
10214                                 int *max_cp)
10215 {
10216         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10217         int max_ring_grps = 0, max_irq;
10218
10219         *max_tx = hw_resc->max_tx_rings;
10220         *max_rx = hw_resc->max_rx_rings;
10221         *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
10222         max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
10223                         bnxt_get_ulp_msix_num(bp),
10224                         hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
10225         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
10226                 *max_cp = min_t(int, *max_cp, max_irq);
10227         max_ring_grps = hw_resc->max_hw_ring_grps;
10228         if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
10229                 *max_cp -= 1;
10230                 *max_rx -= 2;
10231         }
10232         if (bp->flags & BNXT_FLAG_AGG_RINGS)
10233                 *max_rx >>= 1;
10234         if (bp->flags & BNXT_FLAG_CHIP_P5) {
10235                 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
10236                 /* On P5 chips, the max_cp output param is the number of available NQs */
10237                 *max_cp = max_irq;
10238         }
10239         *max_rx = min_t(int, *max_rx, max_ring_grps);
10240 }
10241
10242 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
10243 {
10244         int rx, tx, cp;
10245
10246         _bnxt_get_max_rings(bp, &rx, &tx, &cp);
10247         *max_rx = rx;
10248         *max_tx = tx;
10249         if (!rx || !tx || !cp)
10250                 return -ENOMEM;
10251
10252         return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
10253 }
10254
10255 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
10256                                bool shared)
10257 {
10258         int rc;
10259
10260         rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
10261         if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
10262                 /* Not enough rings, try disabling agg rings. */
10263                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
10264                 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
10265                 if (rc) {
10266                         /* set BNXT_FLAG_AGG_RINGS back for consistency */
10267                         bp->flags |= BNXT_FLAG_AGG_RINGS;
10268                         return rc;
10269                 }
10270                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
10271                 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10272                 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10273                 bnxt_set_ring_params(bp);
10274         }
10275
10276         if (bp->flags & BNXT_FLAG_ROCE_CAP) {
10277                 int max_cp, max_stat, max_irq;
10278
10279                 /* Reserve minimum resources for RoCE */
10280                 max_cp = bnxt_get_max_func_cp_rings(bp);
10281                 max_stat = bnxt_get_max_func_stat_ctxs(bp);
10282                 max_irq = bnxt_get_max_func_irqs(bp);
10283                 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
10284                     max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
10285                     max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
10286                         return 0;
10287
10288                 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
10289                 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
10290                 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
10291                 max_cp = min_t(int, max_cp, max_irq);
10292                 max_cp = min_t(int, max_cp, max_stat);
10293                 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
10294                 if (rc)
10295                         rc = 0;
10296         }
10297         return rc;
10298 }
10299
10300 /* In the initial default shared ring setting, each shared ring must have
10301  * an RX/TX ring pair.
10302  */
10303 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
10304 {
10305         bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
10306         bp->rx_nr_rings = bp->cp_nr_rings;
10307         bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
10308         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
10309 }
10310
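/* Choose the default number of RX/TX rings, bounded by the default RSS
 * queue count, the per-port CPU budget and the reservable hardware
 * resources, then reserve them via __bnxt_reserve_rings().
 */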
10311 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
10312 {
10313         int dflt_rings, max_rx_rings, max_tx_rings, rc;
10314
10315         if (!bnxt_can_reserve_rings(bp))
10316                 return 0;
10317
10318         if (sh)
10319                 bp->flags |= BNXT_FLAG_SHARED_RINGS;
10320         dflt_rings = netif_get_num_default_rss_queues();
10321         /* Reduce default rings on multi-port cards so that total default
10322          * rings do not exceed CPU count.
10323          */
10324         if (bp->port_count > 1) {
10325                 int max_rings =
10326                         max_t(int, num_online_cpus() / bp->port_count, 1);
10327
10328                 dflt_rings = min_t(int, dflt_rings, max_rings);
10329         }
10330         rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
10331         if (rc)
10332                 return rc;
10333         bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
10334         bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
10335         if (sh)
10336                 bnxt_trim_dflt_sh_rings(bp);
10337         else
10338                 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
10339         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
10340
10341         rc = __bnxt_reserve_rings(bp);
10342         if (rc)
10343                 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
10344         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10345         if (sh)
10346                 bnxt_trim_dflt_sh_rings(bp);
10347
10348         /* Rings may have been trimmed, re-reserve the trimmed rings. */
10349         if (bnxt_need_reserve_rings(bp)) {
10350                 rc = __bnxt_reserve_rings(bp);
10351                 if (rc)
10352                         netdev_warn(bp->dev, "2nd rings reservation failed.\n");
10353                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10354         }
10355         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
10356                 bp->rx_nr_rings++;
10357                 bp->cp_nr_rings++;
10358         }
10359         return rc;
10360 }
10361
10362 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
10363 {
10364         int rc;
10365
10366         if (bp->tx_nr_rings)
10367                 return 0;
10368
10369         bnxt_ulp_irq_stop(bp);
10370         bnxt_clear_int_mode(bp);
10371         rc = bnxt_set_dflt_rings(bp, true);
10372         if (rc) {
10373                 netdev_err(bp->dev, "Not enough rings available.\n");
10374                 goto init_dflt_ring_err;
10375         }
10376         rc = bnxt_init_int_mode(bp);
10377         if (rc)
10378                 goto init_dflt_ring_err;
10379
10380         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10381         if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
10382                 bp->flags |= BNXT_FLAG_RFS;
10383                 bp->dev->features |= NETIF_F_NTUPLE;
10384         }
10385 init_dflt_ring_err:
10386         bnxt_ulp_irq_restart(bp, rc);
10387         return rc;
10388 }
10389
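/* Re-query function capabilities and re-initialize the interrupt mode
 * after firmware resources may have changed, restarting the NIC if it
 * was running.
 */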
10390 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
10391 {
10392         int rc;
10393
10394         ASSERT_RTNL();
10395         bnxt_hwrm_func_qcaps(bp);
10396
10397         if (netif_running(bp->dev))
10398                 __bnxt_close_nic(bp, true, false);
10399
10400         bnxt_ulp_irq_stop(bp);
10401         bnxt_clear_int_mode(bp);
10402         rc = bnxt_init_int_mode(bp);
10403         bnxt_ulp_irq_restart(bp, rc);
10404
10405         if (netif_running(bp->dev)) {
10406                 if (rc)
10407                         dev_close(bp->dev);
10408                 else
10409                         rc = bnxt_open_nic(bp, true, false);
10410         }
10411
10412         return rc;
10413 }
10414
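/* Set the initial netdev MAC address: the PF uses the firmware-provided
 * address, while a VF uses the admin-assigned address if valid or a
 * random one otherwise, subject to PF/firmware approval.
 */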
10415 static int bnxt_init_mac_addr(struct bnxt *bp)
10416 {
10417         int rc = 0;
10418
10419         if (BNXT_PF(bp)) {
10420                 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
10421         } else {
10422 #ifdef CONFIG_BNXT_SRIOV
10423                 struct bnxt_vf_info *vf = &bp->vf;
10424                 bool strict_approval = true;
10425
10426                 if (is_valid_ether_addr(vf->mac_addr)) {
10427                         /* overwrite netdev dev_addr with admin VF MAC */
10428                         memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
10429                         /* Older PF driver or firmware may not approve this
10430                          * correctly.
10431                          */
10432                         strict_approval = false;
10433                 } else {
10434                         eth_hw_addr_random(bp->dev);
10435                 }
10436                 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
10437 #endif
10438         }
10439         return rc;
10440 }
10441
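/* PCI probe callback.  Allocates the netdev, maps the device, queries
 * firmware capabilities, sets up features, default rings and interrupt
 * mode, and registers the netdev (and devlink instance for the PF).
 */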
10442 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
10443 {
10444         static int version_printed;
10445         struct net_device *dev;
10446         struct bnxt *bp;
10447         int rc, max_irqs;
10448
10449         if (pci_is_bridge(pdev))
10450                 return -ENODEV;
10451
10452         if (version_printed++ == 0)
10453                 pr_info("%s", version);
10454
10455         max_irqs = bnxt_get_max_irq(pdev);
10456         dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
10457         if (!dev)
10458                 return -ENOMEM;
10459
10460         bp = netdev_priv(dev);
10461         bnxt_set_max_func_irqs(bp, max_irqs);
10462
10463         if (bnxt_vf_pciid(ent->driver_data))
10464                 bp->flags |= BNXT_FLAG_VF;
10465
10466         if (pdev->msix_cap)
10467                 bp->flags |= BNXT_FLAG_MSIX_CAP;
10468
10469         rc = bnxt_init_board(pdev, dev);
10470         if (rc < 0)
10471                 goto init_err_free;
10472
10473         dev->netdev_ops = &bnxt_netdev_ops;
10474         dev->watchdog_timeo = BNXT_TX_TIMEOUT;
10475         dev->ethtool_ops = &bnxt_ethtool_ops;
10476         pci_set_drvdata(pdev, dev);
10477
10478         rc = bnxt_alloc_hwrm_resources(bp);
10479         if (rc)
10480                 goto init_err_pci_clean;
10481
10482         mutex_init(&bp->hwrm_cmd_lock);
10483         rc = bnxt_hwrm_ver_get(bp);
10484         if (rc)
10485                 goto init_err_pci_clean;
10486
10487         if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
10488                 rc = bnxt_alloc_kong_hwrm_resources(bp);
10489                 if (rc)
10490                         bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
10491         }
10492
10493         if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
10494             bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
10495                 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
10496                 if (rc)
10497                         goto init_err_pci_clean;
10498         }
10499
10500         if (BNXT_CHIP_P5(bp))
10501                 bp->flags |= BNXT_FLAG_CHIP_P5;
10502
10503         rc = bnxt_hwrm_func_reset(bp);
10504         if (rc)
10505                 goto init_err_pci_clean;
10506
10507         bnxt_hwrm_fw_set_time(bp);
10508
10509         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
10510                            NETIF_F_TSO | NETIF_F_TSO6 |
10511                            NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
10512                            NETIF_F_GSO_IPXIP4 |
10513                            NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
10514                            NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
10515                            NETIF_F_RXCSUM | NETIF_F_GRO;
10516
10517         if (BNXT_SUPPORTS_TPA(bp))
10518                 dev->hw_features |= NETIF_F_LRO;
10519
10520         dev->hw_enc_features =
10521                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
10522                         NETIF_F_TSO | NETIF_F_TSO6 |
10523                         NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
10524                         NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
10525                         NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
10526         dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
10527                                     NETIF_F_GSO_GRE_CSUM;
10528         dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
10529         dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
10530                             NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
10531         if (BNXT_SUPPORTS_TPA(bp))
10532                 dev->hw_features |= NETIF_F_GRO_HW;
10533         dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
10534         if (dev->features & NETIF_F_GRO_HW)
10535                 dev->features &= ~NETIF_F_LRO;
10536         dev->priv_flags |= IFF_UNICAST_FLT;
10537
10538 #ifdef CONFIG_BNXT_SRIOV
10539         init_waitqueue_head(&bp->sriov_cfg_wait);
10540         mutex_init(&bp->sriov_lock);
10541 #endif
10542         if (BNXT_SUPPORTS_TPA(bp)) {
10543                 bp->gro_func = bnxt_gro_func_5730x;
10544                 if (BNXT_CHIP_P4(bp))
10545                         bp->gro_func = bnxt_gro_func_5731x;
10546         }
10547         if (!BNXT_CHIP_P4_PLUS(bp))
10548                 bp->flags |= BNXT_FLAG_DOUBLE_DB;
10549
10550         rc = bnxt_hwrm_func_drv_rgtr(bp);
10551         if (rc)
10552                 goto init_err_pci_clean;
10553
10554         rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
10555         if (rc)
10556                 goto init_err_pci_clean;
10557
10558         bp->ulp_probe = bnxt_ulp_probe;
10559
10560         rc = bnxt_hwrm_queue_qportcfg(bp);
10561         if (rc) {
10562                 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
10563                            rc);
10564                 rc = -1;
10565                 goto init_err_pci_clean;
10566         }
10567         /* Get the MAX capabilities for this function */
10568         rc = bnxt_hwrm_func_qcaps(bp);
10569         if (rc) {
10570                 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
10571                            rc);
10572                 rc = -1;
10573                 goto init_err_pci_clean;
10574         }
10575         rc = bnxt_init_mac_addr(bp);
10576         if (rc) {
10577                 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
10578                 rc = -EADDRNOTAVAIL;
10579                 goto init_err_pci_clean;
10580         }
10581
10582         bnxt_hwrm_func_qcfg(bp);
10583         bnxt_hwrm_vnic_qcaps(bp);
10584         bnxt_hwrm_port_led_qcaps(bp);
10585         bnxt_ethtool_init(bp);
10586         bnxt_dcb_init(bp);
10587
10588         /* MTU range: 60 - FW defined max */
10589         dev->min_mtu = ETH_ZLEN;
10590         dev->max_mtu = bp->max_mtu;
10591
10592         rc = bnxt_probe_phy(bp);
10593         if (rc)
10594                 goto init_err_pci_clean;
10595
10596         bnxt_set_rx_skb_mode(bp, false);
10597         bnxt_set_tpa_flags(bp);
10598         bnxt_set_ring_params(bp);
10599         rc = bnxt_set_dflt_rings(bp, true);
10600         if (rc) {
10601                 netdev_err(bp->dev, "Not enough rings available.\n");
10602                 rc = -ENOMEM;
10603                 goto init_err_pci_clean;
10604         }
10605
10606         /* Default RSS hash cfg. */
10607         bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
10608                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
10609                            VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
10610                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
10611         if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
10612                 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
10613                 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
10614                                     VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
10615         }
10616
10617         if (bnxt_rfs_supported(bp)) {
10618                 dev->hw_features |= NETIF_F_NTUPLE;
10619                 if (bnxt_rfs_capable(bp)) {
10620                         bp->flags |= BNXT_FLAG_RFS;
10621                         dev->features |= NETIF_F_NTUPLE;
10622                 }
10623         }
10624
10625         if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
10626                 bp->flags |= BNXT_FLAG_STRIP_VLAN;
10627
10628         rc = bnxt_init_int_mode(bp);
10629         if (rc)
10630                 goto init_err_pci_clean;
10631
10632         /* No TC has been set yet and rings may have been trimmed due to
10633          * limited MSIX, so we re-initialize the TX rings per TC.
10634          */
10635         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10636
10637         bnxt_get_wol_settings(bp);
10638         if (bp->flags & BNXT_FLAG_WOL_CAP)
10639                 device_set_wakeup_enable(&pdev->dev, bp->wol);
10640         else
10641                 device_set_wakeup_capable(&pdev->dev, false);
10642
10643         bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
10644
10645         bnxt_hwrm_coal_params_qcaps(bp);
10646
10647         if (BNXT_PF(bp)) {
10648                 if (!bnxt_pf_wq) {
10649                         bnxt_pf_wq =
10650                                 create_singlethread_workqueue("bnxt_pf_wq");
10651                         if (!bnxt_pf_wq) {
10652                                 dev_err(&pdev->dev, "Unable to create workqueue.\n");
10653                                 goto init_err_pci_clean;
10654                         }
10655                 }
10656                 bnxt_init_tc(bp);
10657         }
10658
10659         rc = register_netdev(dev);
10660         if (rc)
10661                 goto init_err_cleanup_tc;
10662
10663         if (BNXT_PF(bp))
10664                 bnxt_dl_register(bp);
10665
10666         netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
10667                     board_info[ent->driver_data].name,
10668                     (long)pci_resource_start(pdev, 0), dev->dev_addr);
10669         pcie_print_link_status(pdev);
10670
10671         return 0;
10672
10673 init_err_cleanup_tc:
10674         bnxt_shutdown_tc(bp);
10675         bnxt_clear_int_mode(bp);
10676
10677 init_err_pci_clean:
10678         bnxt_free_hwrm_resources(bp);
10679         bnxt_free_ctx_mem(bp);
10680         kfree(bp->ctx);
10681         bp->ctx = NULL;
10682         bnxt_cleanup_pci(bp);
10683
10684 init_err_free:
10685         free_netdev(dev);
10686         return rc;
10687 }
10688
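/* PCI shutdown callback.  Closes the netdev and, on power-off, arms
 * wake-on-LAN and places the device in D3hot.
 */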
10689 static void bnxt_shutdown(struct pci_dev *pdev)
10690 {
10691         struct net_device *dev = pci_get_drvdata(pdev);
10692         struct bnxt *bp;
10693
10694         if (!dev)
10695                 return;
10696
10697         rtnl_lock();
10698         bp = netdev_priv(dev);
10699         if (!bp)
10700                 goto shutdown_exit;
10701
10702         if (netif_running(dev))
10703                 dev_close(dev);
10704
10705         bnxt_ulp_shutdown(bp);
10706
10707         if (system_state == SYSTEM_POWER_OFF) {
10708                 bnxt_clear_int_mode(bp);
10709                 pci_wake_from_d3(pdev, bp->wol);
10710                 pci_set_power_state(pdev, PCI_D3hot);
10711         }
10712
10713 shutdown_exit:
10714         rtnl_unlock();
10715 }
10716
10717 #ifdef CONFIG_PM_SLEEP
10718 static int bnxt_suspend(struct device *device)
10719 {
10720         struct pci_dev *pdev = to_pci_dev(device);
10721         struct net_device *dev = pci_get_drvdata(pdev);
10722         struct bnxt *bp = netdev_priv(dev);
10723         int rc = 0;
10724
10725         rtnl_lock();
10726         if (netif_running(dev)) {
10727                 netif_device_detach(dev);
10728                 rc = bnxt_close(dev);
10729         }
10730         bnxt_hwrm_func_drv_unrgtr(bp);
10731         rtnl_unlock();
10732         return rc;
10733 }
10734
10735 static int bnxt_resume(struct device *device)
10736 {
10737         struct pci_dev *pdev = to_pci_dev(device);
10738         struct net_device *dev = pci_get_drvdata(pdev);
10739         struct bnxt *bp = netdev_priv(dev);
10740         int rc = 0;
10741
10742         rtnl_lock();
10743         if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
10744                 rc = -ENODEV;
10745                 goto resume_exit;
10746         }
10747         rc = bnxt_hwrm_func_reset(bp);
10748         if (rc) {
10749                 rc = -EBUSY;
10750                 goto resume_exit;
10751         }
10752         bnxt_get_wol_settings(bp);
10753         if (netif_running(dev)) {
10754                 rc = bnxt_open(dev);
10755                 if (!rc)
10756                         netif_device_attach(dev);
10757         }
10758
10759 resume_exit:
10760         rtnl_unlock();
10761         return rc;
10762 }
10763
10764 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
10765 #define BNXT_PM_OPS (&bnxt_pm_ops)
10766
10767 #else
10768
10769 #define BNXT_PM_OPS NULL
10770
10771 #endif /* CONFIG_PM_SLEEP */
10772
10773 /**
10774  * bnxt_io_error_detected - called when PCI error is detected
10775  * @pdev: Pointer to PCI device
10776  * @state: The current pci connection state
10777  *
10778  * This function is called after a PCI bus error affecting
10779  * this device has been detected.
10780  */
10781 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
10782                                                pci_channel_state_t state)
10783 {
10784         struct net_device *netdev = pci_get_drvdata(pdev);
10785         struct bnxt *bp = netdev_priv(netdev);
10786
10787         netdev_info(netdev, "PCI I/O error detected\n");
10788
10789         rtnl_lock();
10790         netif_device_detach(netdev);
10791
10792         bnxt_ulp_stop(bp);
10793
10794         if (state == pci_channel_io_perm_failure) {
10795                 rtnl_unlock();
10796                 return PCI_ERS_RESULT_DISCONNECT;
10797         }
10798
10799         if (netif_running(netdev))
10800                 bnxt_close(netdev);
10801
10802         pci_disable_device(pdev);
10803         rtnl_unlock();
10804
10805         /* Request a slot reset. */
10806         return PCI_ERS_RESULT_NEED_RESET;
10807 }
10808
10809 /**
10810  * bnxt_io_slot_reset - called after the pci bus has been reset.
10811  * @pdev: Pointer to PCI device
10812  *
10813  * Restart the card from scratch, as if from a cold boot.
10814  * At this point, the card has experienced a hard reset,
10815  * followed by fixups by BIOS, and has its config space
10816  * set up identically to what it was at cold boot.
10817  */
10818 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
10819 {
10820         struct net_device *netdev = pci_get_drvdata(pdev);
10821         struct bnxt *bp = netdev_priv(netdev);
10822         int err = 0;
10823         pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
10824
10825         netdev_info(bp->dev, "PCI Slot Reset\n");
10826
10827         rtnl_lock();
10828
10829         if (pci_enable_device(pdev)) {
10830                 dev_err(&pdev->dev,
10831                         "Cannot re-enable PCI device after reset.\n");
10832         } else {
10833                 pci_set_master(pdev);
10834
10835                 err = bnxt_hwrm_func_reset(bp);
10836                 if (!err && netif_running(netdev))
10837                         err = bnxt_open(netdev);
10838
10839                 if (!err) {
10840                         result = PCI_ERS_RESULT_RECOVERED;
10841                         bnxt_ulp_start(bp);
10842                 }
10843         }
10844
10845         if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
10846                 dev_close(netdev);
10847
10848         rtnl_unlock();
10849
10850         return result;
10851 }
10852
10853 /**
10854  * bnxt_io_resume - called when traffic can start flowing again.
10855  * @pdev: Pointer to PCI device
10856  *
10857  * This callback is called when the error recovery driver tells
10858  * us that it's OK to resume normal operation.
10859  */
10860 static void bnxt_io_resume(struct pci_dev *pdev)
10861 {
10862         struct net_device *netdev = pci_get_drvdata(pdev);
10863
10864         rtnl_lock();
10865
10866         netif_device_attach(netdev);
10867
10868         rtnl_unlock();
10869 }
10870
10871 static const struct pci_error_handlers bnxt_err_handler = {
10872         .error_detected = bnxt_io_error_detected,
10873         .slot_reset     = bnxt_io_slot_reset,
10874         .resume         = bnxt_io_resume
10875 };
10876
10877 static struct pci_driver bnxt_pci_driver = {
10878         .name           = DRV_MODULE_NAME,
10879         .id_table       = bnxt_pci_tbl,
10880         .probe          = bnxt_init_one,
10881         .remove         = bnxt_remove_one,
10882         .shutdown       = bnxt_shutdown,
10883         .driver.pm      = BNXT_PM_OPS,
10884         .err_handler    = &bnxt_err_handler,
10885 #if defined(CONFIG_BNXT_SRIOV)
10886         .sriov_configure = bnxt_sriov_configure,
10887 #endif
10888 };
10889
10890 static int __init bnxt_init(void)
10891 {
10892         bnxt_debug_init();
10893         return pci_register_driver(&bnxt_pci_driver);
10894 }
10895
10896 static void __exit bnxt_exit(void)
10897 {
10898         pci_unregister_driver(&bnxt_pci_driver);
10899         if (bnxt_pf_wq)
10900                 destroy_workqueue(bnxt_pf_wq);
10901         bnxt_debug_exit();
10902 }
10903
10904 module_init(bnxt_init);
10905 module_exit(bnxt_exit);